From 3f29fd04e1ffbbc867bee593d796d063a1dba20d Mon Sep 17 00:00:00 2001 From: Svyatoslav Nikolsky Date: Wed, 6 Jul 2022 09:54:38 +0300 Subject: [PATCH] Remove bridges subtree (#5752) * remove bridges folder * remove BRIDGES.md * remove bridges mentions from top Cargo.toml * remove bridges from CODEOWNERS * remove bridges mentions from testing.md * remove bridge pallets from Rococo/Wococo runtime * also remove from node --- polkadot/.github/CODEOWNERS | 2 - polkadot/BRIDGES.md | 47 - polkadot/Cargo.lock | 228 -- polkadot/Cargo.toml | 4 - polkadot/bridges/.config/lingua.dic | 243 -- polkadot/bridges/.config/spellcheck.toml | 13 - polkadot/bridges/.dockerignore | 1 - polkadot/bridges/.editorconfig | 19 - polkadot/bridges/.github/dependabot.yml | 42 - polkadot/bridges/.gitignore | 26 - polkadot/bridges/.gitlab-ci.yml | 298 --- .../.maintain/millau-weight-template.hbs | 103 - polkadot/bridges/CODEOWNERS | 21 - polkadot/bridges/CODE_OF_CONDUCT.md | 80 - polkadot/bridges/Dockerfile | 71 - polkadot/bridges/LICENSE | 675 ----- polkadot/bridges/README.md | 247 -- polkadot/bridges/bin/.keep | 0 polkadot/bridges/bin/millau/node/Cargo.toml | 63 - polkadot/bridges/bin/millau/node/build.rs | 23 - .../bridges/bin/millau/node/src/chain_spec.rs | 229 -- polkadot/bridges/bin/millau/node/src/cli.rs | 71 - .../bridges/bin/millau/node/src/command.rs | 153 -- polkadot/bridges/bin/millau/node/src/lib.rs | 32 - polkadot/bridges/bin/millau/node/src/main.rs | 30 - .../bridges/bin/millau/node/src/service.rs | 453 ---- .../bridges/bin/millau/runtime/Cargo.toml | 135 - polkadot/bridges/bin/millau/runtime/build.rs | 25 - .../bridges/bin/millau/runtime/src/lib.rs | 1004 ------- .../bin/millau/runtime/src/rialto_messages.rs | 428 --- .../bin/rialto-parachain/node/Cargo.toml | 91 - .../bin/rialto-parachain/node/build.rs | 22 - .../rialto-parachain/node/src/chain_spec.rs | 166 -- .../bin/rialto-parachain/node/src/cli.rs | 140 - .../bin/rialto-parachain/node/src/command.rs | 441 ---- .../bin/rialto-parachain/node/src/lib.rs | 18 - .../bin/rialto-parachain/node/src/main.rs | 29 - .../bin/rialto-parachain/node/src/service.rs | 521 ---- .../bin/rialto-parachain/runtime/Cargo.toml | 122 - .../bin/rialto-parachain/runtime/build.rs | 25 - .../bin/rialto-parachain/runtime/src/lib.rs | 663 ----- polkadot/bridges/bin/rialto/node/Cargo.toml | 92 - polkadot/bridges/bin/rialto/node/build.rs | 23 - .../bridges/bin/rialto/node/src/chain_spec.rs | 302 --- polkadot/bridges/bin/rialto/node/src/cli.rs | 86 - .../bridges/bin/rialto/node/src/command.rs | 221 -- polkadot/bridges/bin/rialto/node/src/main.rs | 28 - .../bridges/bin/rialto/runtime/Cargo.toml | 146 -- polkadot/bridges/bin/rialto/runtime/build.rs | 25 - .../bridges/bin/rialto/runtime/src/lib.rs | 998 ------- .../bin/rialto/runtime/src/millau_messages.rs | 558 ---- .../bin/rialto/runtime/src/parachains.rs | 180 -- .../bridges/bin/runtime-common/Cargo.toml | 73 - polkadot/bridges/bin/runtime-common/README.md | 181 -- .../bin/runtime-common/src/integrity.rs | 331 --- .../bridges/bin/runtime-common/src/lib.rs | 26 - .../bin/runtime-common/src/messages.rs | 1661 ------------ .../bin/runtime-common/src/messages_api.rs | 51 - .../src/messages_benchmarking.rs | 380 --- polkadot/bridges/ci.Dockerfile | 53 - polkadot/bridges/deny.toml | 202 -- .../bridges/deployments/BridgeDeps.Dockerfile | 32 - polkadot/bridges/deployments/README.md | 247 -- ...y-millau-to-rialto-messages-dashboard.json | 1684 ------------ ...y-rialto-to-millau-messages-dashboard.json | 1433 ---------- 
.../rialto-millau-maintenance-dashboard.json | 1059 -------- .../dashboard/prometheus/targets.yml | 4 - .../bridges/rialto-millau/docker-compose.yml | 116 - ...ay-messages-millau-to-rialto-entrypoint.sh | 16 - ...ay-messages-rialto-to-millau-entrypoint.sh | 16 - ...messages-to-millau-generator-entrypoint.sh | 143 - ...messages-to-rialto-generator-entrypoint.sh | 143 - ...ssages-to-rialto-resubmitter-entrypoint.sh | 25 - .../relay-millau-rialto-entrypoint.sh | 36 - .../relay-token-swap-generator-entrypoint.sh | 45 - ...y-westend-to-millau-headers-dashboard.json | 781 ------ .../dashboard/prometheus/targets.yml | 2 - .../bridges/westend-millau/docker-compose.yml | 31 - ...ay-headers-westend-to-millau-entrypoint.sh | 24 - .../local-scripts/bridge-entrypoint.sh | 7 - .../relay-headers-rococo-to-wococo.sh | 24 - .../relay-headers-wococo-to-rococo.sh | 24 - .../relay-messages-millau-to-rialto.sh | 21 - .../relay-messages-rialto-to-millau.sh | 21 - .../local-scripts/relay-millau-to-rialto.sh | 29 - .../local-scripts/relay-rialto-to-millau.sh | 27 - .../local-scripts/run-millau-node.sh | 11 - .../local-scripts/run-rialto-node.sh | 11 - .../local-scripts/run-rococo-node.sh | 14 - .../local-scripts/run-westend-node.sh | 14 - .../local-scripts/run-wococo-node.sh | 14 - .../monitoring/GrafanaMatrix.Dockerfile | 18 - .../deployments/monitoring/disabled.yml | 15 - .../deployments/monitoring/docker-compose.yml | 32 - .../monitoring/grafana-matrix/config.yml | 49 - .../dashboards/grafana-dashboard.yaml | 6 - .../datasources/grafana-datasource.yaml | 16 - .../notifiers/grafana-notifier.yaml | 15 - .../monitoring/prometheus/prometheus.yml | 7 - .../dashboard/grafana/beefy-dashboard.json | 539 ---- .../dashboard/prometheus/millau-targets.yml | 2 - .../dashboard/prometheus/rialto-targets.yml | 2 - .../rialto-chainspec-exporter-entrypoint.sh | 14 - .../rialto-parachain-registrar-entrypoint.sh | 11 - .../bridges/deployments/networks/millau.yml | 101 - .../deployments/networks/rialto-parachain.yml | 90 - .../bridges/deployments/networks/rialto.yml | 118 - .../deployments/reverse-proxy/README.md | 15 - .../reverse-proxy/docker-compose.yml | 42 - polkadot/bridges/deployments/run.sh | 159 -- .../bridges/deployments/types-millau.json | 192 -- .../bridges/deployments/types-rialto.json | 192 -- .../bridges/deployments/types-rococo.json | 151 -- .../bridges/deployments/types-wococo.json | 152 -- polkadot/bridges/deployments/types/build.sh | 22 - .../bridges/deployments/types/common.json | 124 - .../bridges/deployments/types/millau.json | 17 - .../deployments/types/rialto-millau.json | 56 - .../bridges/deployments/types/rialto.json | 17 - .../deployments/types/rococo-wococo.json | 16 - .../bridges/deployments/types/rococo.json | 16 - .../bridges/deployments/types/wococo.json | 17 - polkadot/bridges/deployments/ui/README.md | 23 - .../bridges/deployments/ui/docker-compose.yml | 13 - polkadot/bridges/docs/high-level-overview.md | 165 -- polkadot/bridges/docs/high-level.html | 55 - polkadot/bridges/docs/plan.md | 22 - polkadot/bridges/docs/scenario1.html | 47 - polkadot/bridges/docs/send-message.md | 131 - polkadot/bridges/docs/testing-scenarios.md | 221 -- .../bridges/fuzz/storage-proof/Cargo.lock | 2252 ---------------- .../bridges/fuzz/storage-proof/Cargo.toml | 25 - polkadot/bridges/fuzz/storage-proof/README.md | 34 - .../bridges/fuzz/storage-proof/src/main.rs | 80 - polkadot/bridges/modules/dispatch/Cargo.toml | 43 - polkadot/bridges/modules/dispatch/README.md | 63 - polkadot/bridges/modules/dispatch/src/lib.rs | 1108 
-------- polkadot/bridges/modules/grandpa/Cargo.toml | 62 - .../modules/grandpa/src/benchmarking.rs | 121 - polkadot/bridges/modules/grandpa/src/lib.rs | 1165 --------- polkadot/bridges/modules/grandpa/src/mock.rs | 126 - .../bridges/modules/grandpa/src/weights.rs | 75 - polkadot/bridges/modules/messages/Cargo.toml | 55 - polkadot/bridges/modules/messages/README.md | 424 --- .../modules/messages/src/benchmarking.rs | 668 ----- .../modules/messages/src/inbound_lane.rs | 504 ---- .../modules/messages/src/instant_payments.rs | 350 --- polkadot/bridges/modules/messages/src/lib.rs | 2310 ----------------- polkadot/bridges/modules/messages/src/mock.rs | 562 ---- .../modules/messages/src/outbound_lane.rs | 462 ---- .../bridges/modules/messages/src/weights.rs | 216 -- .../modules/messages/src/weights_ext.rs | 397 --- .../modules/shift-session-manager/Cargo.toml | 35 - .../modules/shift-session-manager/src/lib.rs | 249 -- .../bridges/modules/token-swap/Cargo.toml | 59 - .../modules/token-swap/src/benchmarking.rs | 198 -- .../bridges/modules/token-swap/src/lib.rs | 1192 --------- .../bridges/modules/token-swap/src/mock.rs | 200 -- .../bridges/modules/token-swap/src/weights.rs | 93 - .../modules/token-swap/src/weights_ext.rs | 42 - .../primitives/chain-kusama/Cargo.toml | 37 - .../primitives/chain-kusama/src/lib.rs | 148 -- .../primitives/chain-millau/Cargo.toml | 54 - .../primitives/chain-millau/src/lib.rs | 341 --- .../chain-millau/src/millau_hash.rs | 58 - .../primitives/chain-polkadot/Cargo.toml | 37 - .../primitives/chain-polkadot/src/lib.rs | 148 -- .../chain-rialto-parachain/Cargo.toml | 36 - .../chain-rialto-parachain/src/lib.rs | 124 - .../primitives/chain-rialto/Cargo.toml | 36 - .../primitives/chain-rialto/src/lib.rs | 307 --- .../primitives/chain-rococo/Cargo.toml | 37 - .../primitives/chain-rococo/src/lib.rs | 144 - .../primitives/chain-westend/Cargo.toml | 41 - .../primitives/chain-westend/src/lib.rs | 111 - .../primitives/chain-wococo/Cargo.toml | 34 - .../primitives/chain-wococo/src/lib.rs | 101 - .../primitives/header-chain/Cargo.toml | 46 - .../header-chain/src/justification.rs | 227 -- .../primitives/header-chain/src/lib.rs | 134 - .../header-chain/src/storage_keys.rs | 78 - .../tests/implementation_match.rs | 335 --- .../header-chain/tests/justification.rs | 192 -- .../primitives/message-dispatch/Cargo.toml | 27 - .../primitives/message-dispatch/src/lib.rs | 142 - .../bridges/primitives/messages/Cargo.toml | 43 - .../bridges/primitives/messages/src/lib.rs | 402 --- .../primitives/messages/src/source_chain.rs | 299 --- .../primitives/messages/src/storage_keys.rs | 128 - .../primitives/messages/src/target_chain.rs | 173 -- .../primitives/polkadot-core/Cargo.toml | 45 - .../primitives/polkadot-core/src/lib.rs | 450 ---- .../bridges/primitives/runtime/Cargo.toml | 42 - .../bridges/primitives/runtime/src/chain.rs | 215 -- .../bridges/primitives/runtime/src/lib.rs | 279 -- .../primitives/runtime/src/messages.rs | 57 - .../primitives/runtime/src/storage_proof.rs | 119 - .../bridges/primitives/test-utils/Cargo.toml | 29 - .../primitives/test-utils/src/keyring.rs | 94 - .../bridges/primitives/test-utils/src/lib.rs | 212 -- .../bridges/primitives/token-swap/Cargo.toml | 38 - .../bridges/primitives/token-swap/src/lib.rs | 124 - .../primitives/token-swap/src/storage_keys.rs | 51 - .../bridges/relays/bin-substrate/Cargo.toml | 82 - .../relays/bin-substrate/src/chains/kusama.rs | 135 - .../src/chains/kusama_headers_to_polkadot.rs | 125 - .../src/chains/kusama_messages_to_polkadot.rs | 79 - 
.../relays/bin-substrate/src/chains/millau.rs | 123 - .../src/chains/millau_headers_to_rialto.rs | 37 - .../src/chains/millau_messages_to_rialto.rs | 70 - .../relays/bin-substrate/src/chains/mod.rs | 344 --- .../bin-substrate/src/chains/polkadot.rs | 135 - .../src/chains/polkadot_headers_to_kusama.rs | 88 - .../src/chains/polkadot_messages_to_kusama.rs | 78 - .../relays/bin-substrate/src/chains/rialto.rs | 122 - .../src/chains/rialto_headers_to_millau.rs | 37 - .../src/chains/rialto_messages_to_millau.rs | 70 - .../src/chains/rialto_parachain.rs | 78 - .../relays/bin-substrate/src/chains/rococo.rs | 132 - .../src/chains/rococo_headers_to_wococo.rs | 56 - .../src/chains/rococo_messages_to_wococo.rs | 63 - .../bin-substrate/src/chains/westend.rs | 42 - .../src/chains/westend_headers_to_millau.rs | 37 - .../relays/bin-substrate/src/chains/wococo.rs | 126 - .../src/chains/wococo_headers_to_rococo.rs | 82 - .../src/chains/wococo_messages_to_rococo.rs | 64 - .../relays/bin-substrate/src/cli/bridge.rs | 195 -- .../bin-substrate/src/cli/derive_account.rs | 101 - .../bin-substrate/src/cli/encode_call.rs | 354 --- .../bin-substrate/src/cli/encode_message.rs | 120 - .../bin-substrate/src/cli/estimate_fee.rs | 276 -- .../bin-substrate/src/cli/init_bridge.rs | 218 -- .../relays/bin-substrate/src/cli/mod.rs | 749 ------ .../src/cli/register_parachain.rs | 370 --- .../bin-substrate/src/cli/reinit_bridge.rs | 552 ---- .../bin-substrate/src/cli/relay_headers.rs | 149 -- .../src/cli/relay_headers_and_messages.rs | 576 ---- .../bin-substrate/src/cli/relay_messages.rs | 149 -- .../src/cli/resubmit_transactions.rs | 574 ---- .../bin-substrate/src/cli/send_message.rs | 444 ---- .../bin-substrate/src/cli/swap_tokens.rs | 869 ------- .../bridges/relays/bin-substrate/src/main.rs | 31 - .../bridges/relays/client-kusama/Cargo.toml | 30 - .../bridges/relays/client-kusama/src/lib.rs | 151 -- .../relays/client-kusama/src/runtime.rs | 166 -- .../bridges/relays/client-millau/Cargo.toml | 25 - .../bridges/relays/client-millau/src/lib.rs | 198 -- .../bridges/relays/client-polkadot/Cargo.toml | 30 - .../bridges/relays/client-polkadot/src/lib.rs | 152 -- .../relays/client-polkadot/src/runtime.rs | 166 -- .../relays/client-rialto-parachain/Cargo.toml | 21 - .../relays/client-rialto-parachain/src/lib.rs | 63 - .../bridges/relays/client-rialto/Cargo.toml | 25 - .../bridges/relays/client-rialto/src/lib.rs | 196 -- .../bridges/relays/client-rococo/Cargo.toml | 31 - .../bridges/relays/client-rococo/src/lib.rs | 151 -- .../relays/client-rococo/src/runtime.rs | 145 -- .../relays/client-substrate/Cargo.toml | 48 - .../relays/client-substrate/src/chain.rs | 219 -- .../relays/client-substrate/src/client.rs | 787 ------ .../relays/client-substrate/src/error.rs | 86 - .../relays/client-substrate/src/guard.rs | 415 --- .../relays/client-substrate/src/lib.rs | 114 - .../src/metrics/float_storage_value.rs | 133 - .../client-substrate/src/metrics/mod.rs | 23 - .../src/metrics/storage_proof_overhead.rs | 101 - .../relays/client-substrate/src/rpc.rs | 76 - .../client-substrate/src/sync_header.rs | 59 - .../bridges/relays/client-westend/Cargo.toml | 21 - .../bridges/relays/client-westend/src/lib.rs | 77 - .../bridges/relays/client-wococo/Cargo.toml | 29 - .../bridges/relays/client-wococo/src/lib.rs | 151 -- .../relays/client-wococo/src/runtime.rs | 145 -- polkadot/bridges/relays/finality/Cargo.toml | 20 - .../relays/finality/src/finality_loop.rs | 692 ----- .../finality/src/finality_loop_tests.rs | 549 ---- 
polkadot/bridges/relays/finality/src/lib.rs | 59 - .../relays/finality/src/sync_loop_metrics.rs | 86 - .../relays/lib-substrate-relay/Cargo.toml | 52 - .../src/conversion_rate_update.rs | 440 ---- .../relays/lib-substrate-relay/src/error.rs | 61 - .../src/finality_guards.rs | 48 - .../src/finality_pipeline.rs | 191 -- .../src/finality_source.rs | 182 -- .../src/finality_target.rs | 139 - .../src/headers_initialize.rs | 282 -- .../relays/lib-substrate-relay/src/helpers.rs | 106 - .../relays/lib-substrate-relay/src/lib.rs | 53 - .../lib-substrate-relay/src/messages_lane.rs | 513 ---- .../src/messages_metrics.rs | 389 --- .../src/messages_source.rs | 652 ----- .../src/messages_target.rs | 583 ----- .../src/on_demand_headers.rs | 452 ---- polkadot/bridges/relays/messages/Cargo.toml | 24 - polkadot/bridges/relays/messages/src/lib.rs | 37 - .../relays/messages/src/message_lane.rs | 71 - .../relays/messages/src/message_lane_loop.rs | 968 ------- .../messages/src/message_race_delivery.rs | 1076 -------- .../relays/messages/src/message_race_loop.rs | 634 ----- .../messages/src/message_race_receiving.rs | 228 -- .../messages/src/message_race_strategy.rs | 517 ---- .../bridges/relays/messages/src/metrics.rs | 139 - .../src/relay_strategy/altruistic_strategy.rs | 45 - .../relay_strategy/enforcement_strategy.rs | 219 -- .../src/relay_strategy/mix_strategy.rs | 58 - .../relays/messages/src/relay_strategy/mod.rs | 123 - .../src/relay_strategy/rational_strategy.rs | 122 - polkadot/bridges/relays/utils/Cargo.toml | 32 - polkadot/bridges/relays/utils/src/error.rs | 46 - .../bridges/relays/utils/src/initialize.rs | 136 - polkadot/bridges/relays/utils/src/lib.rs | 273 -- polkadot/bridges/relays/utils/src/metrics.rs | 164 -- .../utils/src/metrics/float_json_value.rs | 147 -- .../relays/utils/src/metrics/global.rs | 118 - .../bridges/relays/utils/src/relay_loop.rs | 268 -- polkadot/bridges/rustfmt.toml | 24 - polkadot/bridges/scripts/add_license.sh | 22 - polkadot/bridges/scripts/ci-cache.sh | 19 - polkadot/bridges/scripts/dump-logs.sh | 35 - polkadot/bridges/scripts/license_header | 16 - .../send-message-from-millau-rialto.sh | 37 - .../send-message-from-rialto-millau.sh | 37 - .../bridges/scripts/update-weights-setup.sh | 33 - polkadot/bridges/scripts/update-weights.sh | 43 - polkadot/bridges/scripts/update_substrate.sh | 10 - polkadot/doc/testing.md | 1 - polkadot/node/service/src/chain_spec.rs | 32 - polkadot/runtime/rococo/Cargo.toml | 18 - .../runtime/rococo/src/bridge_messages.rs | 527 ---- polkadot/runtime/rococo/src/lib.rs | 235 +- 330 files changed, 1 insertion(+), 64568 deletions(-) delete mode 100644 polkadot/BRIDGES.md delete mode 100644 polkadot/bridges/.config/lingua.dic delete mode 100644 polkadot/bridges/.config/spellcheck.toml delete mode 100644 polkadot/bridges/.dockerignore delete mode 100644 polkadot/bridges/.editorconfig delete mode 100644 polkadot/bridges/.github/dependabot.yml delete mode 100644 polkadot/bridges/.gitignore delete mode 100644 polkadot/bridges/.gitlab-ci.yml delete mode 100644 polkadot/bridges/.maintain/millau-weight-template.hbs delete mode 100644 polkadot/bridges/CODEOWNERS delete mode 100644 polkadot/bridges/CODE_OF_CONDUCT.md delete mode 100644 polkadot/bridges/Dockerfile delete mode 100644 polkadot/bridges/LICENSE delete mode 100644 polkadot/bridges/README.md delete mode 100644 polkadot/bridges/bin/.keep delete mode 100644 polkadot/bridges/bin/millau/node/Cargo.toml delete mode 100644 polkadot/bridges/bin/millau/node/build.rs delete mode 100644 
polkadot/bridges/bin/millau/node/src/chain_spec.rs delete mode 100644 polkadot/bridges/bin/millau/node/src/cli.rs delete mode 100644 polkadot/bridges/bin/millau/node/src/command.rs delete mode 100644 polkadot/bridges/bin/millau/node/src/lib.rs delete mode 100644 polkadot/bridges/bin/millau/node/src/main.rs delete mode 100644 polkadot/bridges/bin/millau/node/src/service.rs delete mode 100644 polkadot/bridges/bin/millau/runtime/Cargo.toml delete mode 100644 polkadot/bridges/bin/millau/runtime/build.rs delete mode 100644 polkadot/bridges/bin/millau/runtime/src/lib.rs delete mode 100644 polkadot/bridges/bin/millau/runtime/src/rialto_messages.rs delete mode 100644 polkadot/bridges/bin/rialto-parachain/node/Cargo.toml delete mode 100644 polkadot/bridges/bin/rialto-parachain/node/build.rs delete mode 100644 polkadot/bridges/bin/rialto-parachain/node/src/chain_spec.rs delete mode 100644 polkadot/bridges/bin/rialto-parachain/node/src/cli.rs delete mode 100644 polkadot/bridges/bin/rialto-parachain/node/src/command.rs delete mode 100644 polkadot/bridges/bin/rialto-parachain/node/src/lib.rs delete mode 100644 polkadot/bridges/bin/rialto-parachain/node/src/main.rs delete mode 100644 polkadot/bridges/bin/rialto-parachain/node/src/service.rs delete mode 100644 polkadot/bridges/bin/rialto-parachain/runtime/Cargo.toml delete mode 100644 polkadot/bridges/bin/rialto-parachain/runtime/build.rs delete mode 100644 polkadot/bridges/bin/rialto-parachain/runtime/src/lib.rs delete mode 100644 polkadot/bridges/bin/rialto/node/Cargo.toml delete mode 100644 polkadot/bridges/bin/rialto/node/build.rs delete mode 100644 polkadot/bridges/bin/rialto/node/src/chain_spec.rs delete mode 100644 polkadot/bridges/bin/rialto/node/src/cli.rs delete mode 100644 polkadot/bridges/bin/rialto/node/src/command.rs delete mode 100644 polkadot/bridges/bin/rialto/node/src/main.rs delete mode 100644 polkadot/bridges/bin/rialto/runtime/Cargo.toml delete mode 100644 polkadot/bridges/bin/rialto/runtime/build.rs delete mode 100644 polkadot/bridges/bin/rialto/runtime/src/lib.rs delete mode 100644 polkadot/bridges/bin/rialto/runtime/src/millau_messages.rs delete mode 100644 polkadot/bridges/bin/rialto/runtime/src/parachains.rs delete mode 100644 polkadot/bridges/bin/runtime-common/Cargo.toml delete mode 100644 polkadot/bridges/bin/runtime-common/README.md delete mode 100644 polkadot/bridges/bin/runtime-common/src/integrity.rs delete mode 100644 polkadot/bridges/bin/runtime-common/src/lib.rs delete mode 100644 polkadot/bridges/bin/runtime-common/src/messages.rs delete mode 100644 polkadot/bridges/bin/runtime-common/src/messages_api.rs delete mode 100644 polkadot/bridges/bin/runtime-common/src/messages_benchmarking.rs delete mode 100644 polkadot/bridges/ci.Dockerfile delete mode 100644 polkadot/bridges/deny.toml delete mode 100644 polkadot/bridges/deployments/BridgeDeps.Dockerfile delete mode 100644 polkadot/bridges/deployments/README.md delete mode 100644 polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/relay-millau-to-rialto-messages-dashboard.json delete mode 100644 polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/relay-rialto-to-millau-messages-dashboard.json delete mode 100644 polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/rialto-millau-maintenance-dashboard.json delete mode 100644 polkadot/bridges/deployments/bridges/rialto-millau/dashboard/prometheus/targets.yml delete mode 100644 polkadot/bridges/deployments/bridges/rialto-millau/docker-compose.yml delete mode 100755 
polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-millau-to-rialto-entrypoint.sh delete mode 100755 polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-rialto-to-millau-entrypoint.sh delete mode 100755 polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-millau-generator-entrypoint.sh delete mode 100755 polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-rialto-generator-entrypoint.sh delete mode 100755 polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-rialto-resubmitter-entrypoint.sh delete mode 100755 polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-millau-rialto-entrypoint.sh delete mode 100755 polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-token-swap-generator-entrypoint.sh delete mode 100644 polkadot/bridges/deployments/bridges/westend-millau/dashboard/grafana/relay-westend-to-millau-headers-dashboard.json delete mode 100644 polkadot/bridges/deployments/bridges/westend-millau/dashboard/prometheus/targets.yml delete mode 100644 polkadot/bridges/deployments/bridges/westend-millau/docker-compose.yml delete mode 100755 polkadot/bridges/deployments/bridges/westend-millau/entrypoints/relay-headers-westend-to-millau-entrypoint.sh delete mode 100755 polkadot/bridges/deployments/local-scripts/bridge-entrypoint.sh delete mode 100755 polkadot/bridges/deployments/local-scripts/relay-headers-rococo-to-wococo.sh delete mode 100755 polkadot/bridges/deployments/local-scripts/relay-headers-wococo-to-rococo.sh delete mode 100755 polkadot/bridges/deployments/local-scripts/relay-messages-millau-to-rialto.sh delete mode 100755 polkadot/bridges/deployments/local-scripts/relay-messages-rialto-to-millau.sh delete mode 100755 polkadot/bridges/deployments/local-scripts/relay-millau-to-rialto.sh delete mode 100755 polkadot/bridges/deployments/local-scripts/relay-rialto-to-millau.sh delete mode 100755 polkadot/bridges/deployments/local-scripts/run-millau-node.sh delete mode 100755 polkadot/bridges/deployments/local-scripts/run-rialto-node.sh delete mode 100755 polkadot/bridges/deployments/local-scripts/run-rococo-node.sh delete mode 100755 polkadot/bridges/deployments/local-scripts/run-westend-node.sh delete mode 100755 polkadot/bridges/deployments/local-scripts/run-wococo-node.sh delete mode 100644 polkadot/bridges/deployments/monitoring/GrafanaMatrix.Dockerfile delete mode 100644 polkadot/bridges/deployments/monitoring/disabled.yml delete mode 100644 polkadot/bridges/deployments/monitoring/docker-compose.yml delete mode 100644 polkadot/bridges/deployments/monitoring/grafana-matrix/config.yml delete mode 100644 polkadot/bridges/deployments/monitoring/grafana/provisioning/dashboards/grafana-dashboard.yaml delete mode 100644 polkadot/bridges/deployments/monitoring/grafana/provisioning/datasources/grafana-datasource.yaml delete mode 100644 polkadot/bridges/deployments/monitoring/grafana/provisioning/notifiers/grafana-notifier.yaml delete mode 100644 polkadot/bridges/deployments/monitoring/prometheus/prometheus.yml delete mode 100644 polkadot/bridges/deployments/networks/dashboard/grafana/beefy-dashboard.json delete mode 100644 polkadot/bridges/deployments/networks/dashboard/prometheus/millau-targets.yml delete mode 100644 polkadot/bridges/deployments/networks/dashboard/prometheus/rialto-targets.yml delete mode 100755 polkadot/bridges/deployments/networks/entrypoints/rialto-chainspec-exporter-entrypoint.sh delete mode 100755 
polkadot/bridges/deployments/networks/entrypoints/rialto-parachain-registrar-entrypoint.sh delete mode 100644 polkadot/bridges/deployments/networks/millau.yml delete mode 100644 polkadot/bridges/deployments/networks/rialto-parachain.yml delete mode 100644 polkadot/bridges/deployments/networks/rialto.yml delete mode 100644 polkadot/bridges/deployments/reverse-proxy/README.md delete mode 100644 polkadot/bridges/deployments/reverse-proxy/docker-compose.yml delete mode 100755 polkadot/bridges/deployments/run.sh delete mode 100644 polkadot/bridges/deployments/types-millau.json delete mode 100644 polkadot/bridges/deployments/types-rialto.json delete mode 100644 polkadot/bridges/deployments/types-rococo.json delete mode 100644 polkadot/bridges/deployments/types-wococo.json delete mode 100755 polkadot/bridges/deployments/types/build.sh delete mode 100644 polkadot/bridges/deployments/types/common.json delete mode 100644 polkadot/bridges/deployments/types/millau.json delete mode 100644 polkadot/bridges/deployments/types/rialto-millau.json delete mode 100644 polkadot/bridges/deployments/types/rialto.json delete mode 100644 polkadot/bridges/deployments/types/rococo-wococo.json delete mode 100644 polkadot/bridges/deployments/types/rococo.json delete mode 100644 polkadot/bridges/deployments/types/wococo.json delete mode 100644 polkadot/bridges/deployments/ui/README.md delete mode 100644 polkadot/bridges/deployments/ui/docker-compose.yml delete mode 100644 polkadot/bridges/docs/high-level-overview.md delete mode 100644 polkadot/bridges/docs/high-level.html delete mode 100644 polkadot/bridges/docs/plan.md delete mode 100644 polkadot/bridges/docs/scenario1.html delete mode 100644 polkadot/bridges/docs/send-message.md delete mode 100644 polkadot/bridges/docs/testing-scenarios.md delete mode 100644 polkadot/bridges/fuzz/storage-proof/Cargo.lock delete mode 100644 polkadot/bridges/fuzz/storage-proof/Cargo.toml delete mode 100644 polkadot/bridges/fuzz/storage-proof/README.md delete mode 100644 polkadot/bridges/fuzz/storage-proof/src/main.rs delete mode 100644 polkadot/bridges/modules/dispatch/Cargo.toml delete mode 100644 polkadot/bridges/modules/dispatch/README.md delete mode 100644 polkadot/bridges/modules/dispatch/src/lib.rs delete mode 100644 polkadot/bridges/modules/grandpa/Cargo.toml delete mode 100644 polkadot/bridges/modules/grandpa/src/benchmarking.rs delete mode 100644 polkadot/bridges/modules/grandpa/src/lib.rs delete mode 100644 polkadot/bridges/modules/grandpa/src/mock.rs delete mode 100644 polkadot/bridges/modules/grandpa/src/weights.rs delete mode 100644 polkadot/bridges/modules/messages/Cargo.toml delete mode 100644 polkadot/bridges/modules/messages/README.md delete mode 100644 polkadot/bridges/modules/messages/src/benchmarking.rs delete mode 100644 polkadot/bridges/modules/messages/src/inbound_lane.rs delete mode 100644 polkadot/bridges/modules/messages/src/instant_payments.rs delete mode 100644 polkadot/bridges/modules/messages/src/lib.rs delete mode 100644 polkadot/bridges/modules/messages/src/mock.rs delete mode 100644 polkadot/bridges/modules/messages/src/outbound_lane.rs delete mode 100644 polkadot/bridges/modules/messages/src/weights.rs delete mode 100644 polkadot/bridges/modules/messages/src/weights_ext.rs delete mode 100644 polkadot/bridges/modules/shift-session-manager/Cargo.toml delete mode 100644 polkadot/bridges/modules/shift-session-manager/src/lib.rs delete mode 100644 polkadot/bridges/modules/token-swap/Cargo.toml delete mode 100644 
polkadot/bridges/modules/token-swap/src/benchmarking.rs delete mode 100644 polkadot/bridges/modules/token-swap/src/lib.rs delete mode 100644 polkadot/bridges/modules/token-swap/src/mock.rs delete mode 100644 polkadot/bridges/modules/token-swap/src/weights.rs delete mode 100644 polkadot/bridges/modules/token-swap/src/weights_ext.rs delete mode 100644 polkadot/bridges/primitives/chain-kusama/Cargo.toml delete mode 100644 polkadot/bridges/primitives/chain-kusama/src/lib.rs delete mode 100644 polkadot/bridges/primitives/chain-millau/Cargo.toml delete mode 100644 polkadot/bridges/primitives/chain-millau/src/lib.rs delete mode 100644 polkadot/bridges/primitives/chain-millau/src/millau_hash.rs delete mode 100644 polkadot/bridges/primitives/chain-polkadot/Cargo.toml delete mode 100644 polkadot/bridges/primitives/chain-polkadot/src/lib.rs delete mode 100644 polkadot/bridges/primitives/chain-rialto-parachain/Cargo.toml delete mode 100644 polkadot/bridges/primitives/chain-rialto-parachain/src/lib.rs delete mode 100644 polkadot/bridges/primitives/chain-rialto/Cargo.toml delete mode 100644 polkadot/bridges/primitives/chain-rialto/src/lib.rs delete mode 100644 polkadot/bridges/primitives/chain-rococo/Cargo.toml delete mode 100644 polkadot/bridges/primitives/chain-rococo/src/lib.rs delete mode 100644 polkadot/bridges/primitives/chain-westend/Cargo.toml delete mode 100644 polkadot/bridges/primitives/chain-westend/src/lib.rs delete mode 100644 polkadot/bridges/primitives/chain-wococo/Cargo.toml delete mode 100644 polkadot/bridges/primitives/chain-wococo/src/lib.rs delete mode 100644 polkadot/bridges/primitives/header-chain/Cargo.toml delete mode 100644 polkadot/bridges/primitives/header-chain/src/justification.rs delete mode 100644 polkadot/bridges/primitives/header-chain/src/lib.rs delete mode 100644 polkadot/bridges/primitives/header-chain/src/storage_keys.rs delete mode 100644 polkadot/bridges/primitives/header-chain/tests/implementation_match.rs delete mode 100644 polkadot/bridges/primitives/header-chain/tests/justification.rs delete mode 100644 polkadot/bridges/primitives/message-dispatch/Cargo.toml delete mode 100644 polkadot/bridges/primitives/message-dispatch/src/lib.rs delete mode 100644 polkadot/bridges/primitives/messages/Cargo.toml delete mode 100644 polkadot/bridges/primitives/messages/src/lib.rs delete mode 100644 polkadot/bridges/primitives/messages/src/source_chain.rs delete mode 100644 polkadot/bridges/primitives/messages/src/storage_keys.rs delete mode 100644 polkadot/bridges/primitives/messages/src/target_chain.rs delete mode 100644 polkadot/bridges/primitives/polkadot-core/Cargo.toml delete mode 100644 polkadot/bridges/primitives/polkadot-core/src/lib.rs delete mode 100644 polkadot/bridges/primitives/runtime/Cargo.toml delete mode 100644 polkadot/bridges/primitives/runtime/src/chain.rs delete mode 100644 polkadot/bridges/primitives/runtime/src/lib.rs delete mode 100644 polkadot/bridges/primitives/runtime/src/messages.rs delete mode 100644 polkadot/bridges/primitives/runtime/src/storage_proof.rs delete mode 100644 polkadot/bridges/primitives/test-utils/Cargo.toml delete mode 100644 polkadot/bridges/primitives/test-utils/src/keyring.rs delete mode 100644 polkadot/bridges/primitives/test-utils/src/lib.rs delete mode 100644 polkadot/bridges/primitives/token-swap/Cargo.toml delete mode 100644 polkadot/bridges/primitives/token-swap/src/lib.rs delete mode 100644 polkadot/bridges/primitives/token-swap/src/storage_keys.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/Cargo.toml delete 
mode 100644 polkadot/bridges/relays/bin-substrate/src/chains/kusama.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/chains/kusama_headers_to_polkadot.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/chains/kusama_messages_to_polkadot.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/chains/millau.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/chains/millau_headers_to_rialto.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/chains/millau_messages_to_rialto.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/chains/mod.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/chains/polkadot.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/chains/polkadot_headers_to_kusama.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/chains/polkadot_messages_to_kusama.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/chains/rialto.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/chains/rialto_headers_to_millau.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/chains/rialto_messages_to_millau.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/chains/rialto_parachain.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/chains/rococo.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/chains/rococo_headers_to_wococo.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/chains/rococo_messages_to_wococo.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/chains/westend.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/chains/westend_headers_to_millau.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/chains/wococo.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/chains/wococo_headers_to_rococo.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/chains/wococo_messages_to_rococo.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/cli/bridge.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/cli/derive_account.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/cli/encode_call.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/cli/encode_message.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/cli/estimate_fee.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/cli/init_bridge.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/cli/mod.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/cli/register_parachain.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/cli/reinit_bridge.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/cli/relay_headers.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/cli/relay_headers_and_messages.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/cli/relay_messages.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/cli/resubmit_transactions.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/cli/send_message.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/cli/swap_tokens.rs delete mode 100644 polkadot/bridges/relays/bin-substrate/src/main.rs delete mode 100644 polkadot/bridges/relays/client-kusama/Cargo.toml delete mode 100644 polkadot/bridges/relays/client-kusama/src/lib.rs delete mode 100644 polkadot/bridges/relays/client-kusama/src/runtime.rs delete mode 100644 polkadot/bridges/relays/client-millau/Cargo.toml delete 
mode 100644 polkadot/bridges/relays/client-millau/src/lib.rs delete mode 100644 polkadot/bridges/relays/client-polkadot/Cargo.toml delete mode 100644 polkadot/bridges/relays/client-polkadot/src/lib.rs delete mode 100644 polkadot/bridges/relays/client-polkadot/src/runtime.rs delete mode 100644 polkadot/bridges/relays/client-rialto-parachain/Cargo.toml delete mode 100644 polkadot/bridges/relays/client-rialto-parachain/src/lib.rs delete mode 100644 polkadot/bridges/relays/client-rialto/Cargo.toml delete mode 100644 polkadot/bridges/relays/client-rialto/src/lib.rs delete mode 100644 polkadot/bridges/relays/client-rococo/Cargo.toml delete mode 100644 polkadot/bridges/relays/client-rococo/src/lib.rs delete mode 100644 polkadot/bridges/relays/client-rococo/src/runtime.rs delete mode 100644 polkadot/bridges/relays/client-substrate/Cargo.toml delete mode 100644 polkadot/bridges/relays/client-substrate/src/chain.rs delete mode 100644 polkadot/bridges/relays/client-substrate/src/client.rs delete mode 100644 polkadot/bridges/relays/client-substrate/src/error.rs delete mode 100644 polkadot/bridges/relays/client-substrate/src/guard.rs delete mode 100644 polkadot/bridges/relays/client-substrate/src/lib.rs delete mode 100644 polkadot/bridges/relays/client-substrate/src/metrics/float_storage_value.rs delete mode 100644 polkadot/bridges/relays/client-substrate/src/metrics/mod.rs delete mode 100644 polkadot/bridges/relays/client-substrate/src/metrics/storage_proof_overhead.rs delete mode 100644 polkadot/bridges/relays/client-substrate/src/rpc.rs delete mode 100644 polkadot/bridges/relays/client-substrate/src/sync_header.rs delete mode 100644 polkadot/bridges/relays/client-westend/Cargo.toml delete mode 100644 polkadot/bridges/relays/client-westend/src/lib.rs delete mode 100644 polkadot/bridges/relays/client-wococo/Cargo.toml delete mode 100644 polkadot/bridges/relays/client-wococo/src/lib.rs delete mode 100644 polkadot/bridges/relays/client-wococo/src/runtime.rs delete mode 100644 polkadot/bridges/relays/finality/Cargo.toml delete mode 100644 polkadot/bridges/relays/finality/src/finality_loop.rs delete mode 100644 polkadot/bridges/relays/finality/src/finality_loop_tests.rs delete mode 100644 polkadot/bridges/relays/finality/src/lib.rs delete mode 100644 polkadot/bridges/relays/finality/src/sync_loop_metrics.rs delete mode 100644 polkadot/bridges/relays/lib-substrate-relay/Cargo.toml delete mode 100644 polkadot/bridges/relays/lib-substrate-relay/src/conversion_rate_update.rs delete mode 100644 polkadot/bridges/relays/lib-substrate-relay/src/error.rs delete mode 100644 polkadot/bridges/relays/lib-substrate-relay/src/finality_guards.rs delete mode 100644 polkadot/bridges/relays/lib-substrate-relay/src/finality_pipeline.rs delete mode 100644 polkadot/bridges/relays/lib-substrate-relay/src/finality_source.rs delete mode 100644 polkadot/bridges/relays/lib-substrate-relay/src/finality_target.rs delete mode 100644 polkadot/bridges/relays/lib-substrate-relay/src/headers_initialize.rs delete mode 100644 polkadot/bridges/relays/lib-substrate-relay/src/helpers.rs delete mode 100644 polkadot/bridges/relays/lib-substrate-relay/src/lib.rs delete mode 100644 polkadot/bridges/relays/lib-substrate-relay/src/messages_lane.rs delete mode 100644 polkadot/bridges/relays/lib-substrate-relay/src/messages_metrics.rs delete mode 100644 polkadot/bridges/relays/lib-substrate-relay/src/messages_source.rs delete mode 100644 polkadot/bridges/relays/lib-substrate-relay/src/messages_target.rs delete mode 100644 
polkadot/bridges/relays/lib-substrate-relay/src/on_demand_headers.rs delete mode 100644 polkadot/bridges/relays/messages/Cargo.toml delete mode 100644 polkadot/bridges/relays/messages/src/lib.rs delete mode 100644 polkadot/bridges/relays/messages/src/message_lane.rs delete mode 100644 polkadot/bridges/relays/messages/src/message_lane_loop.rs delete mode 100644 polkadot/bridges/relays/messages/src/message_race_delivery.rs delete mode 100644 polkadot/bridges/relays/messages/src/message_race_loop.rs delete mode 100644 polkadot/bridges/relays/messages/src/message_race_receiving.rs delete mode 100644 polkadot/bridges/relays/messages/src/message_race_strategy.rs delete mode 100644 polkadot/bridges/relays/messages/src/metrics.rs delete mode 100644 polkadot/bridges/relays/messages/src/relay_strategy/altruistic_strategy.rs delete mode 100644 polkadot/bridges/relays/messages/src/relay_strategy/enforcement_strategy.rs delete mode 100644 polkadot/bridges/relays/messages/src/relay_strategy/mix_strategy.rs delete mode 100644 polkadot/bridges/relays/messages/src/relay_strategy/mod.rs delete mode 100644 polkadot/bridges/relays/messages/src/relay_strategy/rational_strategy.rs delete mode 100644 polkadot/bridges/relays/utils/Cargo.toml delete mode 100644 polkadot/bridges/relays/utils/src/error.rs delete mode 100644 polkadot/bridges/relays/utils/src/initialize.rs delete mode 100644 polkadot/bridges/relays/utils/src/lib.rs delete mode 100644 polkadot/bridges/relays/utils/src/metrics.rs delete mode 100644 polkadot/bridges/relays/utils/src/metrics/float_json_value.rs delete mode 100644 polkadot/bridges/relays/utils/src/metrics/global.rs delete mode 100644 polkadot/bridges/relays/utils/src/relay_loop.rs delete mode 100644 polkadot/bridges/rustfmt.toml delete mode 100755 polkadot/bridges/scripts/add_license.sh delete mode 100755 polkadot/bridges/scripts/ci-cache.sh delete mode 100755 polkadot/bridges/scripts/dump-logs.sh delete mode 100644 polkadot/bridges/scripts/license_header delete mode 100755 polkadot/bridges/scripts/send-message-from-millau-rialto.sh delete mode 100755 polkadot/bridges/scripts/send-message-from-rialto-millau.sh delete mode 100644 polkadot/bridges/scripts/update-weights-setup.sh delete mode 100755 polkadot/bridges/scripts/update-weights.sh delete mode 100755 polkadot/bridges/scripts/update_substrate.sh delete mode 100644 polkadot/runtime/rococo/src/bridge_messages.rs diff --git a/polkadot/.github/CODEOWNERS b/polkadot/.github/CODEOWNERS index 1255c06050c..37839791456 100644 --- a/polkadot/.github/CODEOWNERS +++ b/polkadot/.github/CODEOWNERS @@ -1,5 +1,3 @@ -bridges/ @tomusdrw @svyatonik @acatangiu - # CI /.github/ @paritytech/ci @chevdor /scripts/ci/ @paritytech/ci @chevdor diff --git a/polkadot/BRIDGES.md b/polkadot/BRIDGES.md deleted file mode 100644 index fb61132fbbb..00000000000 --- a/polkadot/BRIDGES.md +++ /dev/null @@ -1,47 +0,0 @@ -# Using Parity Bridges Common dependency (`git subtree`). - -In `./bridges` sub-directory you can find a `git subtree` imported version of: -[parity-bridges-common](https://github.com/paritytech/parity-bridges-common/) repository. - -# How to fix broken Bridges code? - -To fix Bridges code simply create a commit in current (`polkadot`) repo. Best if -the commit is isolated to changes in `./bridges` sub-directory, because it makes -it easier to import that change back to upstream repo. - -# How to pull latest Bridges code or contribute back? - -Note that it's totally fine to ping the Bridges Team to do that for you. 
The point -of adding the code as `git subtree` is to **reduce maintenance cost** for Polkadot -developers. - -If you still would like to either update the code to match latest code from the repo -or create an upstream PR read below. The following commands should be run in the -current (`polkadot`) repo. - -1. Add Bridges repo as a local remote: -``` -$ git remote add -f bridges git@github.com:paritytech/parity-bridges-common.git -``` - -If you plan to contribute back, consider forking the repository on Github and adding -your personal fork as a remote as well. -``` -$ git remote add -f my-bridges git@github.com:tomusdrw/parity-bridges-common.git -``` - -2. To update Bridges: -``` -$ git fetch bridges master -$ git subtree pull --prefix=bridges bridges master --squash -```` - -We use `--squash` to avoid adding individual commits and rather squashing them -all into one. - -3. Contributing back to Bridges (creating upstream PR) -``` -$ git subtree push --prefix=bridges my-bridges master -``` -This command will push changes to your personal fork of Bridges repo, from where -you can simply create a PR to the main repo. diff --git a/polkadot/Cargo.lock b/polkadot/Cargo.lock index d4ccc0fb72c..612b5cc0504 100644 --- a/polkadot/Cargo.lock +++ b/polkadot/Cargo.lock @@ -674,162 +674,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "bp-header-chain" -version = "0.1.0" -dependencies = [ - "bp-runtime", - "bp-test-utils", - "finality-grandpa", - "frame-support", - "hex", - "hex-literal", - "parity-scale-codec", - "scale-info", - "serde", - "sp-core", - "sp-finality-grandpa", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "bp-message-dispatch" -version = "0.1.0" -dependencies = [ - "bp-runtime", - "frame-support", - "parity-scale-codec", - "scale-info", - "sp-std", -] - -[[package]] -name = "bp-messages" -version = "0.1.0" -dependencies = [ - "bitvec", - "bp-runtime", - "frame-support", - "frame-system", - "hex", - "hex-literal", - "impl-trait-for-tuples", - "parity-scale-codec", - "scale-info", - "serde", - "sp-core", - "sp-std", -] - -[[package]] -name = "bp-polkadot-core" -version = "0.1.0" -dependencies = [ - "bp-messages", - "bp-runtime", - "frame-support", - "frame-system", - "hex", - "parity-scale-codec", - "scale-info", - "sp-api", - "sp-core", - "sp-runtime", - "sp-std", - "sp-version", -] - -[[package]] -name = "bp-rococo" -version = "0.1.0" -dependencies = [ - "bp-messages", - "bp-polkadot-core", - "bp-runtime", - "frame-support", - "parity-scale-codec", - "smallvec", - "sp-api", - "sp-runtime", - "sp-std", - "sp-version", -] - -[[package]] -name = "bp-runtime" -version = "0.1.0" -dependencies = [ - "frame-support", - "hash-db", - "hex-literal", - "num-traits", - "parity-scale-codec", - "scale-info", - "sp-core", - "sp-io", - "sp-runtime", - "sp-state-machine", - "sp-std", - "sp-trie", -] - -[[package]] -name = "bp-test-utils" -version = "0.1.0" -dependencies = [ - "bp-header-chain", - "ed25519-dalek", - "finality-grandpa", - "parity-scale-codec", - "sp-application-crypto", - "sp-finality-grandpa", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "bp-wococo" -version = "0.1.0" -dependencies = [ - "bp-messages", - "bp-polkadot-core", - "bp-rococo", - "bp-runtime", - "parity-scale-codec", - "sp-api", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "bridge-runtime-common" -version = "0.1.0" -dependencies = [ - "bp-message-dispatch", - "bp-messages", - "bp-runtime", - "ed25519-dalek", - "frame-support", - "frame-system", - "hash-db", - "pallet-balances", - 
"pallet-bridge-dispatch", - "pallet-bridge-grandpa", - "pallet-bridge-messages", - "pallet-transaction-payment", - "parity-scale-codec", - "scale-info", - "sp-api", - "sp-core", - "sp-runtime", - "sp-state-machine", - "sp-std", - "sp-trie", - "sp-version", - "static_assertions", -] - [[package]] name = "bs58" version = "0.4.0" @@ -5159,70 +5003,6 @@ dependencies = [ "sp-std", ] -[[package]] -name = "pallet-bridge-dispatch" -version = "0.1.0" -dependencies = [ - "bp-message-dispatch", - "bp-runtime", - "frame-support", - "frame-system", - "log", - "parity-scale-codec", - "scale-info", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std", -] - -[[package]] -name = "pallet-bridge-grandpa" -version = "0.1.0" -dependencies = [ - "bp-header-chain", - "bp-runtime", - "bp-test-utils", - "finality-grandpa", - "frame-benchmarking", - "frame-support", - "frame-system", - "log", - "num-traits", - "parity-scale-codec", - "scale-info", - "serde", - "sp-core", - "sp-finality-grandpa", - "sp-io", - "sp-runtime", - "sp-std", - "sp-trie", -] - -[[package]] -name = "pallet-bridge-messages" -version = "0.1.0" -dependencies = [ - "bitvec", - "bp-message-dispatch", - "bp-messages", - "bp-runtime", - "frame-benchmarking", - "frame-support", - "frame-system", - "log", - "num-traits", - "pallet-balances", - "parity-scale-codec", - "scale-info", - "serde", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std", -] - [[package]] name = "pallet-child-bounties" version = "4.0.0-dev" @@ -8492,11 +8272,6 @@ version = "0.9.25" dependencies = [ "beefy-merkle-tree", "beefy-primitives", - "bp-messages", - "bp-rococo", - "bp-runtime", - "bp-wococo", - "bridge-runtime-common", "frame-benchmarking", "frame-executive", "frame-support", @@ -8511,9 +8286,6 @@ dependencies = [ "pallet-balances", "pallet-beefy", "pallet-beefy-mmr", - "pallet-bridge-dispatch", - "pallet-bridge-grandpa", - "pallet-bridge-messages", "pallet-collective", "pallet-grandpa", "pallet-im-online", diff --git a/polkadot/Cargo.toml b/polkadot/Cargo.toml index 355b15dec88..1b6cd4b8b21 100644 --- a/polkadot/Cargo.toml +++ b/polkadot/Cargo.toml @@ -114,10 +114,6 @@ members = [ "utils/generate-bags", ] -# We want to be able to build the bridge relayer without pulling it (and all of its -# dependencies into the Polkadot workspace) -exclude = ["bridges/relays/bin-substrate", "bridges/bin/rialto/runtime", "bridges/bin/millau/runtime"] - [badges] maintenance = { status = "actively-developed" } diff --git a/polkadot/bridges/.config/lingua.dic b/polkadot/bridges/.config/lingua.dic deleted file mode 100644 index 223a962006a..00000000000 --- a/polkadot/bridges/.config/lingua.dic +++ /dev/null @@ -1,243 +0,0 @@ -90 - -&& -1KB -1MB -5MB -= -API/SM -APIs -AccountId/MS -Apache-2.0/M -Autogenerated -BFT/M -BTC/S -Best/MS -BlockId -BlockNumber -BridgeStorage -clonable -CLI/MS -Chain1 -Chain2 -ChainSpec -ChainTime -DOT/S -ERC-20 -Ethereum -FN -FinalizationError -GPL/M -GPLv3/M -GiB/S -Handler/MS -Hasher -HeaderA -HeaderId -InitiateChange -Instance1 -Instance2 -Instance42 -InstantCurrencyPayments -KSM/S -KYC/M -keypair/MS -KeyPair -Kovan -Lane1 -Lane2 -Lane3 -LaneId -MIN_SIZE -MIT/M -MMR -MaxUnrewardedRelayerEntriesAtInboundLane -MaybeExtra -MaybeOrphan -Merklized -MessageNonce -MessageNonces -MessagePayload -MetricsParams -Millau/MS -OldHeader -OutboundMessages -PoA -PoV/MS -Pre -RLP -RPC/MS -Rialto/MS -Relayer/MS -Runtime1 -Runtime2 -SIZE_FACTOR -SS58 -SS58Prefix -STALL_SYNC_TIMEOUT -SURI -ServiceFactory/MS -SignedExtension -Stringified -Submitter1 -S|N -TCP -ThisChain -TODO -U256 
-Unparsed -Vec -WND/S -Westend/MS -Wococo/MS -XCM/S -XCMP/M -annualised/MS -api/SM -aren -arg -args -async -auth -auths/SM -backoff -benchmarking/MS -best_substrate_header -bitfield/MS -blake2/MS -blockchain/MS -borked -chain_getBlock -choosen -config/MS -cooldown -crypto/MS -customizable/B -Debian/M -decodable/MS -delivery_and_dispatch_fee -dev -dispatchable -dispatchables -doesn -ed25519 -enum/MS -entrypoint/MS -ethereum/MS -externality/MS -extrinsic/MS -extrinsics -fedora/M -functor -fuzzer -hasher -hardcoded -https -implementers -include/BG -inherent/MS -initialize/RG -instantiate/B -intrinsic/MS -invariant/MS -invariants -io -isn -isolate/BG -js -jsonrpsee -keccak -keccak256/M -keyring -keystore/MS -kusama/S -lane -malus -max_value -merkle/MS -metadata -millau -misbehavior/SM -misbehaviors -multivalidator/SM -natively -no_std -nonces -number -ok -oneshot/MS -others' -pallet_bridge_grandpa -pallet_bridge_messages -pallet_message_lane -parablock/MS -parachain/MS -param/MS -parameterize/D -plancks -polkadot/MS -pov-block/MS -precommit -proc-macro/MS -prometheus -proxying -provisioner/MS -probabilistically -prune_depth -prune_end -receival -reconnection -redhat/M -repo/MS -runtime/MS -rustc/MS -relayer/MS -shouldn -source_at_target -source_latest_confirmed -source_latest_generated -sp_finality_grandpa -spawner -sr25519 -src -stringified -struct/MS -submitters/MS -subsystem/MS -subsystems' -subcommand/MS -synchronizer -target_at_source -target_latest_confirmed -target_latest_received -taskmanager/MS -teleport/RG -teleportation/SM -teleporter/SM -teleporters -testnet/MS -timeframe -tokio -timestamp -trie/MS -trustless/Y -tuple -u32 -ubuntu/M -undeliverable -unfinalized -union/MSG -unpruned -unservable/B -unsynced -updatable -validator/SM -ve -vec -verifier -w3f/MS -wakeup -wasm/M -websocket -x2 -~ diff --git a/polkadot/bridges/.config/spellcheck.toml b/polkadot/bridges/.config/spellcheck.toml deleted file mode 100644 index e061c29ac22..00000000000 --- a/polkadot/bridges/.config/spellcheck.toml +++ /dev/null @@ -1,13 +0,0 @@ -[hunspell] -lang = "en_US" -search_dirs = ["."] -extra_dictionaries = ["lingua.dic"] -skip_os_lookups = true -use_builtin = true - -[hunspell.quirks] -# `Type`'s -# 5x -transform_regex = ["^'([^\\s])'$", "^[0-9]+(?:\\.[0-9]*)?x$", "^'s$", "^\\+$", "[><+-]"] -allow_concatenation = true -allow_dashes = true diff --git a/polkadot/bridges/.dockerignore b/polkadot/bridges/.dockerignore deleted file mode 100644 index f4ceea78560..00000000000 --- a/polkadot/bridges/.dockerignore +++ /dev/null @@ -1 +0,0 @@ -**/target/ diff --git a/polkadot/bridges/.editorconfig b/polkadot/bridges/.editorconfig deleted file mode 100644 index e2375881ea0..00000000000 --- a/polkadot/bridges/.editorconfig +++ /dev/null @@ -1,19 +0,0 @@ -root = true -[*] -indent_style=tab -indent_size=tab -tab_width=4 -end_of_line=lf -charset=utf-8 -trim_trailing_whitespace=true -max_line_length=100 -insert_final_newline=true - -[*.{yml,md,yaml,sh}] -indent_style=space -indent_size=2 -tab_width=8 -end_of_line=lf - -[*.md] -max_line_length=80 diff --git a/polkadot/bridges/.github/dependabot.yml b/polkadot/bridges/.github/dependabot.yml deleted file mode 100644 index a06d573703d..00000000000 --- a/polkadot/bridges/.github/dependabot.yml +++ /dev/null @@ -1,42 +0,0 @@ -version: 2 -updates: -- package-ecosystem: cargo - directory: "/" - schedule: - interval: weekly - time: "03:00" - timezone: Europe/Berlin - open-pull-requests-limit: 20 - ignore: - - dependency-name: frame-* - versions: - - ">= 0" - - dependency-name: 
node-inspect - versions: - - ">= 0" - - dependency-name: pallet-* - versions: - - ">= 0" - - dependency-name: sc-* - versions: - - ">= 0" - - dependency-name: sp-* - versions: - - ">= 0" - - dependency-name: substrate-* - versions: - - ">= 0" - - dependency-name: vergen - versions: - - 4.0.1 - - 4.0.2 - - 4.1.0 - - 4.2.0 - - dependency-name: jsonrpc-core - versions: - - 17.0.0 - - dependency-name: finality-grandpa - versions: - - 0.13.0 - - 0.14.0 - rebase-strategy: disabled diff --git a/polkadot/bridges/.gitignore b/polkadot/bridges/.gitignore deleted file mode 100644 index 5d10cfa41a4..00000000000 --- a/polkadot/bridges/.gitignore +++ /dev/null @@ -1,26 +0,0 @@ -**/target/ -**/.env -**/.env2 -**/rust-toolchain -hfuzz_target -hfuzz_workspace -**/Cargo.lock - -**/*.rs.bk - -*.o -*.so -*.rlib -*.dll -.gdb_history - -*.exe - -.DS_Store - -.cargo -.idea -.vscode -*.iml -*.swp -*.swo diff --git a/polkadot/bridges/.gitlab-ci.yml b/polkadot/bridges/.gitlab-ci.yml deleted file mode 100644 index 7d3bf6fd8ac..00000000000 --- a/polkadot/bridges/.gitlab-ci.yml +++ /dev/null @@ -1,298 +0,0 @@ -stages: - - lint - - check - - test - - build - - publish - -workflow: - rules: - - if: $CI_COMMIT_TAG - - if: $CI_COMMIT_BRANCH - -variables: &default-vars - GIT_STRATEGY: fetch - GIT_DEPTH: 100 - CARGO_INCREMENTAL: 0 - ARCH: "x86_64" - CI_IMAGE: "paritytech/bridges-ci:staging" - RUST_BACKTRACE: full - -default: - cache: {} - -.collect-artifacts: &collect-artifacts - artifacts: - name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}" - when: on_success - expire_in: 7 days - paths: - - artifacts/ - -.kubernetes-build: &kubernetes-build - tags: - - kubernetes-parity-build - interruptible: true - -.docker-env: &docker-env - image: "${CI_IMAGE}" - before_script: - - rustup show - - cargo --version - - rustup +nightly show - - cargo +nightly --version - - sccache -s - retry: - max: 2 - when: - - runner_system_failure - - unknown_failure - - api_failure - interruptible: true - tags: - - linux-docker - -.test-refs: &test-refs - rules: - # FIXME: This is the cause why pipelines wouldn't start. The problem might be in our custom - # mirroring. This should be investigated further, but for now let's have the working - # pipeline. - # - if: $CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH - # changes: - # - '**.md' - # - diagrams/* - # - docs/* - # when: never - - if: $CI_PIPELINE_SOURCE == "pipeline" - - if: $CI_PIPELINE_SOURCE == "web" - - if: $CI_PIPELINE_SOURCE == "schedule" - - if: $CI_COMMIT_REF_NAME == "master" - - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 - -.build-refs: &build-refs - rules: - # won't run on the CI image update pipeline - - if: $CI_PIPELINE_SOURCE == "pipeline" - when: never - - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 - - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]{4}-[0-9]{2}-[0-9]{2}.*$/ # i.e. v2021-09-27, v2021-09-27-1 - # there are two types of nightly pipelines: - # 1. this one is triggered by the schedule with $PIPELINE == "nightly", it's for releasing. - # this job runs only on nightly pipeline with the mentioned variable, against `master` branch - - if: $CI_PIPELINE_SOURCE == "schedule" && $PIPELINE == "nightly" - -.nightly-test: &nightly-test - rules: - # 2. another is triggered by scripts repo $CI_PIPELINE_SOURCE == "pipeline" it's for the CI image - # update, it also runs all the nightly checks. 
- - if: $CI_PIPELINE_SOURCE == "pipeline" - -#### stage: lint - -clippy-nightly: - stage: lint - <<: *docker-env - <<: *test-refs - script: - - SKIP_WASM_BUILD=1 cargo +nightly clippy --all-targets -- -A clippy::redundant_closure - -fmt: - stage: lint - <<: *docker-env - <<: *test-refs - script: - - cargo +nightly fmt --all -- --check - -spellcheck: - stage: lint - <<: *docker-env - <<: *test-refs - script: - - cargo spellcheck check -vvvv --cfg=.config/spellcheck.toml --checkers hunspell -m 1 - -#### stage: check - -check: - stage: check - <<: *docker-env - <<: *test-refs - script: &check-script - - SKIP_WASM_BUILD=1 time cargo check --locked --verbose --workspace - # Check Rialto benchmarks runtime - - SKIP_WASM_BUILD=1 time cargo check -p rialto-runtime --locked --features runtime-benchmarks --verbose - # Check Millau benchmarks runtime - - SKIP_WASM_BUILD=1 time cargo check -p millau-runtime --locked --features runtime-benchmarks --verbose - -check-nightly: - stage: check - <<: *docker-env - <<: *nightly-test - script: - - rustup default nightly - - *check-script - -#### stage: test - -test: - stage: test - <<: *docker-env - <<: *test-refs -# variables: -# RUSTFLAGS: "-D warnings" - script: &test-script - - time cargo fetch - - time cargo fetch --manifest-path=`cargo metadata --format-version=1 | jq --compact-output --raw-output ".packages[] | select(.name == \"polkadot-test-runtime\").manifest_path"` - - time cargo fetch --manifest-path=`cargo metadata --format-version=1 | jq --compact-output --raw-output ".packages[] | select(.name == \"polkadot-runtime\").manifest_path"` - - CARGO_NET_OFFLINE=true time cargo test --verbose --workspace - -test-nightly: - stage: test - <<: *docker-env - <<: *nightly-test - script: - - rustup default nightly - - *test-script - -deny: - stage: test - <<: *docker-env - <<: *nightly-test - <<: *collect-artifacts - script: - - cargo deny check advisories --hide-inclusion-graph - - cargo deny check bans sources --hide-inclusion-graph - after_script: - - mkdir -p ./artifacts - - echo "___Complete logs can be found in the artifacts___" - - cargo deny check advisories 2> advisories.log - - cargo deny check bans sources 2> bans_sources.log - # this job is allowed to fail, only licenses check is important - allow_failure: true - -deny-licenses: - stage: test - <<: *docker-env - <<: *test-refs - <<: *collect-artifacts - script: - - cargo deny check licenses --hide-inclusion-graph - after_script: - - mkdir -p ./artifacts - - echo "___Complete logs can be found in the artifacts___" - - cargo deny check licenses 2> licenses.log - -#### stage: build - -build: - stage: build - <<: *docker-env - <<: *build-refs - <<: *collect-artifacts - # master - script: &build-script - - time cargo fetch - - time cargo fetch --manifest-path=`cargo metadata --format-version=1 | jq --compact-output --raw-output ".packages[] | select(.name == \"polkadot-test-runtime\").manifest_path"` - - time cargo fetch --manifest-path=`cargo metadata --format-version=1 | jq --compact-output --raw-output ".packages[] | select(.name == \"polkadot-runtime\").manifest_path"` - - CARGO_NET_OFFLINE=true time cargo build --release --verbose --workspace - after_script: - # Prepare artifacts - - mkdir -p ./artifacts - - strip ./target/release/rialto-bridge-node - - mv -v ./target/release/rialto-bridge-node ./artifacts/ - - strip ./target/release/rialto-parachain-collator - - mv -v ./target/release/rialto-parachain-collator ./artifacts/ - - strip ./target/release/millau-bridge-node - - mv -v 
./target/release/millau-bridge-node ./artifacts/ - - strip ./target/release/substrate-relay - - mv -v ./target/release/substrate-relay ./artifacts/ - - mv -v ./deployments/local-scripts/bridge-entrypoint.sh ./artifacts/ - - mv -v ./ci.Dockerfile ./artifacts/ - -build-nightly: - stage: build - <<: *docker-env - <<: *collect-artifacts - <<: *nightly-test - script: - - rustup default nightly - - *build-script - -#### stage: publish - -.build-push-image: &build-push-image - <<: *kubernetes-build - image: quay.io/buildah/stable - <<: *build-refs - variables: &image-variables - GIT_STRATEGY: none - DOCKERFILE: ci.Dockerfile - IMAGE_NAME: docker.io/paritytech/$CI_JOB_NAME - VAULT_SERVER_URL: "https://vault.parity-mgmt-vault.parity.io" - VAULT_AUTH_PATH: "gitlab-parity-io-jwt" - VAULT_AUTH_ROLE: "cicd_gitlab_parity_${CI_PROJECT_NAME}" - needs: - - job: build - artifacts: true - before_script: &check-versions - - if [[ "${CI_COMMIT_TAG}" ]]; then - VERSION=${CI_COMMIT_TAG}; - elif [[ "${CI_COMMIT_REF_NAME}" ]]; then - VERSION=$(echo ${CI_COMMIT_REF_NAME} | sed -r 's#/+#-#g'); - fi - # When building from version tags (v1.0, v2.1rc1, ...) we'll use "production" to tag - # docker image. In all other cases, it'll be "latest". - - if [[ $CI_COMMIT_REF_NAME =~ ^v[0-9]+\.[0-9]+.*$ ]]; then - FLOATING_TAG="production"; - else - FLOATING_TAG="latest"; - fi - - echo "Effective tags = ${VERSION} sha-${CI_COMMIT_SHORT_SHA} ${FLOATING_TAG}" - secrets: - DOCKER_HUB_USER: - vault: cicd/gitlab/parity/DOCKER_HUB_USER@kv - file: false - DOCKER_HUB_PASS: - vault: cicd/gitlab/parity/DOCKER_HUB_PASS@kv - file: false - script: - - test "${DOCKER_HUB_USER}" -a "${DOCKER_HUB_PASS}" || - ( echo "no docker credentials provided"; exit 1 ) - - cd ./artifacts - - buildah bud - --format=docker - --build-arg VCS_REF="${CI_COMMIT_SHORT_SHA}" - --build-arg BUILD_DATE="$(date +%d-%m-%Y)" - --build-arg PROJECT="${CI_JOB_NAME}" - --build-arg VERSION="${VERSION}" - --tag "${IMAGE_NAME}:${VERSION}" - --tag "${IMAGE_NAME}:sha-${CI_COMMIT_SHORT_SHA}" - --tag "${IMAGE_NAME}:${FLOATING_TAG}" - --file "${DOCKERFILE}" . - # The job will success only on the protected branch - - echo "${DOCKER_HUB_PASS}" | - buildah login --username "${DOCKER_HUB_USER}" --password-stdin docker.io - - buildah info - - buildah push --format=v2s2 "${IMAGE_NAME}:${VERSION}" - - buildah push --format=v2s2 "${IMAGE_NAME}:sha-${CI_COMMIT_SHORT_SHA}" - - buildah push --format=v2s2 "${IMAGE_NAME}:${FLOATING_TAG}" - after_script: - - env REGISTRY_AUTH_FILE= buildah logout --all - -rialto-bridge-node: - stage: publish - <<: *build-push-image - -rialto-parachain-collator: - stage: publish - <<: *build-push-image - -millau-bridge-node: - stage: publish - <<: *build-push-image - -substrate-relay: - stage: publish - <<: *build-push-image - -# FIXME: publish binaries diff --git a/polkadot/bridges/.maintain/millau-weight-template.hbs b/polkadot/bridges/.maintain/millau-weight-template.hbs deleted file mode 100644 index 7a2a67627bb..00000000000 --- a/polkadot/bridges/.maintain/millau-weight-template.hbs +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Autogenerated weights for `{{pallet}}` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION {{version}} -//! DATE: {{date}}, STEPS: {{cmd.steps}}, REPEAT: {{cmd.repeat}} -//! LOW RANGE: {{cmd.lowest_range_values}}, HIGH RANGE: {{cmd.highest_range_values}} -//! EXECUTION: {{cmd.execution}}, WASM-EXECUTION: {{cmd.wasm_execution}} -//! CHAIN: {{cmd.chain}}, DB CACHE: {{cmd.db_cache}} - -// Executed Command: -{{#each args as |arg|~}} -// {{arg}} -{{/each}} - -#![allow(clippy::all)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; -use sp_std::marker::PhantomData; - -/// Weight functions needed for `{{pallet}}`. -pub trait WeightInfo { - {{~#each benchmarks as |benchmark|}} - fn {{benchmark.name~}} - ( - {{~#each benchmark.components as |c| ~}} - {{c.name}}: u32, {{/each~}} - ) -> Weight; - {{~/each}} -} - -/// Weights for `{{pallet}}` using the Millau node and recommended hardware. -pub struct MillauWeight(PhantomData); -impl WeightInfo for MillauWeight { - {{~#each benchmarks as |benchmark|}} - fn {{benchmark.name~}} - ( - {{~#each benchmark.components as |c| ~}} - {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} - ) -> Weight { - ({{underscore benchmark.base_weight}} as Weight) - {{~#each benchmark.component_weight as |cw|}} - .saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)) - {{~/each}} - {{~#if (ne benchmark.base_reads "0")}} - .saturating_add(T::DbWeight::get().reads({{benchmark.base_reads}} as Weight)) - {{~/if}} - {{~#each benchmark.component_reads as |cr|}} - .saturating_add(T::DbWeight::get().reads(({{cr.slope}} as Weight).saturating_mul({{cr.name}} as Weight))) - {{~/each}} - {{~#if (ne benchmark.base_writes "0")}} - .saturating_add(T::DbWeight::get().writes({{benchmark.base_writes}} as Weight)) - {{~/if}} - {{~#each benchmark.component_writes as |cw|}} - .saturating_add(T::DbWeight::get().writes(({{cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight))) - {{~/each}} - } - {{~/each}} -} - -// For backwards compatibility and tests -impl WeightInfo for () { - {{~#each benchmarks as |benchmark|}} - fn {{benchmark.name~}} - ( - {{~#each benchmark.components as |c| ~}} - {{~#if (not c.is_used)}}_{{/if}}{{c.name}}: u32, {{/each~}} - ) -> Weight { - ({{underscore benchmark.base_weight}} as Weight) - {{~#each benchmark.component_weight as |cw|}} - .saturating_add(({{underscore cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight)) - {{~/each}} - {{~#if (ne benchmark.base_reads "0")}} - .saturating_add(RocksDbWeight::get().reads({{benchmark.base_reads}} as Weight)) - {{~/if}} - {{~#each benchmark.component_reads as |cr|}} - .saturating_add(RocksDbWeight::get().reads(({{cr.slope}} as Weight).saturating_mul({{cr.name}} as Weight))) - {{~/each}} - {{~#if (ne benchmark.base_writes "0")}} - .saturating_add(RocksDbWeight::get().writes({{benchmark.base_writes}} as Weight)) - {{~/if}} - {{~#each benchmark.component_writes as |cw|}} - .saturating_add(RocksDbWeight::get().writes(({{cw.slope}} as Weight).saturating_mul({{cw.name}} as Weight))) 
- {{~/each}} - } - {{~/each}} -} diff --git a/polkadot/bridges/CODEOWNERS b/polkadot/bridges/CODEOWNERS deleted file mode 100644 index 3941ba8451a..00000000000 --- a/polkadot/bridges/CODEOWNERS +++ /dev/null @@ -1,21 +0,0 @@ -# Lists some code owners. -# -# A codeowner just oversees some part of the codebase. If an owned file is changed then the -# corresponding codeowner receives a review request. An approval of the codeowner might be -# required for merging a PR (depends on repository settings). -# -# For details about syntax, see: -# https://help.github.com/en/articles/about-code-owners -# But here are some important notes: -# -# - Glob syntax is git-like, e.g. `/core` means the core directory in the root, unlike `core` -# which can be everywhere. -# - Multiple owners are supported. -# - Either handle (e.g, @github_user or @github_org/team) or email can be used. Keep in mind, -# that handles might work better because they are more recognizable on GitHub, -# eyou can use them for mentioning unlike an email. -# - The latest matching rule, if multiple, takes precedence. - -# CI -/.github/ @paritytech/ci -/.gitlab-ci.yml @paritytech/ci diff --git a/polkadot/bridges/CODE_OF_CONDUCT.md b/polkadot/bridges/CODE_OF_CONDUCT.md deleted file mode 100644 index 70541fb72fa..00000000000 --- a/polkadot/bridges/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,80 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as contributors and maintainers -pledge to making participation in our project and our community a harassment-free experience for -everyone, regardless of age, body size, disability, ethnicity, sex characteristics, gender identity -and expression, level of experience, education, socio-economic status, nationality, personal -appearance, race, religion, or sexual identity and orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic address, without explicit - permission -* Other conduct which could reasonably be considered inappropriate in a professional setting - -### Facilitation, Not Strongarming - -We recognise that this software is merely a tool for users to create and maintain their blockchain -of preference. We see that blockchains are naturally community platforms with users being the -ultimate decision makers. We assert that good software will maximise user agency by facilitate -user-expression on the network. As such: - -- This project will strive to give users as much choice as is both reasonable and possible over what - protocol they adhere to; but -- use of the project's technical forums, commenting systems, pull requests and issue trackers as a - means to express individual protocol preferences is forbidden. 
- -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable behavior and are -expected to take appropriate and fair corrective action in response to any instances of unacceptable -behavior. - -Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, -code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or -to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces when an individual is -representing the project or its community. Examples of representing a project or community include -using an official project e-mail address, posting via an official social media account, or acting as -an appointed representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting -the project team at admin@parity.io. All complaints will be reviewed and investigated and will -result in a response that is deemed necessary and appropriate to the circumstances. The project team -is obligated to maintain confidentiality with regard to the reporter of an incident. Further -details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good faith may face -temporary or permanent repercussions as determined by other members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at -https://www.contributor-covenant.org/version/1/4/code-of-conduct.html - -[homepage]: https://www.contributor-covenant.org - -For answers to common questions about this code of conduct, see -https://www.contributor-covenant.org/faq diff --git a/polkadot/bridges/Dockerfile b/polkadot/bridges/Dockerfile deleted file mode 100644 index bc51f76ba99..00000000000 --- a/polkadot/bridges/Dockerfile +++ /dev/null @@ -1,71 +0,0 @@ -# Builds images used by the bridge. -# -# In particular, it can be used to build Substrate nodes and bridge relayers. The binary that gets -# built can be specified with the `PROJECT` build-arg. For example, to build the `substrate-relay` -# you would do the following: -# -# `docker build . -t local/substrate-relay --build-arg=PROJECT=substrate-relay` -# -# See the `deployments/README.md` for all the available `PROJECT` values. - -FROM docker.io/paritytech/bridges-ci:latest as builder -WORKDIR /parity-bridges-common - -COPY . . - -ARG PROJECT=substrate-relay -RUN cargo build --release --verbose -p ${PROJECT} && \ - strip ./target/release/${PROJECT} - -# In this final stage we copy over the final binary and do some checks -# to make sure that everything looks good. 
-FROM docker.io/library/ubuntu:20.04 as runtime - -# show backtraces -ENV RUST_BACKTRACE 1 -ENV DEBIAN_FRONTEND=noninteractive - -RUN set -eux; \ - apt-get update && \ - apt-get install -y --no-install-recommends \ - curl ca-certificates libssl-dev && \ - update-ca-certificates && \ - groupadd -g 1000 user && \ - useradd -u 1000 -g user -s /bin/sh -m user && \ - # apt clean up - apt-get autoremove -y && \ - apt-get clean && \ - rm -rf /var/lib/apt/lists/* - -# switch to non-root user -USER user - -WORKDIR /home/user - -ARG PROJECT=substrate-relay - -COPY --chown=user:user --from=builder /parity-bridges-common/target/release/${PROJECT} ./ -COPY --chown=user:user --from=builder /parity-bridges-common/deployments/local-scripts/bridge-entrypoint.sh ./ - -# check if executable works in this container -RUN ./${PROJECT} --version - -ENV PROJECT=$PROJECT -ENTRYPOINT ["/home/user/bridge-entrypoint.sh"] - -# metadata -ARG VCS_REF=master -ARG BUILD_DATE="" -ARG VERSION="" - -LABEL org.opencontainers.image.title="${PROJECT}" \ - org.opencontainers.image.description="${PROJECT} - component of Parity Bridges Common" \ - org.opencontainers.image.source="https://github.com/paritytech/parity-bridges-common/blob/${VCS_REF}/Dockerfile" \ - org.opencontainers.image.url="https://github.com/paritytech/parity-bridges-common/blob/${VCS_REF}/Dockerfile" \ - org.opencontainers.image.documentation="https://github.com/paritytech/parity-bridges-common/blob/${VCS_REF}/README.md" \ - org.opencontainers.image.created="${BUILD_DATE}" \ - org.opencontainers.image.version="${VERSION}" \ - org.opencontainers.image.revision="${VCS_REF}" \ - org.opencontainers.image.authors="devops-team@parity.io" \ - org.opencontainers.image.vendor="Parity Technologies" \ - org.opencontainers.image.licenses="GPL-3.0 License" diff --git a/polkadot/bridges/LICENSE b/polkadot/bridges/LICENSE deleted file mode 100644 index 733c072369c..00000000000 --- a/polkadot/bridges/LICENSE +++ /dev/null @@ -1,675 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. 
Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. 
Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. 
You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. 
- - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. 
In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. 
- - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. 
If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). 
To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. 
- - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. 
- - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - {one line to give the program's name and a brief idea of what it does.} - Copyright (C) {year} {name of author} - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - {project} Copyright (C) {year} {fullname} - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -. - diff --git a/polkadot/bridges/README.md b/polkadot/bridges/README.md deleted file mode 100644 index ac3e49b94c6..00000000000 --- a/polkadot/bridges/README.md +++ /dev/null @@ -1,247 +0,0 @@ -# Parity Bridges Common - -This is a collection of components for building bridges. - -These components include Substrate pallets for syncing headers, passing arbitrary messages, as well -as libraries for building relayers to provide cross-chain communication capabilities. - -Three bridge nodes are also available. The nodes can be used to run test networks which bridge other -Substrate chains. 
- -🚧 The bridges are currently under construction - a hardhat is recommended beyond this point 🚧 - -## Contents - -- [Installation](#installation) -- [High-Level Architecture](#high-level-architecture) -- [Project Layout](#project-layout) -- [Running the Bridge](#running-the-bridge) -- [How to send a message](#how-to-send-a-message) -- [Community](#community) - -## Installation - -To get up and running you need both stable and nightly Rust. Rust nightly is used to build the Web -Assembly (WASM) runtime for the node. You can configure the WASM support as so: - -```bash -rustup install nightly -rustup target add wasm32-unknown-unknown --toolchain nightly -``` - -Once this is configured you can build and test the repo as follows: - -``` -git clone https://github.com/paritytech/parity-bridges-common.git -cd parity-bridges-common -cargo build --all -cargo test --all -``` - -Also you can build the repo with -[Parity CI Docker image](https://github.com/paritytech/scripts/tree/master/dockerfiles/bridges-ci): - -```bash -docker pull paritytech/bridges-ci:production -mkdir ~/cache -chown 1000:1000 ~/cache #processes in the container runs as "nonroot" user with UID 1000 -docker run --rm -it -w /shellhere/parity-bridges-common \ - -v /home/$(whoami)/cache/:/cache/ \ - -v "$(pwd)":/shellhere/parity-bridges-common \ - -e CARGO_HOME=/cache/cargo/ \ - -e SCCACHE_DIR=/cache/sccache/ \ - -e CARGO_TARGET_DIR=/cache/target/ paritytech/bridges-ci:production cargo build --all -#artifacts can be found in ~/cache/target -``` - -If you want to reproduce other steps of CI process you can use the following -[guide](https://github.com/paritytech/scripts#reproduce-ci-locally). - -If you need more information about setting up your development environment Substrate's -[Getting Started](https://substrate.dev/docs/en/knowledgebase/getting-started/) page is a good -resource. - -## High-Level Architecture - -This repo has support for bridging foreign chains together using a combination of Substrate pallets -and external processes called relayers. A bridge chain is one that is able to follow the consensus -of a foreign chain independently. For example, consider the case below where we want to bridge two -Substrate based chains. - -``` -+---------------+ +---------------+ -| | | | -| Rialto | | Millau | -| | | | -+-------+-------+ +-------+-------+ - ^ ^ - | +---------------+ | - | | | | - +-----> | Bridge Relay | <-------+ - | | - +---------------+ -``` - -The Millau chain must be able to accept Rialto headers and verify their integrity. It does this by -using a runtime module designed to track GRANDPA finality. Since two blockchains can't interact -directly they need an external service, called a relayer, to communicate. The relayer will subscribe -to new Rialto headers via RPC and submit them to the Millau chain for verification. - -Take a look at [Bridge High Level Documentation](./docs/high-level-overview.md) for more in-depth -description of the bridge interaction. - -## Project Layout - -Here's an overview of how the project is laid out. The main bits are the `node`, which is the actual -"blockchain", the `modules` which are used to build the blockchain's logic (a.k.a the runtime) and -the `relays` which are used to pass messages between chains. - -``` -├── bin // Node and Runtime for the various Substrate chains -│ └── ... -├── deployments // Useful tools for deploying test networks -│ └── ... -├── diagrams // Pretty pictures of the project architecture -│ └── ... 
-├── modules // Substrate Runtime Modules (a.k.a Pallets) -│ ├── grandpa // On-Chain GRANDPA Light Client -│ ├── messages // Cross Chain Message Passing -│ ├── dispatch // Target Chain Message Execution -│ └── ... -├── primitives // Code shared between modules, runtimes, and relays -│ └── ... -├── relays // Application for sending headers and messages between chains -│ └── ... -└── scripts // Useful development and maintenance scripts -``` - -## Running the Bridge - -To run the Bridge you need to be able to connect the bridge relay node to the RPC interface of nodes -on each side of the bridge (source and target chain). - -There are 2 ways to run the bridge, described below: - -- building & running from source -- running a Docker Compose setup (recommended). - -### Using the Source - -First you'll need to build the bridge nodes and relay. This can be done as follows: - -```bash -# In `parity-bridges-common` folder -cargo build -p rialto-bridge-node -cargo build -p millau-bridge-node -cargo build -p substrate-relay -``` - -### Running a Dev network - -We will launch a dev network to demonstrate how to relay a message between two Substrate based -chains (named Rialto and Millau). - -To do this we will need two nodes, two relayers which will relay headers, and two relayers which -will relay messages. - -#### Running from local scripts - -To run a simple dev network you can use the scripts located in the -[`deployments/local-scripts` folder](./deployments/local-scripts). - -First, we must run the two Substrate nodes. - -```bash -# In `parity-bridges-common` folder -./deployments/local-scripts/run-rialto-node.sh -./deployments/local-scripts/run-millau-node.sh -``` - -After the nodes are up we can run the header relayers. - -```bash -./deployments/local-scripts/relay-millau-to-rialto.sh -./deployments/local-scripts/relay-rialto-to-millau.sh -``` - -At this point you should see the relayer submitting headers from the Millau Substrate chain to the -Rialto Substrate chain. - -``` -# Header Relayer Logs -[Millau_to_Rialto_Sync] [date] DEBUG bridge Going to submit finality proof of Millau header #147 to Rialto -[...] [date] INFO bridge Synced 147 of 147 headers -[...] [date] DEBUG bridge Going to submit finality proof of Millau header #148 to Rialto -[...] [date] INFO bridge Synced 148 of 149 headers -``` - -Finally, we can run the message relayers. - -```bash -./deployments/local-scripts/relay-messages-millau-to-rialto.sh -./deployments/local-scripts/relay-messages-rialto-to-millau.sh -``` - -You will also see the message lane relayers listening for new messages. - -``` -# Message Relayer Logs -[Millau_to_Rialto_MessageLane_00000000] [date] DEBUG bridge Asking Millau::ReceivingConfirmationsDelivery about best message nonces -[...] [date] INFO bridge Synced Some(2) of Some(3) nonces in Millau::MessagesDelivery -> Rialto::MessagesDelivery race -[...] [date] DEBUG bridge Asking Millau::MessagesDelivery about message nonces -[...] [date] DEBUG bridge Received best nonces from Millau::ReceivingConfirmationsDelivery: TargetClientNonces { latest_nonce: 0, nonces_data: () } -[...] [date] DEBUG bridge Asking Millau::ReceivingConfirmationsDelivery about finalized message nonces -[...] [date] DEBUG bridge Received finalized nonces from Millau::ReceivingConfirmationsDelivery: TargetClientNonces { latest_nonce: 0, nonces_data: () } -[...] [date] DEBUG bridge Received nonces from Millau::MessagesDelivery: SourceClientNonces { new_nonces: {}, confirmed_nonce: Some(0) } -[...] 
-[...] [date] DEBUG bridge Received state from Millau node: ClientState { best_self: HeaderId(1593, 0xacac***), best_finalized_self: HeaderId(1590, 0x0be81d...), best_finalized_peer_at_best_self: HeaderId(0, 0xdcdd89...) }
-```
-
-To send a message, see the ["How to send a message" section](#how-to-send-a-message).
-
-### Full Network Docker Compose Setup
-
-For a more sophisticated deployment which includes bidirectional header sync, message passing,
-monitoring dashboards, etc., see the [Deployments README](./deployments/README.md).
-
-Note that images for all the bridge components are published on
-[Docker Hub](https://hub.docker.com/u/paritytech).
-
-To run a Rialto node, for example, you can use the following command:
-
-```bash
-docker run -p 30333:30333 -p 9933:9933 -p 9944:9944 \
-   -it paritytech/rialto-bridge-node --dev --tmp \
-   --rpc-cors=all --unsafe-rpc-external --unsafe-ws-external
-```
-
-### How to send a message
-
-In this section we'll show you how to quickly send a bridge message. If you want to
-interact with and test the bridge, see [send message](./docs/send-message.md) for more details.
-
-```bash
-# In `parity-bridges-common` folder
-./scripts/send-message-from-millau-rialto.sh remark
-```
-
-After sending a message you will see the following logs showing that the message was successfully sent:
-
-```
-INFO bridge Sending message to Rialto. Size: 286. Dispatch weight: 1038000. Fee: 275,002,568
-INFO bridge Signed Millau Call: 0x7904...
-TRACE bridge Sent transaction to Millau node: 0x5e68...
-```
-
-## Community
-
-The main hangout for the community is [Element](https://element.io/) (formerly Riot). Element is a chat
-server similar to, for example, Discord. Most discussions around Polkadot and Substrate happen
-in various Element "rooms" (channels). So joining Element might be a good idea, anyway.
-
-If you are interested in information exchange and development of Polkadot-related bridges, please
-feel free to join the [Polkadot Bridges](https://app.element.io/#/room/#bridges:web3.foundation)
-Element channel.
-
-The [Substrate Technical](https://app.element.io/#/room/#substrate-technical:matrix.org) Element
-channel is most suited for discussions regarding Substrate itself.
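The header-relay workflow described in the removed README's "High-Level Architecture" section can be illustrated with a small, self-contained sketch. The snippet below is purely illustrative: the `SourceChain` and `TargetChain` traits, the `FinalizedHeader` struct, and `relay_headers` are hypothetical stand-ins invented for this example and are not part of `substrate-relay`, `pallet-bridge-grandpa`, or any other crate in the removed tree; a real relayer talks to both nodes over RPC and submits GRANDPA finality proofs as signed transactions.

```rust
/// A finalized source-chain header together with its finality proof
/// (both fields are placeholders for this sketch).
struct FinalizedHeader {
    number: u64,
    finality_proof: Vec<u8>,
}

/// Hypothetical view of the source chain (e.g. Millau) as seen over RPC.
trait SourceChain {
    /// Best finalized header number on the source chain.
    fn best_finalized(&self) -> u64;
    /// Fetch a finalized header and its finality proof.
    fn finalized_header(&self, number: u64) -> FinalizedHeader;
}

/// Hypothetical view of the target chain (e.g. Rialto), which hosts the
/// on-chain light client that tracks source-chain finality.
trait TargetChain {
    /// Best source-chain header number already imported by the light client.
    fn best_synced(&self) -> u64;
    /// Submit a header and its finality proof to the light client pallet.
    fn submit_finality_proof(&mut self, header: FinalizedHeader);
}

/// One pass of the header-relay loop: push every finalized source header the
/// target chain has not imported yet. A real relayer also batches, retries,
/// handles reorgs and pays fees; this only shows the data flow.
fn relay_headers(source: &impl SourceChain, target: &mut impl TargetChain) {
    let best_source = source.best_finalized();
    let mut next = target.best_synced() + 1;
    while next <= best_source {
        let header = source.finalized_header(next);
        println!(
            "Going to submit finality proof of header #{} ({} bytes)",
            header.number,
            header.finality_proof.len()
        );
        target.submit_finality_proof(header);
        next += 1;
    }
}

// Toy in-memory implementations so the sketch runs end to end.
struct MockSource {
    finalized: u64,
}

impl SourceChain for MockSource {
    fn best_finalized(&self) -> u64 {
        self.finalized
    }
    fn finalized_header(&self, number: u64) -> FinalizedHeader {
        FinalizedHeader { number, finality_proof: vec![0; 64] }
    }
}

struct MockTarget {
    synced: u64,
}

impl TargetChain for MockTarget {
    fn best_synced(&self) -> u64 {
        self.synced
    }
    fn submit_finality_proof(&mut self, header: FinalizedHeader) {
        self.synced = header.number;
    }
}

fn main() {
    let source = MockSource { finalized: 148 };
    let mut target = MockTarget { synced: 146 };
    relay_headers(&source, &mut target);
    assert_eq!(target.synced, 148);
}
```

Hiding the two nodes behind source/target traits mirrors how the relay logs above are organized (a sync "race" per direction): the same loop can then be reused for any pair of chains that expose header finality on one side and an on-chain light client on the other.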
diff --git a/polkadot/bridges/bin/.keep b/polkadot/bridges/bin/.keep deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/polkadot/bridges/bin/millau/node/Cargo.toml b/polkadot/bridges/bin/millau/node/Cargo.toml deleted file mode 100644 index 3825b92b703..00000000000 --- a/polkadot/bridges/bin/millau/node/Cargo.toml +++ /dev/null @@ -1,63 +0,0 @@ -[package] -name = "millau-bridge-node" -description = "Substrate node compatible with Millau runtime" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -build = "build.rs" -homepage = "https://substrate.dev" -repository = "https://github.com/paritytech/parity-bridges-common/" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -clap = { version = "3.1", features = ["derive"] } -jsonrpc-core = "18.0" -serde_json = "1.0.59" - -# Bridge dependencies - -bp-millau = { path = "../../../primitives/chain-millau" } -bp-runtime = { path = "../../../primitives/runtime" } -millau-runtime = { path = "../runtime" } -pallet-bridge-messages = { path = "../../../modules/messages" } - -# Substrate Dependencies - -beefy-gadget = { git = "https://github.com/paritytech/substrate", branch = "master" } -beefy-gadget-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } -beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "master" } -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" } -frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "master" } -node-inspect = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-mmr-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-basic-authorship = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-cli = { git = "https://github.com/paritytech/substrate", branch = "master", features = ["wasmtime"] } -sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-executor = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-finality-grandpa-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-service = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-telemetry = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-timestamp = { git = "https://github.com/paritytech/substrate", branch = 
"master" } -substrate-frame-rpc-system = { git = "https://github.com/paritytech/substrate", branch = "master" } - -[build-dependencies] -substrate-build-script-utils = { git = "https://github.com/paritytech/substrate", branch = "master" } -frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "master" } - -[features] -default = [] -runtime-benchmarks = [ - "millau-runtime/runtime-benchmarks", -] diff --git a/polkadot/bridges/bin/millau/node/build.rs b/polkadot/bridges/bin/millau/node/build.rs deleted file mode 100644 index d9b50049e26..00000000000 --- a/polkadot/bridges/bin/millau/node/build.rs +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed}; - -fn main() { - generate_cargo_keys(); - - rerun_if_git_head_changed(); -} diff --git a/polkadot/bridges/bin/millau/node/src/chain_spec.rs b/polkadot/bridges/bin/millau/node/src/chain_spec.rs deleted file mode 100644 index a7e3c7c8771..00000000000 --- a/polkadot/bridges/bin/millau/node/src/chain_spec.rs +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use beefy_primitives::crypto::AuthorityId as BeefyId; -use bp_millau::derive_account_from_rialto_id; -use millau_runtime::{ - AccountId, AuraConfig, BalancesConfig, BeefyConfig, BridgeRialtoMessagesConfig, - BridgeWestendGrandpaConfig, GenesisConfig, GrandpaConfig, SessionConfig, SessionKeys, - Signature, SudoConfig, SystemConfig, WASM_BINARY, -}; -use sp_consensus_aura::sr25519::AuthorityId as AuraId; -use sp_core::{sr25519, Pair, Public}; -use sp_finality_grandpa::AuthorityId as GrandpaId; -use sp_runtime::traits::{IdentifyAccount, Verify}; - -/// Specialized `ChainSpec`. This is a specialization of the general Substrate ChainSpec type. -pub type ChainSpec = sc_service::GenericChainSpec; - -/// The chain specification option. This is expected to come in from the CLI and -/// is little more than one of a number of alternatives which can easily be converted -/// from a string (`--chain=...`) into a `ChainSpec`. 
-#[derive(Clone, Debug)] -pub enum Alternative { - /// Whatever the current runtime is, with just Alice as an auth. - Development, - /// Whatever the current runtime is, with simple Alice/Bob/Charlie/Dave/Eve auths. - LocalTestnet, -} - -/// Helper function to generate a crypto pair from seed -pub fn get_from_seed(seed: &str) -> ::Public { - TPublic::Pair::from_string(&format!("//{}", seed), None) - .expect("static values are valid; qed") - .public() -} - -type AccountPublic = ::Signer; - -/// Helper function to generate an account ID from seed -pub fn get_account_id_from_seed(seed: &str) -> AccountId -where - AccountPublic: From<::Public>, -{ - AccountPublic::from(get_from_seed::(seed)).into_account() -} - -/// Helper function to generate an authority key for Aura -pub fn get_authority_keys_from_seed(s: &str) -> (AccountId, AuraId, BeefyId, GrandpaId) { - ( - get_account_id_from_seed::(s), - get_from_seed::(s), - get_from_seed::(s), - get_from_seed::(s), - ) -} - -impl Alternative { - /// Get an actual chain config from one of the alternatives. - pub(crate) fn load(self) -> ChainSpec { - let properties = Some( - serde_json::json!({ - "tokenDecimals": 9, - "tokenSymbol": "MLAU" - }) - .as_object() - .expect("Map given; qed") - .clone(), - ); - match self { - Alternative::Development => ChainSpec::from_genesis( - "Millau Development", - "millau_dev", - sc_service::ChainType::Development, - || { - testnet_genesis( - vec![get_authority_keys_from_seed("Alice")], - get_account_id_from_seed::("Alice"), - endowed_accounts(), - true, - ) - }, - vec![], - None, - None, - None, - properties, - None, - ), - Alternative::LocalTestnet => ChainSpec::from_genesis( - "Millau Local", - "millau_local", - sc_service::ChainType::Local, - || { - testnet_genesis( - vec![ - get_authority_keys_from_seed("Alice"), - get_authority_keys_from_seed("Bob"), - get_authority_keys_from_seed("Charlie"), - get_authority_keys_from_seed("Dave"), - get_authority_keys_from_seed("Eve"), - ], - get_account_id_from_seed::("Alice"), - endowed_accounts(), - true, - ) - }, - vec![], - None, - None, - None, - properties, - None, - ), - } - } -} - -/// We're using the same set of endowed accounts on all Millau chains (dev/local) to make -/// sure that all accounts, required for bridge to be functional (e.g. relayers fund account, -/// accounts used by relayers in our test deployments, accounts used for demonstration -/// purposes), are all available on these chains. 
-fn endowed_accounts() -> Vec { - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("George"), - get_account_id_from_seed::("Harry"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - get_account_id_from_seed::("George//stash"), - get_account_id_from_seed::("Harry//stash"), - get_account_id_from_seed::("RialtoMessagesOwner"), - get_account_id_from_seed::("WithRialtoTokenSwap"), - pallet_bridge_messages::relayer_fund_account_id::< - bp_millau::AccountId, - bp_millau::AccountIdConverter, - >(), - derive_account_from_rialto_id(bp_runtime::SourceAccount::Account( - get_account_id_from_seed::("Alice"), - )), - derive_account_from_rialto_id(bp_runtime::SourceAccount::Account( - get_account_id_from_seed::("Bob"), - )), - derive_account_from_rialto_id(bp_runtime::SourceAccount::Account( - get_account_id_from_seed::("Charlie"), - )), - derive_account_from_rialto_id(bp_runtime::SourceAccount::Account( - get_account_id_from_seed::("Dave"), - )), - derive_account_from_rialto_id(bp_runtime::SourceAccount::Account( - get_account_id_from_seed::("Eve"), - )), - derive_account_from_rialto_id(bp_runtime::SourceAccount::Account( - get_account_id_from_seed::("Ferdie"), - )), - ] -} - -fn session_keys(aura: AuraId, beefy: BeefyId, grandpa: GrandpaId) -> SessionKeys { - SessionKeys { aura, beefy, grandpa } -} - -fn testnet_genesis( - initial_authorities: Vec<(AccountId, AuraId, BeefyId, GrandpaId)>, - root_key: AccountId, - endowed_accounts: Vec, - _enable_println: bool, -) -> GenesisConfig { - GenesisConfig { - system: SystemConfig { - code: WASM_BINARY.expect("Millau development WASM not available").to_vec(), - }, - balances: BalancesConfig { - balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 50)).collect(), - }, - aura: AuraConfig { authorities: Vec::new() }, - beefy: BeefyConfig { authorities: Vec::new() }, - grandpa: GrandpaConfig { authorities: Vec::new() }, - sudo: SudoConfig { key: Some(root_key) }, - session: SessionConfig { - keys: initial_authorities - .iter() - .map(|x| { - (x.0.clone(), x.0.clone(), session_keys(x.1.clone(), x.2.clone(), x.3.clone())) - }) - .collect::>(), - }, - bridge_westend_grandpa: BridgeWestendGrandpaConfig { - // for our deployments to avoid multiple same-nonces transactions: - // //Alice is already used to initialize Rialto<->Millau bridge - // => let's use //George to initialize Westend->Millau bridge - owner: Some(get_account_id_from_seed::("George")), - ..Default::default() - }, - bridge_rialto_messages: BridgeRialtoMessagesConfig { - owner: Some(get_account_id_from_seed::("RialtoMessagesOwner")), - ..Default::default() - }, - } -} - -#[test] -fn derived_dave_account_is_as_expected() { - let dave = get_account_id_from_seed::("Dave"); - let derived: AccountId = - derive_account_from_rialto_id(bp_runtime::SourceAccount::Account(dave)); - assert_eq!(derived.to_string(), "5DNW6UVnb7TN6wX5KwXtDYR3Eccecbdzuw89HqjyNfkzce6J".to_string()); -} diff --git a/polkadot/bridges/bin/millau/node/src/cli.rs b/polkadot/bridges/bin/millau/node/src/cli.rs deleted file mode 100644 index c3c3d134e34..00000000000 --- 
a/polkadot/bridges/bin/millau/node/src/cli.rs +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use clap::Parser; -use sc_cli::RunCmd; - -#[derive(Debug, Parser)] -pub struct Cli { - #[structopt(subcommand)] - pub subcommand: Option, - - #[structopt(flatten)] - pub run: RunCmd, -} - -/// Possible subcommands of the main binary. -#[derive(Debug, Parser)] -pub enum Subcommand { - /// Key management CLI utilities - #[clap(subcommand)] - Key(sc_cli::KeySubcommand), - - /// Verify a signature for a message, provided on `STDIN`, with a given (public or secret) key. - Verify(sc_cli::VerifyCmd), - - /// Generate a seed that provides a vanity address. - Vanity(sc_cli::VanityCmd), - - /// Sign a message, with a given (secret) key. - Sign(sc_cli::SignCmd), - - /// Build a chain specification. - BuildSpec(sc_cli::BuildSpecCmd), - - /// Validate blocks. - CheckBlock(sc_cli::CheckBlockCmd), - - /// Export blocks. - ExportBlocks(sc_cli::ExportBlocksCmd), - - /// Export the state of a given block into a chain spec. - ExportState(sc_cli::ExportStateCmd), - - /// Import blocks. - ImportBlocks(sc_cli::ImportBlocksCmd), - - /// Remove the whole chain. - PurgeChain(sc_cli::PurgeChainCmd), - - /// Revert the chain to a previous state. - Revert(sc_cli::RevertCmd), - - /// Inspect blocks or extrinsics. - Inspect(node_inspect::cli::InspectCmd), - - /// Benchmark runtime pallets. - Benchmark(frame_benchmarking_cli::BenchmarkCmd), -} diff --git a/polkadot/bridges/bin/millau/node/src/command.rs b/polkadot/bridges/bin/millau/node/src/command.rs deleted file mode 100644 index 4dbf9575dfe..00000000000 --- a/polkadot/bridges/bin/millau/node/src/command.rs +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -use crate::{ - cli::{Cli, Subcommand}, - service, - service::new_partial, -}; -use millau_runtime::{Block, RuntimeApi}; -use sc_cli::{ChainSpec, RuntimeVersion, SubstrateCli}; -use sc_service::PartialComponents; - -impl SubstrateCli for Cli { - fn impl_name() -> String { - "Millau Bridge Node".into() - } - - fn impl_version() -> String { - env!("CARGO_PKG_VERSION").into() - } - - fn description() -> String { - "Millau Bridge Node".into() - } - - fn author() -> String { - "Parity Technologies".into() - } - - fn support_url() -> String { - "https://github.com/paritytech/parity-bridges-common/".into() - } - - fn copyright_start_year() -> i32 { - 2019 - } - - fn executable_name() -> String { - "millau-bridge-node".into() - } - - fn native_runtime_version(_: &Box) -> &'static RuntimeVersion { - &millau_runtime::VERSION - } - - fn load_spec(&self, id: &str) -> Result, String> { - Ok(Box::new( - match id { - "" | "dev" => crate::chain_spec::Alternative::Development, - "local" => crate::chain_spec::Alternative::LocalTestnet, - _ => return Err(format!("Unsupported chain specification: {}", id)), - } - .load(), - )) - } -} - -/// Parse and run command line arguments -pub fn run() -> sc_cli::Result<()> { - let cli = Cli::from_args(); - // make sure to set correct crypto version. - sp_core::crypto::set_default_ss58_version(sp_core::crypto::Ss58AddressFormat::custom( - millau_runtime::SS58Prefix::get() as u16, - )); - - match &cli.subcommand { - Some(Subcommand::Benchmark(cmd)) => - if cfg!(feature = "runtime-benchmarks") { - let runner = cli.create_runner(cmd)?; - - runner.sync_run(|config| cmd.run::(config)) - } else { - println!( - "Benchmarking wasn't enabled when building the node. \ - You can enable it with `--features runtime-benchmarks`." - ); - Ok(()) - }, - Some(Subcommand::Key(cmd)) => cmd.run(&cli), - Some(Subcommand::Sign(cmd)) => cmd.run(), - Some(Subcommand::Verify(cmd)) => cmd.run(), - Some(Subcommand::Vanity(cmd)) => cmd.run(), - Some(Subcommand::BuildSpec(cmd)) => { - let runner = cli.create_runner(cmd)?; - runner.sync_run(|config| cmd.run(config.chain_spec, config.network)) - }, - Some(Subcommand::CheckBlock(cmd)) => { - let runner = cli.create_runner(cmd)?; - runner.async_run(|config| { - let PartialComponents { client, task_manager, import_queue, .. } = - new_partial(&config)?; - Ok((cmd.run(client, import_queue), task_manager)) - }) - }, - Some(Subcommand::ExportBlocks(cmd)) => { - let runner = cli.create_runner(cmd)?; - runner.async_run(|config| { - let PartialComponents { client, task_manager, .. } = new_partial(&config)?; - Ok((cmd.run(client, config.database), task_manager)) - }) - }, - Some(Subcommand::ExportState(cmd)) => { - let runner = cli.create_runner(cmd)?; - runner.async_run(|config| { - let PartialComponents { client, task_manager, .. } = new_partial(&config)?; - Ok((cmd.run(client, config.chain_spec), task_manager)) - }) - }, - Some(Subcommand::ImportBlocks(cmd)) => { - let runner = cli.create_runner(cmd)?; - runner.async_run(|config| { - let PartialComponents { client, task_manager, import_queue, .. } = - new_partial(&config)?; - Ok((cmd.run(client, import_queue), task_manager)) - }) - }, - Some(Subcommand::PurgeChain(cmd)) => { - let runner = cli.create_runner(cmd)?; - runner.sync_run(|config| cmd.run(config.database)) - }, - Some(Subcommand::Revert(cmd)) => { - let runner = cli.create_runner(cmd)?; - runner.async_run(|config| { - let PartialComponents { client, task_manager, backend, .. 
} = new_partial(&config)?; - Ok((cmd.run(client, backend), task_manager)) - }) - }, - Some(Subcommand::Inspect(cmd)) => { - let runner = cli.create_runner(cmd)?; - runner - .sync_run(|config| cmd.run::(config)) - }, - None => { - let runner = cli.create_runner(&cli.run)?; - runner.run_node_until_exit(|config| async move { - service::new_full(config).map_err(sc_cli::Error::Service) - }) - }, - } -} diff --git a/polkadot/bridges/bin/millau/node/src/lib.rs b/polkadot/bridges/bin/millau/node/src/lib.rs deleted file mode 100644 index 382d1c2d7fb..00000000000 --- a/polkadot/bridges/bin/millau/node/src/lib.rs +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate Node Template CLI library. -#![warn(missing_docs)] - -mod chain_spec; -#[macro_use] -mod service; -mod cli; -mod command; - -/// Node run result. -pub type Result = sc_cli::Result<()>; - -/// Run node. -pub fn run() -> Result { - command::run() -} diff --git a/polkadot/bridges/bin/millau/node/src/main.rs b/polkadot/bridges/bin/millau/node/src/main.rs deleted file mode 100644 index cf6dd9f733a..00000000000 --- a/polkadot/bridges/bin/millau/node/src/main.rs +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Millau bridge node. - -#![warn(missing_docs)] - -mod chain_spec; -#[macro_use] -mod service; -mod cli; -mod command; - -/// Run the Millau Node -fn main() -> sc_cli::Result<()> { - command::run() -} diff --git a/polkadot/bridges/bin/millau/node/src/service.rs b/polkadot/bridges/bin/millau/node/src/service.rs deleted file mode 100644 index 15f88269aa9..00000000000 --- a/polkadot/bridges/bin/millau/node/src/service.rs +++ /dev/null @@ -1,453 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Service and ServiceFactory implementation. Specialized wrapper over substrate service. - -// ===================================================================================== -// ===================================================================================== -// ===================================================================================== -// UPDATE GUIDE: -// 1) replace everything with node-template/src/service.rs contents (found in main Substrate repo); -// 2) from old code keep `rpc_extensions_builder` - we use our own custom RPCs; -// 3) from old code keep the Beefy gadget; -// 4) fix compilation errors; -// 5) test :) -// ===================================================================================== -// ===================================================================================== -// ===================================================================================== - -use millau_runtime::{self, opaque::Block, RuntimeApi}; -use sc_client_api::{BlockBackend, ExecutorProvider}; -use sc_consensus_aura::{ImportQueueParams, SlotProportion, StartAuraParams}; -pub use sc_executor::NativeElseWasmExecutor; -use sc_finality_grandpa::SharedVoterState; -use sc_keystore::LocalKeystore; -use sc_service::{error::Error as ServiceError, Configuration, TaskManager}; -use sc_telemetry::{Telemetry, TelemetryWorker}; -use sp_consensus_aura::sr25519::AuthorityPair as AuraPair; -use std::{sync::Arc, time::Duration}; - -// Our native executor instance. -pub struct ExecutorDispatch; - -impl sc_executor::NativeExecutionDispatch for ExecutorDispatch { - /// Only enable the benchmarking host functions when we actually want to benchmark. - #[cfg(feature = "runtime-benchmarks")] - type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; - /// Otherwise we only use the default Substrate host functions. 
- #[cfg(not(feature = "runtime-benchmarks"))] - type ExtendHostFunctions = (); - - fn dispatch(method: &str, data: &[u8]) -> Option> { - millau_runtime::api::dispatch(method, data) - } - - fn native_version() -> sc_executor::NativeVersion { - millau_runtime::native_version() - } -} - -type FullClient = - sc_service::TFullClient>; -type FullBackend = sc_service::TFullBackend; -type FullSelectChain = sc_consensus::LongestChain; - -#[allow(clippy::type_complexity)] -pub fn new_partial( - config: &Configuration, -) -> Result< - sc_service::PartialComponents< - FullClient, - FullBackend, - FullSelectChain, - sc_consensus::DefaultImportQueue, - sc_transaction_pool::FullPool, - ( - sc_finality_grandpa::GrandpaBlockImport< - FullBackend, - Block, - FullClient, - FullSelectChain, - >, - sc_finality_grandpa::LinkHalf, - Option, - ), - >, - ServiceError, -> { - if config.keystore_remote.is_some() { - return Err(ServiceError::Other("Remote Keystores are not supported.".into())) - } - - let telemetry = config - .telemetry_endpoints - .clone() - .filter(|x| !x.is_empty()) - .map(|endpoints| -> Result<_, sc_telemetry::Error> { - let worker = TelemetryWorker::new(16)?; - let telemetry = worker.handle().new_telemetry(endpoints); - Ok((worker, telemetry)) - }) - .transpose()?; - - let executor = NativeElseWasmExecutor::::new( - config.wasm_method, - config.default_heap_pages, - config.max_runtime_instances, - config.runtime_cache_size, - ); - - let (client, backend, keystore_container, task_manager) = - sc_service::new_full_parts::( - config, - telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), - executor, - )?; - let client = Arc::new(client); - - let telemetry = telemetry.map(|(worker, telemetry)| { - task_manager.spawn_handle().spawn("telemetry", None, worker.run()); - telemetry - }); - - let select_chain = sc_consensus::LongestChain::new(backend.clone()); - - let transaction_pool = sc_transaction_pool::BasicPool::new_full( - config.transaction_pool.clone(), - config.role.is_authority().into(), - config.prometheus_registry(), - task_manager.spawn_essential_handle(), - client.clone(), - ); - - let (grandpa_block_import, grandpa_link) = sc_finality_grandpa::block_import( - client.clone(), - &(client.clone() as Arc<_>), - select_chain.clone(), - telemetry.as_ref().map(|x| x.handle()), - )?; - - let slot_duration = sc_consensus_aura::slot_duration(&*client)?; - - let import_queue = - sc_consensus_aura::import_queue::(ImportQueueParams { - block_import: grandpa_block_import.clone(), - justification_import: Some(Box::new(grandpa_block_import.clone())), - client: client.clone(), - create_inherent_data_providers: move |_, ()| async move { - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - - let slot = - sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - *timestamp, - slot_duration, - ); - - Ok((timestamp, slot)) - }, - spawner: &task_manager.spawn_essential_handle(), - can_author_with: sp_consensus::CanAuthorWithNativeVersion::new( - client.executor().clone(), - ), - registry: config.prometheus_registry(), - check_for_equivocation: Default::default(), - telemetry: telemetry.as_ref().map(|x| x.handle()), - })?; - - Ok(sc_service::PartialComponents { - client, - backend, - task_manager, - import_queue, - keystore_container, - select_chain, - transaction_pool, - other: (grandpa_block_import, grandpa_link, telemetry), - }) -} - -fn remote_keystore(_url: &str) -> Result, &'static str> { - // FIXME: here would the concrete keystore be built, - // 
must return a concrete type (NOT `LocalKeystore`) that - // implements `CryptoStore` and `SyncCryptoStore` - Err("Remote Keystore not supported.") -} - -/// Builds a new service for a full client. -pub fn new_full(mut config: Configuration) -> Result { - let sc_service::PartialComponents { - client, - backend, - mut task_manager, - import_queue, - mut keystore_container, - select_chain, - transaction_pool, - other: (block_import, grandpa_link, mut telemetry), - } = new_partial(&config)?; - - if let Some(url) = &config.keystore_remote { - match remote_keystore(url) { - Ok(k) => keystore_container.set_remote_keystore(k), - Err(e) => - return Err(ServiceError::Other(format!( - "Error hooking up remote keystore for {}: {}", - url, e - ))), - }; - } - - // Note: GrandPa is pushed before the Polkadot-specific protocols. This doesn't change - // anything in terms of behaviour, but makes the logs more consistent with the other - // Substrate nodes. - let grandpa_protocol_name = sc_finality_grandpa::protocol_standard_name( - &client.block_hash(0).ok().flatten().expect("Genesis block exists; qed"), - &config.chain_spec, - ); - config - .network - .extra_sets - .push(sc_finality_grandpa::grandpa_peers_set_config(grandpa_protocol_name.clone())); - - let beefy_protocol_name = beefy_gadget::protocol_standard_name( - &client.block_hash(0).ok().flatten().expect("Genesis block exists; qed"), - &config.chain_spec, - ); - config - .network - .extra_sets - .push(beefy_gadget::beefy_peers_set_config(beefy_protocol_name.clone())); - - let warp_sync = Arc::new(sc_finality_grandpa::warp_proof::NetworkProvider::new( - backend.clone(), - grandpa_link.shared_authority_set().clone(), - Vec::default(), - )); - - let (network, system_rpc_tx, network_starter) = - sc_service::build_network(sc_service::BuildNetworkParams { - config: &config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - spawn_handle: task_manager.spawn_handle(), - import_queue, - block_announce_validator_builder: None, - warp_sync: Some(warp_sync), - })?; - - if config.offchain_worker.enabled { - sc_service::build_offchain_workers( - &config, - task_manager.spawn_handle(), - client.clone(), - network.clone(), - ); - } - - let role = config.role.clone(); - let force_authoring = config.force_authoring; - let backoff_authoring_blocks: Option<()> = None; - let name = config.network.node_name.clone(); - let enable_grandpa = !config.disable_grandpa; - let prometheus_registry = config.prometheus_registry().cloned(); - let shared_voter_state = SharedVoterState::empty(); - let (beefy_commitment_link, beefy_commitment_stream) = - beefy_gadget::notification::BeefySignedCommitmentStream::::channel(); - let (beefy_best_block_link, beefy_best_block_stream) = - beefy_gadget::notification::BeefyBestBlockStream::::channel(); - - let rpc_extensions_builder = { - use sc_finality_grandpa::FinalityProofProvider as GrandpaFinalityProofProvider; - - use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApi}; - use sc_finality_grandpa_rpc::{GrandpaApi, GrandpaRpcHandler}; - use sc_rpc::DenyUnsafe; - use substrate_frame_rpc_system::{FullSystem, SystemApi}; - - let backend = backend.clone(); - let client = client.clone(); - let pool = transaction_pool.clone(); - - let justification_stream = grandpa_link.justification_stream(); - let shared_authority_set = grandpa_link.shared_authority_set().clone(); - let shared_voter_state = shared_voter_state.clone(); - - let finality_proof_provider = GrandpaFinalityProofProvider::new_for_service( 
- backend, - Some(shared_authority_set.clone()), - ); - - Box::new(move |_, subscription_executor: sc_rpc::SubscriptionTaskExecutor| { - let mut io = jsonrpc_core::IoHandler::default(); - io.extend_with(SystemApi::to_delegate(FullSystem::new( - client.clone(), - pool.clone(), - DenyUnsafe::No, - ))); - io.extend_with(TransactionPaymentApi::to_delegate(TransactionPayment::new( - client.clone(), - ))); - io.extend_with(GrandpaApi::to_delegate(GrandpaRpcHandler::new( - shared_authority_set.clone(), - shared_voter_state.clone(), - justification_stream.clone(), - subscription_executor.clone(), - finality_proof_provider.clone(), - ))); - io.extend_with(beefy_gadget_rpc::BeefyApi::to_delegate( - beefy_gadget_rpc::BeefyRpcHandler::::new( - beefy_commitment_stream.clone(), - beefy_best_block_stream.clone(), - subscription_executor, - ) - .map_err(|e| sc_service::Error::Other(format!("{}", e)))?, - )); - io.extend_with(pallet_mmr_rpc::MmrApi::to_delegate(pallet_mmr_rpc::Mmr::new( - client.clone(), - ))); - Ok(io) - }) - }; - - let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams { - network: network.clone(), - client: client.clone(), - keystore: keystore_container.sync_keystore(), - task_manager: &mut task_manager, - transaction_pool: transaction_pool.clone(), - rpc_extensions_builder, - backend: backend.clone(), - system_rpc_tx, - config, - telemetry: telemetry.as_mut(), - })?; - - if role.is_authority() { - let proposer_factory = sc_basic_authorship::ProposerFactory::new( - task_manager.spawn_handle(), - client.clone(), - transaction_pool, - prometheus_registry.as_ref(), - telemetry.as_ref().map(|x| x.handle()), - ); - - let can_author_with = - sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); - - let slot_duration = sc_consensus_aura::slot_duration(&*client)?; - - let aura = sc_consensus_aura::start_aura::( - StartAuraParams { - slot_duration, - client: client.clone(), - select_chain, - block_import, - proposer_factory, - create_inherent_data_providers: move |_, ()| async move { - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - - let slot = - sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - *timestamp, - slot_duration, - ); - - Ok((timestamp, slot)) - }, - force_authoring, - backoff_authoring_blocks, - keystore: keystore_container.sync_keystore(), - can_author_with, - sync_oracle: network.clone(), - justification_sync_link: network.clone(), - block_proposal_slot_portion: SlotProportion::new(2f32 / 3f32), - max_block_proposal_slot_portion: None, - telemetry: telemetry.as_ref().map(|x| x.handle()), - }, - )?; - - // the AURA authoring task is considered essential, i.e. if it - // fails we take down the service with it. - task_manager - .spawn_essential_handle() - .spawn_blocking("aura", Some("block-authoring"), aura); - } - - // if the node isn't actively participating in consensus then it doesn't - // need a keystore, regardless of which protocol we use below. - let keystore = - if role.is_authority() { Some(keystore_container.sync_keystore()) } else { None }; - - let beefy_params = beefy_gadget::BeefyParams { - client, - backend, - key_store: keystore.clone(), - network: network.clone(), - signed_commitment_sender: beefy_commitment_link, - beefy_best_block_sender: beefy_best_block_link, - min_block_delta: 4, - prometheus_registry: prometheus_registry.clone(), - protocol_name: beefy_protocol_name, - }; - - // Start the BEEFY bridge gadget. 
- task_manager.spawn_essential_handle().spawn_blocking( - "beefy-gadget", - None, - beefy_gadget::start_beefy_gadget::<_, _, _, _>(beefy_params), - ); - - let grandpa_config = sc_finality_grandpa::Config { - // FIXME #1578 make this available through chainspec - gossip_duration: Duration::from_millis(333), - justification_period: 512, - name: Some(name), - observer_enabled: false, - keystore, - local_role: role, - telemetry: telemetry.as_ref().map(|x| x.handle()), - protocol_name: grandpa_protocol_name, - }; - - if enable_grandpa { - // start the full GRANDPA voter - // NOTE: non-authorities could run the GRANDPA observer protocol, but at - // this point the full voter should provide better guarantees of block - // and vote data availability than the observer. The observer has not - // been tested extensively yet and having most nodes in a network run it - // could lead to finality stalls. - let grandpa_config = sc_finality_grandpa::GrandpaParams { - config: grandpa_config, - link: grandpa_link, - network, - voting_rule: sc_finality_grandpa::VotingRulesBuilder::default().build(), - prometheus_registry, - shared_voter_state, - telemetry: telemetry.as_ref().map(|x| x.handle()), - }; - - // the GRANDPA voter task is considered infallible, i.e. - // if it fails we take down the service with it. - task_manager.spawn_essential_handle().spawn_blocking( - "grandpa-voter", - None, - sc_finality_grandpa::run_grandpa_voter(grandpa_config)?, - ); - } - - network_starter.start_network(); - Ok(task_manager) -} diff --git a/polkadot/bridges/bin/millau/runtime/Cargo.toml b/polkadot/bridges/bin/millau/runtime/Cargo.toml deleted file mode 100644 index 979417c4cc8..00000000000 --- a/polkadot/bridges/bin/millau/runtime/Cargo.toml +++ /dev/null @@ -1,135 +0,0 @@ -[package] -name = "millau-runtime" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -homepage = "https://substrate.dev" -repository = "https://github.com/paritytech/parity-bridges-common/" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -hex-literal = "0.3" -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -libsecp256k1 = { version = "0.7", optional = true, default-features = false, features = ["hmac"] } -scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } -serde = { version = "1.0", optional = true, features = ["derive"] } - -# Bridge dependencies - -bp-header-chain = { path = "../../../primitives/header-chain", default-features = false } -bp-message-dispatch = { path = "../../../primitives/message-dispatch", default-features = false } -bp-messages = { path = "../../../primitives/messages", default-features = false } -bp-millau = { path = "../../../primitives/chain-millau", default-features = false } -bp-rialto = { path = "../../../primitives/chain-rialto", default-features = false } -bp-runtime = { path = "../../../primitives/runtime", default-features = false } -bp-westend = { path = "../../../primitives/chain-westend", default-features = false } -bridge-runtime-common = { path = "../../runtime-common", default-features = false } -pallet-bridge-dispatch = { path = "../../../modules/dispatch", default-features = false } -pallet-bridge-grandpa = { path = "../../../modules/grandpa", default-features = false } -pallet-bridge-messages = { path = "../../../modules/messages", default-features = false } -pallet-bridge-token-swap = { path = "../../../modules/token-swap", default-features = false } 
-pallet-shift-session-manager = { path = "../../../modules/shift-session-manager", default-features = false } - -# Substrate Dependencies - -beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } -frame-executive = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-aura = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-beefy = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-beefy-mmr = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-mmr = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-sudo = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-mmr-primitives = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-offchain = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } 
-sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } - -[dev-dependencies] -bridge-runtime-common = { path = "../../runtime-common", features = ["integrity-test"] } -static_assertions = "1.1" - -[build-dependencies] -substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", branch = "master" } - -[features] -default = ["std"] -std = [ - "beefy-primitives/std", - "bp-header-chain/std", - "bp-message-dispatch/std", - "bp-messages/std", - "bp-millau/std", - "bp-rialto/std", - "bp-runtime/std", - "bp-westend/std", - "bridge-runtime-common/std", - "codec/std", - "frame-executive/std", - "frame-support/std", - "frame-system-rpc-runtime-api/std", - "frame-system/std", - "pallet-aura/std", - "pallet-balances/std", - "pallet-beefy/std", - "pallet-beefy-mmr/std", - "pallet-bridge-dispatch/std", - "pallet-bridge-grandpa/std", - "pallet-bridge-messages/std", - "pallet-bridge-token-swap/std", - "pallet-grandpa/std", - "pallet-mmr/std", - "pallet-randomness-collective-flip/std", - "pallet-session/std", - "pallet-shift-session-manager/std", - "pallet-sudo/std", - "pallet-timestamp/std", - "pallet-transaction-payment-rpc-runtime-api/std", - "pallet-transaction-payment/std", - "scale-info/std", - "serde", - "sp-api/std", - "sp-block-builder/std", - "sp-consensus-aura/std", - "sp-core/std", - "sp-finality-grandpa/std", - "sp-inherents/std", - "sp-mmr-primitives/std", - "sp-offchain/std", - "sp-runtime/std", - "sp-session/std", - "sp-std/std", - "sp-transaction-pool/std", - "sp-trie/std", - "sp-version/std", -] -runtime-benchmarks = [ - "bridge-runtime-common/runtime-benchmarks", - "frame-benchmarking/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "libsecp256k1", - "pallet-bridge-messages/runtime-benchmarks", - "pallet-bridge-token-swap/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", -] diff --git a/polkadot/bridges/bin/millau/runtime/build.rs b/polkadot/bridges/bin/millau/runtime/build.rs deleted file mode 100644 index cc865704327..00000000000 --- a/polkadot/bridges/bin/millau/runtime/build.rs +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -use substrate_wasm_builder::WasmBuilder; - -fn main() { - WasmBuilder::new() - .with_current_project() - .import_memory() - .export_heap_base() - .build() -} diff --git a/polkadot/bridges/bin/millau/runtime/src/lib.rs b/polkadot/bridges/bin/millau/runtime/src/lib.rs deleted file mode 100644 index 4714dab68a4..00000000000 --- a/polkadot/bridges/bin/millau/runtime/src/lib.rs +++ /dev/null @@ -1,1004 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! The Millau runtime. This can be compiled with `#[no_std]`, ready for Wasm. - -#![cfg_attr(not(feature = "std"), no_std)] -// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. -#![recursion_limit = "256"] -// Runtime-generated enums -#![allow(clippy::large_enum_variant)] -// From construct_runtime macro -#![allow(clippy::from_over_into)] - -// Make the WASM binary available. -#[cfg(feature = "std")] -include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); - -pub mod rialto_messages; - -use crate::rialto_messages::{ToRialtoMessagePayload, WithRialtoMessageBridge}; - -use beefy_primitives::{crypto::AuthorityId as BeefyId, mmr::{MmrLeafVersion}, ValidatorSet}; -use bridge_runtime_common::messages::{ - source::estimate_message_dispatch_and_delivery_fee, MessageBridge, -}; -use pallet_grandpa::{ - fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList, -}; -use sp_mmr_primitives::{ - DataOrHash, EncodableOpaqueLeaf, Error as MmrError, LeafDataProvider, - BatchProof as MmrBatchProof, Proof as MmrProof, LeafIndex as MmrLeafIndex -}; -use pallet_transaction_payment::{FeeDetails, Multiplier, RuntimeDispatchInfo}; -use sp_api::impl_runtime_apis; -use sp_consensus_aura::sr25519::AuthorityId as AuraId; -use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; -use sp_runtime::{ - create_runtime_str, generic, impl_opaque_keys, - traits::{Block as BlockT, IdentityLookup, Keccak256, NumberFor, OpaqueKeys}, - transaction_validity::{TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, FixedPointNumber, FixedU128, MultiSignature, MultiSigner, Perquintill, -}; -use sp_std::prelude::*; -#[cfg(feature = "std")] -use sp_version::NativeVersion; -use sp_version::RuntimeVersion; - -// A few exports that help ease life for downstream crates. 
-pub use frame_support::{ - construct_runtime, parameter_types, - traits::{Currency, ExistenceRequirement, Imbalance, KeyOwnerProofSystem}, - weights::{constants::WEIGHT_PER_SECOND, DispatchClass, IdentityFee, RuntimeDbWeight, Weight}, - StorageValue, -}; - -pub use frame_system::Call as SystemCall; -pub use pallet_balances::Call as BalancesCall; -pub use pallet_bridge_grandpa::Call as BridgeGrandpaCall; -pub use pallet_bridge_messages::Call as MessagesCall; -pub use pallet_sudo::Call as SudoCall; -pub use pallet_timestamp::Call as TimestampCall; - -#[cfg(any(feature = "std", test))] -pub use sp_runtime::BuildStorage; -pub use sp_runtime::{Perbill, Permill}; - -/// An index to a block. -pub type BlockNumber = bp_millau::BlockNumber; - -/// Alias to 512-bit hash when used in the context of a transaction signature on the chain. -pub type Signature = bp_millau::Signature; - -/// Some way of identifying an account on the chain. We intentionally make it equivalent -/// to the public key of our transaction signing scheme. -pub type AccountId = bp_millau::AccountId; - -/// The type for looking up accounts. We don't expect more than 4 billion of them, but you -/// never know... -pub type AccountIndex = u32; - -/// Balance of an account. -pub type Balance = bp_millau::Balance; - -/// Index of a transaction in the chain. -pub type Index = bp_millau::Index; - -/// A hash of some data used by the chain. -pub type Hash = bp_millau::Hash; - -/// Hashing algorithm used by the chain. -pub type Hashing = bp_millau::Hasher; - -/// Opaque types. These are used by the CLI to instantiate machinery that don't need to know -/// the specifics of the runtime. They can then be made to be agnostic over specific formats -/// of data like extrinsics, allowing for them to continue syncing the network through upgrades -/// to even the core data structures. -pub mod opaque { - use super::*; - - pub use sp_runtime::OpaqueExtrinsic as UncheckedExtrinsic; - - /// Opaque block header type. - pub type Header = generic::Header; - /// Opaque block type. - pub type Block = generic::Block; - /// Opaque block identifier type. - pub type BlockId = generic::BlockId; -} - -impl_opaque_keys! { - pub struct SessionKeys { - pub aura: Aura, - pub beefy: Beefy, - pub grandpa: Grandpa, - } -} - -/// This runtime version. -pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("millau-runtime"), - impl_name: create_runtime_str!("millau-runtime"), - authoring_version: 1, - spec_version: 1, - impl_version: 1, - apis: RUNTIME_API_VERSIONS, - transaction_version: 1, - state_version: 0, -}; - -/// The version information used to identify this runtime when compiled natively. -#[cfg(feature = "std")] -pub fn native_version() -> NativeVersion { - NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } -} - -parameter_types! { - pub const BlockHashCount: BlockNumber = 250; - pub const Version: RuntimeVersion = VERSION; - pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { - read: 60_000_000, // ~0.06 ms = ~60 µs - write: 200_000_000, // ~0.2 ms = 200 µs - }; - pub const SS58Prefix: u8 = 60; -} - -impl frame_system::Config for Runtime { - /// The basic call filter to use in dispatchable. - type BaseCallFilter = frame_support::traits::Everything; - /// The identifier used to distinguish between accounts. - type AccountId = AccountId; - /// The aggregated dispatch type that is available for extrinsics. 
- type Call = Call; - /// The lookup mechanism to get account ID from whatever is passed in dispatchers. - type Lookup = IdentityLookup; - /// The index type for storing how many extrinsics an account has signed. - type Index = Index; - /// The index type for blocks. - type BlockNumber = BlockNumber; - /// The type for hashing blocks and tries. - type Hash = Hash; - /// The hashing algorithm used. - type Hashing = Hashing; - /// The header type. - type Header = generic::Header; - /// The ubiquitous event type. - type Event = Event; - /// The ubiquitous origin type. - type Origin = Origin; - /// Maximum number of block number to block hash mappings to keep (oldest pruned first). - type BlockHashCount = BlockHashCount; - /// Version of the runtime. - type Version = Version; - /// Provides information about the pallet setup in the runtime. - type PalletInfo = PalletInfo; - /// What to do if a new account is created. - type OnNewAccount = (); - /// What to do if an account is fully reaped from the system. - type OnKilledAccount = (); - /// The data to be stored in an account. - type AccountData = pallet_balances::AccountData; - // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) - /// Weight information for the extrinsics of this pallet. - type SystemWeightInfo = (); - /// Block and extrinsics weights: base values and limits. - type BlockWeights = bp_millau::BlockWeights; - /// The maximum length of a block (in bytes). - type BlockLength = bp_millau::BlockLength; - /// The weight of database operations that the runtime can invoke. - type DbWeight = DbWeight; - /// The designated `SS58` prefix of this chain. - type SS58Prefix = SS58Prefix; - /// The set code logic, just the default since we're not a parachain. - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; -} - -impl pallet_randomness_collective_flip::Config for Runtime {} - -parameter_types! { - pub const MaxAuthorities: u32 = 10; -} - -impl pallet_aura::Config for Runtime { - type AuthorityId = AuraId; - type MaxAuthorities = MaxAuthorities; - type DisabledValidators = (); -} - -impl pallet_beefy::Config for Runtime { - type BeefyId = BeefyId; - type MaxAuthorities = MaxAuthorities; -} - -impl pallet_bridge_dispatch::Config for Runtime { - type Event = Event; - type BridgeMessageId = (bp_messages::LaneId, bp_messages::MessageNonce); - type Call = Call; - type CallFilter = frame_support::traits::Everything; - type EncodedCall = crate::rialto_messages::FromRialtoEncodedCall; - type SourceChainAccountId = bp_rialto::AccountId; - type TargetChainAccountPublic = MultiSigner; - type TargetChainSignature = MultiSignature; - type AccountIdConverter = bp_millau::AccountIdConverter; -} - -impl pallet_grandpa::Config for Runtime { - type Event = Event; - type Call = Call; - type KeyOwnerProofSystem = (); - type KeyOwnerProof = - >::Proof; - type KeyOwnerIdentification = >::IdentificationTuple; - type HandleEquivocation = (); - // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) - type WeightInfo = (); - type MaxAuthorities = MaxAuthorities; -} - -type MmrHash = ::Output; - -impl pallet_mmr::Config for Runtime { - const INDEXING_PREFIX: &'static [u8] = b"mmr"; - type Hashing = Keccak256; - type Hash = MmrHash; - type OnNewRoot = pallet_beefy_mmr::DepositBeefyDigest; - type WeightInfo = (); - type LeafData = pallet_beefy_mmr::Pallet; -} - -parameter_types! { - /// Version of the produced MMR leaf. 
- /// - /// The version consists of two parts; - /// - `major` (3 bits) - /// - `minor` (5 bits) - /// - /// `major` should be updated only if decoding the previous MMR Leaf format from the payload - /// is not possible (i.e. backward incompatible change). - /// `minor` should be updated if fields are added to the previous MMR Leaf, which given SCALE - /// encoding does not prevent old leafs from being decoded. - /// - /// Hence we expect `major` to be changed really rarely (think never). - /// See [`MmrLeafVersion`] type documentation for more details. - pub LeafVersion: MmrLeafVersion = MmrLeafVersion::new(0, 0); -} - -impl pallet_beefy_mmr::Config for Runtime { - type LeafVersion = LeafVersion; - type BeefyAuthorityToMerkleLeaf = pallet_beefy_mmr::BeefyEcdsaToEthereum; - type BeefyDataProvider = (); -} - -parameter_types! { - pub const MinimumPeriod: u64 = bp_millau::SLOT_DURATION / 2; -} - -impl pallet_timestamp::Config for Runtime { - /// A timestamp: milliseconds since the UNIX epoch. - type Moment = u64; - type OnTimestampSet = Aura; - type MinimumPeriod = MinimumPeriod; - // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) - type WeightInfo = (); -} - -parameter_types! { - pub const ExistentialDeposit: bp_millau::Balance = 500; - // For weight estimation, we assume that the most locks on an individual account will be 50. - // This number may need to be adjusted in the future if this assumption no longer holds true. - pub const MaxLocks: u32 = 50; - pub const MaxReserves: u32 = 50; -} - -impl pallet_balances::Config for Runtime { - /// The type for recording an account's balance. - type Balance = Balance; - /// The ubiquitous event type. - type Event = Event; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) - type WeightInfo = (); - type MaxLocks = MaxLocks; - type MaxReserves = MaxReserves; - type ReserveIdentifier = [u8; 8]; -} - -parameter_types! { - pub const TransactionBaseFee: Balance = 0; - pub const TransactionByteFee: Balance = 1; - pub const OperationalFeeMultiplier: u8 = 5; - // values for following parameters are copied from polkadot repo, but it is fine - // not to sync them - we're not going to make Rialto a full copy of one of Polkadot-like chains - pub const TargetBlockFullness: Perquintill = Perquintill::from_percent(25); - pub AdjustmentVariable: Multiplier = Multiplier::saturating_from_rational(3, 100_000); - pub MinimumMultiplier: Multiplier = Multiplier::saturating_from_rational(1, 1_000_000u128); -} - -impl pallet_transaction_payment::Config for Runtime { - type Event = Event; - type OnChargeTransaction = pallet_transaction_payment::CurrencyAdapter; - type TransactionByteFee = TransactionByteFee; - type OperationalFeeMultiplier = OperationalFeeMultiplier; - type WeightToFee = bp_millau::WeightToFee; - type FeeMultiplierUpdate = pallet_transaction_payment::TargetedFeeAdjustment< - Runtime, - TargetBlockFullness, - AdjustmentVariable, - MinimumMultiplier, - >; -} - -impl pallet_sudo::Config for Runtime { - type Event = Event; - type Call = Call; -} - -parameter_types! { - /// Authorities are changing every 5 minutes. 
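// A minimal standalone sketch of the major/minor packing described in the `LeafVersion`
// comment earlier in this file: `major` takes the top 3 bits of a single byte and `minor`
// the low 5 bits, so `MmrLeafVersion::new(0, 0)` encodes as 0. The helper below is a
// hypothetical re-implementation for illustration, not the beefy-primitives type itself.
fn pack_leaf_version(major: u8, minor: u8) -> u8 {
    assert!(major < 8 && minor < 32, "3-bit major, 5-bit minor");
    (major << 5) | minor
}

fn main() {
    assert_eq!(pack_leaf_version(0, 0), 0); // the value used by `LeafVersion` above
    assert_eq!(pack_leaf_version(1, 2), 0b001_00010);
}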
- pub const Period: BlockNumber = bp_millau::SESSION_LENGTH; - pub const Offset: BlockNumber = 0; -} - -impl pallet_session::Config for Runtime { - type Event = Event; - type ValidatorId = ::AccountId; - type ValidatorIdOf = (); - type ShouldEndSession = pallet_session::PeriodicSessions; - type NextSessionRotation = pallet_session::PeriodicSessions; - type SessionManager = pallet_shift_session_manager::Pallet; - type SessionHandler = ::KeyTypeIdProviders; - type Keys = SessionKeys; - // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) - type WeightInfo = (); -} - -parameter_types! { - // This is a pretty unscientific cap. - // - // Note that once this is hit the pallet will essentially throttle incoming requests down to one - // call per block. - pub const MaxRequests: u32 = 50; -} - -#[cfg(feature = "runtime-benchmarks")] -parameter_types! { - /// Number of headers to keep in benchmarks. - /// - /// In benchmarks we always populate with full number of `HeadersToKeep` to make sure that - /// pruning is taken into account. - /// - /// Note: This is lower than regular value, to speed up benchmarking setup. - pub const HeadersToKeep: u32 = 1024; -} - -#[cfg(not(feature = "runtime-benchmarks"))] -parameter_types! { - /// Number of headers to keep. - /// - /// Assuming the worst case of every header being finalized, we will keep headers at least for a - /// week. - pub const HeadersToKeep: u32 = 7 * bp_rialto::DAYS as u32; -} - -pub type RialtoGrandpaInstance = (); -impl pallet_bridge_grandpa::Config for Runtime { - type BridgedChain = bp_rialto::Rialto; - type MaxRequests = MaxRequests; - type HeadersToKeep = HeadersToKeep; - - type WeightInfo = pallet_bridge_grandpa::weights::MillauWeight; -} - -pub type WestendGrandpaInstance = pallet_bridge_grandpa::Instance1; -impl pallet_bridge_grandpa::Config for Runtime { - type BridgedChain = bp_westend::Westend; - type MaxRequests = MaxRequests; - type HeadersToKeep = HeadersToKeep; - - type WeightInfo = pallet_bridge_grandpa::weights::MillauWeight; -} - -impl pallet_shift_session_manager::Config for Runtime {} - -parameter_types! { - pub const MaxMessagesToPruneAtOnce: bp_messages::MessageNonce = 8; - pub const MaxUnrewardedRelayerEntriesAtInboundLane: bp_messages::MessageNonce = - bp_rialto::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; - pub const MaxUnconfirmedMessagesAtInboundLane: bp_messages::MessageNonce = - bp_rialto::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; - // `IdentityFee` is used by Millau => we may use weight directly - pub const GetDeliveryConfirmationTransactionFee: Balance = - bp_millau::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT as _; - pub const RootAccountForPayments: Option = None; - pub const RialtoChainId: bp_runtime::ChainId = bp_runtime::RIALTO_CHAIN_ID; -} - -/// Instance of the messages pallet used to relay messages to/from Rialto chain. 
-pub type WithRialtoMessagesInstance = (); - -impl pallet_bridge_messages::Config for Runtime { - type Event = Event; - type WeightInfo = pallet_bridge_messages::weights::MillauWeight; - type Parameter = rialto_messages::MillauToRialtoMessagesParameter; - type MaxMessagesToPruneAtOnce = MaxMessagesToPruneAtOnce; - type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; - type MaxUnconfirmedMessagesAtInboundLane = MaxUnconfirmedMessagesAtInboundLane; - - type OutboundPayload = crate::rialto_messages::ToRialtoMessagePayload; - type OutboundMessageFee = Balance; - - type InboundPayload = crate::rialto_messages::FromRialtoMessagePayload; - type InboundMessageFee = bp_rialto::Balance; - type InboundRelayer = bp_rialto::AccountId; - - type AccountIdConverter = bp_millau::AccountIdConverter; - - type TargetHeaderChain = crate::rialto_messages::Rialto; - type LaneMessageVerifier = crate::rialto_messages::ToRialtoMessageVerifier; - type MessageDeliveryAndDispatchPayment = - pallet_bridge_messages::instant_payments::InstantCurrencyPayments< - Runtime, - WithRialtoMessagesInstance, - pallet_balances::Pallet, - GetDeliveryConfirmationTransactionFee, - >; - type OnMessageAccepted = (); - type OnDeliveryConfirmed = - pallet_bridge_token_swap::Pallet; - - type SourceHeaderChain = crate::rialto_messages::Rialto; - type MessageDispatch = crate::rialto_messages::FromRialtoMessageDispatch; - type BridgedChainId = RialtoChainId; -} - -parameter_types! { - pub const TokenSwapMessagesLane: bp_messages::LaneId = *b"swap"; -} - -/// Instance of the with-Rialto token swap pallet. -pub type WithRialtoTokenSwapInstance = (); - -impl pallet_bridge_token_swap::Config for Runtime { - type Event = Event; - type WeightInfo = (); - - type BridgedChainId = RialtoChainId; - type OutboundMessageLaneId = TokenSwapMessagesLane; - #[cfg(not(feature = "runtime-benchmarks"))] - type MessagesBridge = pallet_bridge_messages::Pallet; - #[cfg(feature = "runtime-benchmarks")] - type MessagesBridge = bp_messages::source_chain::NoopMessagesBridge; - type ThisCurrency = pallet_balances::Pallet; - type FromSwapToThisAccountIdConverter = bp_rialto::AccountIdConverter; - - type BridgedChain = bp_rialto::Rialto; - type FromBridgedToThisAccountIdConverter = bp_millau::AccountIdConverter; -} - -construct_runtime!( - pub enum Runtime where - Block = Block, - NodeBlock = opaque::Block, - UncheckedExtrinsic = UncheckedExtrinsic - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Sudo: pallet_sudo::{Pallet, Call, Config, Storage, Event}, - - // Must be before session. - Aura: pallet_aura::{Pallet, Config}, - - Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, - Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, - TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event}, - - // Consensus support. - Session: pallet_session::{Pallet, Call, Storage, Event, Config}, - Grandpa: pallet_grandpa::{Pallet, Call, Storage, Config, Event}, - ShiftSessionManager: pallet_shift_session_manager::{Pallet}, - RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Pallet, Storage}, - - // BEEFY Bridges support. - Beefy: pallet_beefy::{Pallet, Storage, Config}, - Mmr: pallet_mmr::{Pallet, Storage}, - MmrLeaf: pallet_beefy_mmr::{Pallet, Storage}, - - // Rialto bridge modules. 
- BridgeRialtoGrandpa: pallet_bridge_grandpa::{Pallet, Call, Storage}, - BridgeDispatch: pallet_bridge_dispatch::{Pallet, Event}, - BridgeRialtoMessages: pallet_bridge_messages::{Pallet, Call, Storage, Event, Config}, - BridgeRialtoTokenSwap: pallet_bridge_token_swap::{Pallet, Call, Storage, Event, Origin}, - - // Westend bridge modules. - BridgeWestendGrandpa: pallet_bridge_grandpa::::{Pallet, Call, Config, Storage}, - } -); - -/// The address format for describing accounts. -pub type Address = AccountId; -/// Block header type as expected by this runtime. -pub type Header = generic::Header; -/// Block type as expected by this runtime. -pub type Block = generic::Block; -/// A Block signed with a Justification -pub type SignedBlock = generic::SignedBlock; -/// `BlockId` type as expected by this runtime. -pub type BlockId = generic::BlockId; -/// The `SignedExtension` to the basic transaction logic. -pub type SignedExtra = ( - frame_system::CheckNonZeroSender, - frame_system::CheckSpecVersion, - frame_system::CheckTxVersion, - frame_system::CheckGenesis, - frame_system::CheckEra, - frame_system::CheckNonce, - frame_system::CheckWeight, - pallet_transaction_payment::ChargeTransactionPayment, -); -/// The payload being signed in transactions. -pub type SignedPayload = generic::SignedPayload; -/// Unchecked extrinsic type as expected by this runtime. -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; -/// Extrinsic type that has already been checked. -pub type CheckedExtrinsic = generic::CheckedExtrinsic; -/// Executive: handles dispatch to the various modules. -pub type Executive = frame_executive::Executive< - Runtime, - Block, - frame_system::ChainContext, - Runtime, - AllPalletsWithSystem, ->; - -#[cfg(feature = "runtime-benchmarks")] -#[macro_use] -extern crate frame_benchmarking; - -#[cfg(feature = "runtime-benchmarks")] -mod benches { - define_benchmarks!( - [pallet_bridge_token_swap, BridgeRialtoTokenSwap] - ); -} -type MmrHashing = ::Hashing; - -impl_runtime_apis! 
{ - impl sp_api::Core for Runtime { - fn version() -> RuntimeVersion { - VERSION - } - - fn execute_block(block: Block) { - Executive::execute_block(block); - } - - fn initialize_block(header: &::Header) { - Executive::initialize_block(header) - } - } - - impl sp_api::Metadata for Runtime { - fn metadata() -> OpaqueMetadata { - OpaqueMetadata::new(Runtime::metadata().into()) - } - } - - impl sp_block_builder::BlockBuilder for Runtime { - fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { - Executive::apply_extrinsic(extrinsic) - } - - fn finalize_block() -> ::Header { - Executive::finalize_block() - } - - fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { - data.create_extrinsics() - } - - fn check_inherents( - block: Block, - data: sp_inherents::InherentData, - ) -> sp_inherents::CheckInherentsResult { - data.check_extrinsics(&block) - } - } - - impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { - fn account_nonce(account: AccountId) -> Index { - System::account_nonce(account) - } - } - - impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { - fn validate_transaction( - source: TransactionSource, - tx: ::Extrinsic, - block_hash: ::Hash, - ) -> TransactionValidity { - Executive::validate_transaction(source, tx, block_hash) - } - } - - impl sp_offchain::OffchainWorkerApi for Runtime { - fn offchain_worker(header: &::Header) { - Executive::offchain_worker(header) - } - } - - impl sp_consensus_aura::AuraApi for Runtime { - fn slot_duration() -> sp_consensus_aura::SlotDuration { - sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) - } - - fn authorities() -> Vec { - Aura::authorities().to_vec() - } - } - - impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi< - Block, - Balance, - > for Runtime { - fn query_info(uxt: ::Extrinsic, len: u32) -> RuntimeDispatchInfo { - TransactionPayment::query_info(uxt, len) - } - fn query_fee_details(uxt: ::Extrinsic, len: u32) -> FeeDetails { - TransactionPayment::query_fee_details(uxt, len) - } - } - - impl sp_session::SessionKeys for Runtime { - fn generate_session_keys(seed: Option>) -> Vec { - SessionKeys::generate(seed) - } - - fn decode_session_keys( - encoded: Vec, - ) -> Option, sp_core::crypto::KeyTypeId)>> { - SessionKeys::decode_into_raw_public_keys(&encoded) - } - } - - impl beefy_primitives::BeefyApi for Runtime { - fn validator_set() -> Option> { - Beefy::validator_set() - } - } - - impl sp_mmr_primitives::MmrApi for Runtime { - fn generate_proof(leaf_index: MmrLeafIndex) - -> Result<(EncodableOpaqueLeaf, MmrProof), MmrError> - { - Mmr::generate_batch_proof(vec![leaf_index]) - .and_then(|(leaves, proof)| Ok(( - EncodableOpaqueLeaf::from_leaf(&leaves[0]), - MmrBatchProof::into_single_leaf_proof(proof)? 
- ))) - - } - - fn verify_proof(leaf: EncodableOpaqueLeaf, proof: MmrProof) - -> Result<(), MmrError> - { - - type MmrLeaf = <::LeafData as LeafDataProvider>::LeafData; - let leaf: MmrLeaf = leaf - .into_opaque_leaf() - .try_decode() - .ok_or(MmrError::Verify)?; - Mmr::verify_leaves(vec![leaf], MmrProof::into_batch_proof(proof)) - } - - fn verify_proof_stateless( - root: MmrHash, - leaf: EncodableOpaqueLeaf, - proof: MmrProof - ) -> Result<(), MmrError> { - let node = DataOrHash::Data(leaf.into_opaque_leaf()); - pallet_mmr::verify_leaves_proof::(root, vec![node], MmrProof::into_batch_proof(proof)) - } - - fn generate_batch_proof(leaf_indices: Vec) - -> Result<(Vec, MmrBatchProof), MmrError> - { - Mmr::generate_batch_proof(leaf_indices) - .map(|(leaves, proof)| (leaves.into_iter().map(|leaf| EncodableOpaqueLeaf::from_leaf(&leaf)).collect(), proof)) - } - - fn verify_batch_proof(leaves: Vec, proof: MmrBatchProof) - -> Result<(), MmrError> - { - type MmrLeaf = <::LeafData as LeafDataProvider>::LeafData; - let leaves = leaves.into_iter().map(|leaf| - leaf.into_opaque_leaf() - .try_decode() - .ok_or(MmrError::Verify)).collect::, MmrError>>()?; - Mmr::verify_leaves(leaves, proof) - } - - fn verify_batch_proof_stateless( - root: MmrHash, - leaves: Vec, - proof: MmrBatchProof - ) -> Result<(), MmrError> { - let nodes = leaves.into_iter().map(|leaf|DataOrHash::Data(leaf.into_opaque_leaf())).collect(); - pallet_mmr::verify_leaves_proof::(root, nodes, proof) - } - } - - impl fg_primitives::GrandpaApi for Runtime { - fn current_set_id() -> fg_primitives::SetId { - Grandpa::current_set_id() - } - - fn grandpa_authorities() -> GrandpaAuthorityList { - Grandpa::grandpa_authorities() - } - - fn submit_report_equivocation_unsigned_extrinsic( - equivocation_proof: fg_primitives::EquivocationProof< - ::Hash, - NumberFor, - >, - key_owner_proof: fg_primitives::OpaqueKeyOwnershipProof, - ) -> Option<()> { - let key_owner_proof = key_owner_proof.decode()?; - - Grandpa::submit_unsigned_equivocation_report( - equivocation_proof, - key_owner_proof, - ) - } - - fn generate_key_ownership_proof( - _set_id: fg_primitives::SetId, - _authority_id: GrandpaId, - ) -> Option { - // NOTE: this is the only implementation possible since we've - // defined our key owner proof type as a bottom type (i.e. a type - // with no values). 
- None - } - } - - impl bp_rialto::RialtoFinalityApi for Runtime { - fn best_finalized() -> (bp_rialto::BlockNumber, bp_rialto::Hash) { - let header = BridgeRialtoGrandpa::best_finalized(); - (header.number, header.hash()) - } - } - - impl bp_westend::WestendFinalityApi for Runtime { - fn best_finalized() -> (bp_westend::BlockNumber, bp_westend::Hash) { - let header = BridgeWestendGrandpa::best_finalized(); - (header.number, header.hash()) - } - } - - impl bp_rialto::ToRialtoOutboundLaneApi for Runtime { - fn estimate_message_delivery_and_dispatch_fee( - _lane_id: bp_messages::LaneId, - payload: ToRialtoMessagePayload, - rialto_to_this_conversion_rate: Option, - ) -> Option { - estimate_message_dispatch_and_delivery_fee::( - &payload, - WithRialtoMessageBridge::RELAYER_FEE_PERCENT, - rialto_to_this_conversion_rate, - ).ok() - } - - fn message_details( - lane: bp_messages::LaneId, - begin: bp_messages::MessageNonce, - end: bp_messages::MessageNonce, - ) -> Vec> { - bridge_runtime_common::messages_api::outbound_message_details::< - Runtime, - WithRialtoMessagesInstance, - WithRialtoMessageBridge, - >(lane, begin, end) - } - } - - #[cfg(feature = "runtime-benchmarks")] - impl frame_benchmarking::Benchmark for Runtime { - fn benchmark_metadata(extra: bool) -> ( - Vec, - Vec, - ) { - use frame_benchmarking::{Benchmarking, BenchmarkList}; - use frame_support::traits::StorageInfoTrait; - - use pallet_bridge_messages::benchmarking::Pallet as MessagesBench; - - let mut list = Vec::::new(); - - list_benchmark!(list, extra, pallet_bridge_token_swap, BridgeRialtoTokenSwap); - list_benchmark!(list, extra, pallet_bridge_messages, MessagesBench::); - list_benchmark!(list, extra, pallet_bridge_grandpa, BridgeRialtoGrandpa); - - let storage_info = AllPalletsWithSystem::storage_info(); - return (list, storage_info) - } - - fn dispatch_benchmark( - config: frame_benchmarking::BenchmarkConfig, - ) -> Result, sp_runtime::RuntimeString> { - use frame_benchmarking::{Benchmarking, BenchmarkBatch, TrackedStorageKey}; - - let whitelist: Vec = vec![ - // Block Number - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), - // Execution Phase - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), - // Event Count - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), - // System Events - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), - // Caller 0 Account - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da946c154ffd9992e395af90b5b13cc6f295c77033fce8a9045824a6690bbf99c6db269502f0a8d1d2a008542d5690a0749").to_vec().into(), - ]; - - let mut batches = Vec::::new(); - let params = (&config, &whitelist); - - use bridge_runtime_common::messages_benchmarking::{prepare_message_delivery_proof, prepare_message_proof, prepare_outbound_message}; - use bridge_runtime_common::messages; - use pallet_bridge_messages::benchmarking::{ - Pallet as MessagesBench, - Config as MessagesConfig, - MessageDeliveryProofParams, - MessageParams, - MessageProofParams, - }; - use rialto_messages::WithRialtoMessageBridge; - - impl MessagesConfig for Runtime { - fn maximal_message_size() -> u32 { - messages::source::maximal_message_size::() - } - - fn bridged_relayer_id() -> Self::InboundRelayer { - [0u8; 32].into() - } - - fn account_balance(account: &Self::AccountId) -> Self::OutboundMessageFee { - 
pallet_balances::Pallet::::free_balance(account) - } - - fn endow_account(account: &Self::AccountId) { - pallet_balances::Pallet::::make_free_balance_be( - account, - Balance::MAX / 100, - ); - } - - fn prepare_outbound_message( - params: MessageParams, - ) -> (rialto_messages::ToRialtoMessagePayload, Balance) { - (prepare_outbound_message::(params), Self::message_fee()) - } - - fn prepare_message_proof( - params: MessageProofParams, - ) -> (rialto_messages::FromRialtoMessagesProof, Weight) { - prepare_message_proof::( - params, - &VERSION, - Balance::MAX / 100, - ) - } - - fn prepare_message_delivery_proof( - params: MessageDeliveryProofParams, - ) -> rialto_messages::ToRialtoMessagesDeliveryProof { - prepare_message_delivery_proof::( - params, - ) - } - - fn is_message_dispatched(nonce: bp_messages::MessageNonce) -> bool { - frame_system::Pallet::::events() - .into_iter() - .map(|event_record| event_record.event) - .any(|event| matches!( - event, - Event::BridgeDispatch(pallet_bridge_dispatch::Event::::MessageDispatched( - _, ([0, 0, 0, 0], nonce_from_event), _, - )) if nonce_from_event == nonce - )) - } - } - - use pallet_bridge_token_swap::benchmarking::Config as TokenSwapConfig; - - impl TokenSwapConfig for Runtime { - fn initialize_environment() { - let relayers_fund_account = pallet_bridge_messages::relayer_fund_account_id::< - bp_millau::AccountId, - bp_millau::AccountIdConverter, - >(); - pallet_balances::Pallet::::make_free_balance_be( - &relayers_fund_account, - Balance::MAX / 100, - ); - } - } - - add_benchmark!( - params, - batches, - pallet_bridge_messages, - MessagesBench:: - ); - add_benchmark!(params, batches, pallet_bridge_grandpa, BridgeRialtoGrandpa); - add_benchmark!(params, batches, pallet_bridge_token_swap, BridgeRialtoTokenSwap); - - Ok(batches) - } - } -} - -/// Rialto account ownership digest from Millau. -/// -/// The byte vector returned by this function should be signed with a Rialto account private key. -/// This way, the owner of `millau_account_id` on Millau proves that the Rialto account private key -/// is also under his control. -pub fn millau_to_rialto_account_ownership_digest( - rialto_call: &Call, - millau_account_id: AccountId, - rialto_spec_version: SpecVersion, -) -> sp_std::vec::Vec -where - Call: codec::Encode, - AccountId: codec::Encode, - SpecVersion: codec::Encode, -{ - pallet_bridge_dispatch::account_ownership_digest( - rialto_call, - millau_account_id, - rialto_spec_version, - bp_runtime::MILLAU_CHAIN_ID, - bp_runtime::RIALTO_CHAIN_ID, - ) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn call_size() { - const BRIDGES_PALLETS_MAX_CALL_SIZE: usize = 200; - assert!( - core::mem::size_of::>() <= - BRIDGES_PALLETS_MAX_CALL_SIZE - ); - assert!( - core::mem::size_of::>() <= - BRIDGES_PALLETS_MAX_CALL_SIZE - ); - const MAX_CALL_SIZE: usize = 230; // value from polkadot-runtime tests - assert!(core::mem::size_of::() <= MAX_CALL_SIZE); - } -} diff --git a/polkadot/bridges/bin/millau/runtime/src/rialto_messages.rs b/polkadot/bridges/bin/millau/runtime/src/rialto_messages.rs deleted file mode 100644 index d925d805dd0..00000000000 --- a/polkadot/bridges/bin/millau/runtime/src/rialto_messages.rs +++ /dev/null @@ -1,428 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
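// A minimal standalone sketch (not from the bridge code itself) of how the whitelisted
// storage keys in the benchmarking code above are built: each key is
// twox128(pallet prefix) ++ twox128(storage item name). Assumes only `sp-core` (for
// `twox_128`) and the `hex` crate; `well_known_key` is an illustrative helper.
use sp_core::hashing::twox_128;

fn well_known_key(pallet: &[u8], item: &[u8]) -> Vec<u8> {
    let mut key = twox_128(pallet).to_vec();
    key.extend_from_slice(&twox_128(item));
    key
}

fn main() {
    // The "Block Number" entry in the whitelist above is the System::Number storage value.
    assert_eq!(
        hex::encode(well_known_key(b"System", b"Number")),
        "26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac"
    );
}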
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Everything required to serve Millau <-> Rialto messages. - -use crate::Runtime; - -use bp_messages::{ - source_chain::{SenderOrigin, TargetHeaderChain}, - target_chain::{ProvedMessages, SourceHeaderChain}, - InboundLaneData, LaneId, Message, MessageNonce, Parameter as MessagesParameter, -}; -use bp_runtime::{Chain, ChainId, MILLAU_CHAIN_ID, RIALTO_CHAIN_ID}; -use bridge_runtime_common::messages::{self, MessageBridge, MessageTransaction}; -use codec::{Decode, Encode}; -use frame_support::{ - parameter_types, - weights::{DispatchClass, Weight}, - RuntimeDebug, -}; -use scale_info::TypeInfo; -use sp_runtime::{traits::Saturating, FixedPointNumber, FixedU128}; -use sp_std::{convert::TryFrom, ops::RangeInclusive}; - -/// Initial value of `RialtoToMillauConversionRate` parameter. -pub const INITIAL_RIALTO_TO_MILLAU_CONVERSION_RATE: FixedU128 = - FixedU128::from_inner(FixedU128::DIV); -/// Initial value of `RialtoFeeMultiplier` parameter. -pub const INITIAL_RIALTO_FEE_MULTIPLIER: FixedU128 = FixedU128::from_inner(FixedU128::DIV); - -parameter_types! { - /// Rialto to Millau conversion rate. Initially we treat both tokens as equal. - pub storage RialtoToMillauConversionRate: FixedU128 = INITIAL_RIALTO_TO_MILLAU_CONVERSION_RATE; - /// Fee multiplier value at Rialto chain. - pub storage RialtoFeeMultiplier: FixedU128 = INITIAL_RIALTO_FEE_MULTIPLIER; -} - -/// Message payload for Millau -> Rialto messages. -pub type ToRialtoMessagePayload = - messages::source::FromThisChainMessagePayload; - -/// Message verifier for Millau -> Rialto messages. -pub type ToRialtoMessageVerifier = - messages::source::FromThisChainMessageVerifier; - -/// Message payload for Rialto -> Millau messages. -pub type FromRialtoMessagePayload = - messages::target::FromBridgedChainMessagePayload; - -/// Encoded Millau Call as it comes from Rialto. -pub type FromRialtoEncodedCall = messages::target::FromBridgedChainEncodedMessageCall; - -/// Messages proof for Rialto -> Millau messages. -pub type FromRialtoMessagesProof = messages::target::FromBridgedChainMessagesProof; - -/// Messages delivery proof for Millau -> Rialto messages. -pub type ToRialtoMessagesDeliveryProof = - messages::source::FromBridgedChainMessagesDeliveryProof; - -/// Call-dispatch based message dispatch for Rialto -> Millau messages. -pub type FromRialtoMessageDispatch = messages::target::FromBridgedChainMessageDispatch< - WithRialtoMessageBridge, - crate::Runtime, - pallet_balances::Pallet, - (), ->; - -/// Millau <-> Rialto message bridge. 
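// A minimal standalone check (not from the bridge code itself): `FixedU128::from_inner(FixedU128::DIV)`,
// used above for the initial conversion rate and fee multiplier, is simply the fixed-point
// encoding of 1, since `FixedU128` keeps 18 decimal places (DIV = 10^18). Assumes
// `sp-runtime`'s re-exported fixed-point types, as imported by the surrounding file.
use sp_runtime::{FixedPointNumber, FixedU128};

fn main() {
    assert_eq!(FixedU128::DIV, 1_000_000_000_000_000_000);
    assert_eq!(
        FixedU128::from_inner(FixedU128::DIV),
        FixedU128::saturating_from_integer(1),
    );
}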
-#[derive(RuntimeDebug, Clone, Copy)] -pub struct WithRialtoMessageBridge; - -impl MessageBridge for WithRialtoMessageBridge { - const RELAYER_FEE_PERCENT: u32 = 10; - const THIS_CHAIN_ID: ChainId = MILLAU_CHAIN_ID; - const BRIDGED_CHAIN_ID: ChainId = RIALTO_CHAIN_ID; - const BRIDGED_MESSAGES_PALLET_NAME: &'static str = bp_millau::WITH_MILLAU_MESSAGES_PALLET_NAME; - - type ThisChain = Millau; - type BridgedChain = Rialto; - - fn bridged_balance_to_this_balance( - bridged_balance: bp_rialto::Balance, - bridged_to_this_conversion_rate_override: Option, - ) -> bp_millau::Balance { - let conversion_rate = bridged_to_this_conversion_rate_override - .unwrap_or_else(|| RialtoToMillauConversionRate::get()); - bp_millau::Balance::try_from(conversion_rate.saturating_mul_int(bridged_balance)) - .unwrap_or(bp_millau::Balance::MAX) - } -} - -/// Millau chain from message lane point of view. -#[derive(RuntimeDebug, Clone, Copy)] -pub struct Millau; - -impl messages::ChainWithMessages for Millau { - type Hash = bp_millau::Hash; - type AccountId = bp_millau::AccountId; - type Signer = bp_millau::AccountSigner; - type Signature = bp_millau::Signature; - type Weight = Weight; - type Balance = bp_millau::Balance; -} - -impl messages::ThisChainWithMessages for Millau { - type Origin = crate::Origin; - type Call = crate::Call; - - fn is_message_accepted(send_origin: &Self::Origin, lane: &LaneId) -> bool { - // lanes 0x00000000 && 0x00000001 are accepting any paid messages, while - // `TokenSwapMessageLane` only accepts messages from token swap pallet - let token_swap_dedicated_lane = crate::TokenSwapMessagesLane::get(); - match *lane { - [0, 0, 0, 0] | [0, 0, 0, 1] => send_origin.linked_account().is_some(), - _ if *lane == token_swap_dedicated_lane => matches!( - send_origin.caller, - crate::OriginCaller::BridgeRialtoTokenSwap( - pallet_bridge_token_swap::RawOrigin::TokenSwap { .. } - ) - ), - _ => false, - } - } - - fn maximal_pending_messages_at_outbound_lane() -> MessageNonce { - MessageNonce::MAX - } - - fn estimate_delivery_confirmation_transaction() -> MessageTransaction { - let inbound_data_size = InboundLaneData::::encoded_size_hint( - bp_millau::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, - 1, - 1, - ) - .unwrap_or(u32::MAX); - - MessageTransaction { - dispatch_weight: bp_millau::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT, - size: inbound_data_size - .saturating_add(bp_rialto::EXTRA_STORAGE_PROOF_SIZE) - .saturating_add(bp_millau::TX_EXTRA_BYTES), - } - } - - fn transaction_payment(transaction: MessageTransaction) -> bp_millau::Balance { - // `transaction` may represent transaction from the future, when multiplier value will - // be larger, so let's use slightly increased value - let multiplier = FixedU128::saturating_from_rational(110, 100) - .saturating_mul(pallet_transaction_payment::Pallet::::next_fee_multiplier()); - // in our testnets, both per-byte fee and weight-to-fee are 1:1 - messages::transaction_payment( - bp_millau::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic, - 1, - multiplier, - |weight| weight as _, - transaction, - ) - } -} - -/// Rialto chain from message lane point of view. 
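// A minimal standalone sketch (not from the bridge code itself) of the arithmetic in
// `bridged_balance_to_this_balance` above: a bridged (Rialto) balance is multiplied by the
// stored `RialtoToMillauConversionRate` to obtain the equivalent Millau balance. The 1.5
// rate below is an arbitrary illustration; assumes `sp-runtime`'s fixed-point types.
use sp_runtime::{FixedPointNumber, FixedU128};

fn main() {
    let conversion_rate = FixedU128::saturating_from_rational(3u128, 2u128); // 1.5
    let rialto_balance: u128 = 1_000;
    let millau_balance: u128 = conversion_rate.saturating_mul_int(rialto_balance);
    assert_eq!(millau_balance, 1_500);
}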
-#[derive(RuntimeDebug, Clone, Copy)] -pub struct Rialto; - -impl messages::ChainWithMessages for Rialto { - type Hash = bp_rialto::Hash; - type AccountId = bp_rialto::AccountId; - type Signer = bp_rialto::AccountSigner; - type Signature = bp_rialto::Signature; - type Weight = Weight; - type Balance = bp_rialto::Balance; -} - -impl messages::BridgedChainWithMessages for Rialto { - fn maximal_extrinsic_size() -> u32 { - bp_rialto::Rialto::max_extrinsic_size() - } - - fn message_weight_limits(_message_payload: &[u8]) -> RangeInclusive { - // we don't want to relay too large messages + keep reserve for future upgrades - let upper_limit = messages::target::maximal_incoming_message_dispatch_weight( - bp_rialto::Rialto::max_extrinsic_weight(), - ); - - // we're charging for payload bytes in `WithRialtoMessageBridge::transaction_payment` - // function - // - // this bridge may be used to deliver all kind of messages, so we're not making any - // assumptions about minimal dispatch weight here - - 0..=upper_limit - } - - fn estimate_delivery_transaction( - message_payload: &[u8], - include_pay_dispatch_fee_cost: bool, - message_dispatch_weight: Weight, - ) -> MessageTransaction { - let message_payload_len = u32::try_from(message_payload.len()).unwrap_or(u32::MAX); - let extra_bytes_in_payload = Weight::from(message_payload_len) - .saturating_sub(pallet_bridge_messages::EXPECTED_DEFAULT_MESSAGE_LENGTH.into()); - - MessageTransaction { - dispatch_weight: extra_bytes_in_payload - .saturating_mul(bp_rialto::ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT) - .saturating_add(bp_rialto::DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT) - .saturating_sub(if include_pay_dispatch_fee_cost { - 0 - } else { - bp_rialto::PAY_INBOUND_DISPATCH_FEE_WEIGHT - }) - .saturating_add(message_dispatch_weight), - size: message_payload_len - .saturating_add(bp_millau::EXTRA_STORAGE_PROOF_SIZE) - .saturating_add(bp_rialto::TX_EXTRA_BYTES), - } - } - - fn transaction_payment(transaction: MessageTransaction) -> bp_rialto::Balance { - // we don't have a direct access to the value of multiplier at Rialto chain - // => it is a messages module parameter - let multiplier = RialtoFeeMultiplier::get(); - // in our testnets, both per-byte fee and weight-to-fee are 1:1 - messages::transaction_payment( - bp_rialto::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic, - 1, - multiplier, - |weight| weight as _, - transaction, - ) - } -} - -impl TargetHeaderChain for Rialto { - type Error = &'static str; - // The proof is: - // - hash of the header this proof has been created with; - // - the storage proof or one or several keys; - // - id of the lane we prove state of. - type MessagesDeliveryProof = ToRialtoMessagesDeliveryProof; - - fn verify_message(payload: &ToRialtoMessagePayload) -> Result<(), Self::Error> { - messages::source::verify_chain_message::(payload) - } - - fn verify_messages_delivery_proof( - proof: Self::MessagesDeliveryProof, - ) -> Result<(LaneId, InboundLaneData), Self::Error> { - messages::source::verify_messages_delivery_proof::< - WithRialtoMessageBridge, - Runtime, - crate::RialtoGrandpaInstance, - >(proof) - } -} - -impl SourceHeaderChain for Rialto { - type Error = &'static str; - // The proof is: - // - hash of the header this proof has been created with; - // - the storage proof or one or several keys; - // - id of the lane we prove messages for; - // - inclusive range of messages nonces that are proved. 
- type MessagesProof = FromRialtoMessagesProof; - - fn verify_messages_proof( - proof: Self::MessagesProof, - messages_count: u32, - ) -> Result>, Self::Error> { - messages::target::verify_messages_proof::< - WithRialtoMessageBridge, - Runtime, - crate::RialtoGrandpaInstance, - >(proof, messages_count) - } -} - -impl SenderOrigin for crate::Origin { - fn linked_account(&self) -> Option { - match self.caller { - crate::OriginCaller::system(frame_system::RawOrigin::Signed(ref submitter)) => - Some(submitter.clone()), - crate::OriginCaller::system(frame_system::RawOrigin::Root) | - crate::OriginCaller::system(frame_system::RawOrigin::None) => - crate::RootAccountForPayments::get(), - crate::OriginCaller::BridgeRialtoTokenSwap( - pallet_bridge_token_swap::RawOrigin::TokenSwap { - ref swap_account_at_this_chain, - .. - }, - ) => Some(swap_account_at_this_chain.clone()), - _ => None, - } - } -} - -/// Millau -> Rialto message lane pallet parameters. -#[derive(RuntimeDebug, Clone, Encode, Decode, PartialEq, Eq, TypeInfo)] -pub enum MillauToRialtoMessagesParameter { - /// The conversion formula we use is: `MillauTokens = RialtoTokens * conversion_rate`. - RialtoToMillauConversionRate(FixedU128), -} - -impl MessagesParameter for MillauToRialtoMessagesParameter { - fn save(&self) { - match *self { - MillauToRialtoMessagesParameter::RialtoToMillauConversionRate(ref conversion_rate) => - RialtoToMillauConversionRate::set(conversion_rate), - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{DbWeight, RialtoGrandpaInstance, Runtime, WithRialtoMessagesInstance}; - - use bp_runtime::Chain; - use bridge_runtime_common::{ - assert_complete_bridge_types, - integrity::{ - assert_complete_bridge_constants, AssertBridgeMessagesPalletConstants, - AssertBridgePalletNames, AssertChainConstants, AssertCompleteBridgeConstants, - }, - messages, - }; - - #[test] - fn ensure_millau_message_lane_weights_are_correct() { - type Weights = pallet_bridge_messages::weights::MillauWeight; - - pallet_bridge_messages::ensure_weights_are_correct::( - bp_millau::DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT, - bp_millau::ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT, - bp_millau::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT, - bp_millau::PAY_INBOUND_DISPATCH_FEE_WEIGHT, - DbWeight::get(), - ); - - let max_incoming_message_proof_size = bp_rialto::EXTRA_STORAGE_PROOF_SIZE.saturating_add( - messages::target::maximal_incoming_message_size(bp_millau::Millau::max_extrinsic_size()), - ); - pallet_bridge_messages::ensure_able_to_receive_message::( - bp_millau::Millau::max_extrinsic_size(), - bp_millau::Millau::max_extrinsic_weight(), - max_incoming_message_proof_size, - messages::target::maximal_incoming_message_dispatch_weight( - bp_millau::Millau::max_extrinsic_weight(), - ), - ); - - let max_incoming_inbound_lane_data_proof_size = - bp_messages::InboundLaneData::<()>::encoded_size_hint( - bp_millau::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, - bp_millau::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX as _, - bp_millau::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX as _, - ) - .unwrap_or(u32::MAX); - pallet_bridge_messages::ensure_able_to_receive_confirmation::( - bp_millau::Millau::max_extrinsic_size(), - bp_millau::Millau::max_extrinsic_weight(), - max_incoming_inbound_lane_data_proof_size, - bp_millau::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, - bp_millau::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, - DbWeight::get(), - ); - } - - #[test] - fn ensure_bridge_integrity() { - assert_complete_bridge_types!( - runtime: Runtime, - 
with_bridged_chain_grandpa_instance: RialtoGrandpaInstance, - with_bridged_chain_messages_instance: WithRialtoMessagesInstance, - bridge: WithRialtoMessageBridge, - this_chain: bp_millau::Millau, - bridged_chain: bp_rialto::Rialto, - this_chain_account_id_converter: bp_millau::AccountIdConverter - ); - - assert_complete_bridge_constants::< - Runtime, - RialtoGrandpaInstance, - WithRialtoMessagesInstance, - WithRialtoMessageBridge, - bp_millau::Millau, - >(AssertCompleteBridgeConstants { - this_chain_constants: AssertChainConstants { - block_length: bp_millau::BlockLength::get(), - block_weights: bp_millau::BlockWeights::get(), - }, - messages_pallet_constants: AssertBridgeMessagesPalletConstants { - max_unrewarded_relayers_in_bridged_confirmation_tx: - bp_rialto::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, - max_unconfirmed_messages_in_bridged_confirmation_tx: - bp_rialto::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, - bridged_chain_id: bp_runtime::RIALTO_CHAIN_ID, - }, - pallet_names: AssertBridgePalletNames { - with_this_chain_messages_pallet_name: bp_millau::WITH_MILLAU_MESSAGES_PALLET_NAME, - with_bridged_chain_grandpa_pallet_name: bp_rialto::WITH_RIALTO_GRANDPA_PALLET_NAME, - with_bridged_chain_messages_pallet_name: - bp_rialto::WITH_RIALTO_MESSAGES_PALLET_NAME, - }, - }); - - assert_eq!( - RialtoToMillauConversionRate::key().to_vec(), - bp_runtime::storage_parameter_key( - bp_millau::RIALTO_TO_MILLAU_CONVERSION_RATE_PARAMETER_NAME - ) - .0, - ); - } -} diff --git a/polkadot/bridges/bin/rialto-parachain/node/Cargo.toml b/polkadot/bridges/bin/rialto-parachain/node/Cargo.toml deleted file mode 100644 index 41021a35ed2..00000000000 --- a/polkadot/bridges/bin/rialto-parachain/node/Cargo.toml +++ /dev/null @@ -1,91 +0,0 @@ -[package] -name = "rialto-parachain-collator" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -homepage = "https://substrate.dev" -repository = "https://github.com/paritytech/parity-bridges-common/" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[build-dependencies] -substrate-build-script-utils = { git = "https://github.com/paritytech/substrate", branch = "master" } - -[[bin]] -name = 'rialto-parachain-collator' - -[features] -default = [] -runtime-benchmarks = ['rialto-parachain-runtime/runtime-benchmarks'] - -[dependencies] -clap = { version = "3.1", features = ["derive"] } -derive_more = '0.99.2' -log = '0.4.14' -codec = { package = 'parity-scale-codec', version = '3.0.0' } -serde = { version = '1.0', features = ['derive'] } -hex-literal = '0.3.1' - -# RPC related Dependencies -jsonrpc-core = '18.0' - -# Local Dependencies -rialto-parachain-runtime = { path = '../runtime' } - -# Substrate Dependencies -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" } -frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "master" } - -pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } - -substrate-frame-rpc-system = { git = "https://github.com/paritytech/substrate", branch = "master" } -substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate", branch = "master" } - -## Substrate Client Dependencies -sc-basic-authorship = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-chain-spec = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-cli = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-client-api = { git = 
"https://github.com/paritytech/substrate", branch = "master" } -sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-executor = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-rpc-api = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-service = { git = "https://github.com/paritytech/substrate", branch = "master", features = ['wasmtime'] } -sc-telemetry = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-tracing = { git = "https://github.com/paritytech/substrate", branch = "master" } - -## Substrate Primitive Dependencies -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-offchain = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-session = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" } - -# Cumulus dependencies -cumulus-client-consensus-aura = { git = "https://github.com/paritytech/cumulus", branch = "master" } -cumulus-client-consensus-common = { git = "https://github.com/paritytech/cumulus", branch = "master" } -cumulus-client-collator = { git = "https://github.com/paritytech/cumulus", branch = "master" } -cumulus-client-cli = { git = "https://github.com/paritytech/cumulus", branch = "master" } -cumulus-client-network = { git = "https://github.com/paritytech/cumulus", branch = "master" } -cumulus-client-service = { git = "https://github.com/paritytech/cumulus", branch = "master" } -cumulus-primitives-core = { git = "https://github.com/paritytech/cumulus", branch = "master" } -cumulus-primitives-parachain-inherent = { git = "https://github.com/paritytech/cumulus", branch = "master" } -cumulus-relay-chain-interface = { git = "https://github.com/paritytech/cumulus", branch = "master" } -cumulus-relay-chain-inprocess-interface = { git = "https://github.com/paritytech/cumulus", branch = "master" } - -# Polkadot dependencies -polkadot-cli = { git = "https://github.com/paritytech/polkadot", branch = "master" } -polkadot-parachain = { git = "https://github.com/paritytech/polkadot", branch = "master" } -polkadot-primitives = { git = "https://github.com/paritytech/polkadot", branch = "master" } -polkadot-service = { git = "https://github.com/paritytech/polkadot", branch = "master" } 
-polkadot-test-service = { git = "https://github.com/paritytech/polkadot", branch = "master" } \ No newline at end of file diff --git a/polkadot/bridges/bin/rialto-parachain/node/build.rs b/polkadot/bridges/bin/rialto-parachain/node/build.rs deleted file mode 100644 index 8ba8a31e9a7..00000000000 --- a/polkadot/bridges/bin/rialto-parachain/node/build.rs +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed}; - -fn main() { - generate_cargo_keys(); - rerun_if_git_head_changed(); -} diff --git a/polkadot/bridges/bin/rialto-parachain/node/src/chain_spec.rs b/polkadot/bridges/bin/rialto-parachain/node/src/chain_spec.rs deleted file mode 100644 index 6a8e751677d..00000000000 --- a/polkadot/bridges/bin/rialto-parachain/node/src/chain_spec.rs +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright 2020-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use cumulus_primitives_core::ParaId; -use rialto_parachain_runtime::{AccountId, AuraId, Signature}; -use sc_chain_spec::{ChainSpecExtension, ChainSpecGroup}; -use sc_service::ChainType; -use serde::{Deserialize, Serialize}; -use sp_core::{sr25519, Pair, Public}; -use sp_runtime::traits::{IdentifyAccount, Verify}; - -/// Specialized `ChainSpec` for the normal parachain runtime. -pub type ChainSpec = - sc_service::GenericChainSpec; - -/// Helper function to generate a crypto pair from seed -pub fn get_from_seed(seed: &str) -> ::Public { - TPublic::Pair::from_string(&format!("//{}", seed), None) - .expect("static values are valid; qed") - .public() -} - -/// The extensions for the [`ChainSpec`]. -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ChainSpecGroup, ChainSpecExtension)] -#[serde(deny_unknown_fields)] -pub struct Extensions { - /// The relay chain of the Parachain. - pub relay_chain: String, - /// The id of the Parachain. - pub para_id: u32, -} - -impl Extensions { - /// Try to get the extension from the given `ChainSpec`. 
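// A minimal standalone sketch (not from the node code itself) of what the `get_from_seed`
// helper above does for the well-known dev accounts used by the chain specs below: it
// derives a public key from a `//Alice`-style development phrase. Assumes `sp-core`'s
// sr25519 types, as used by the surrounding file.
use sp_core::{sr25519, Pair as _};

fn main() {
    let alice_aura: sr25519::Public = sr25519::Pair::from_string("//Alice", None)
        .expect("static dev seed is valid")
        .public();
    println!("Alice's sr25519 public key: {:?}", alice_aura);
}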
- pub fn try_get(chain_spec: &dyn sc_service::ChainSpec) -> Option<&Self> { - sc_chain_spec::get_extension(chain_spec.extensions()) - } -} - -type AccountPublic = ::Signer; - -/// Helper function to generate an account ID from seed -pub fn get_account_id_from_seed(seed: &str) -> AccountId -where - AccountPublic: From<::Public>, -{ - AccountPublic::from(get_from_seed::(seed)).into_account() -} - -pub fn development_config(id: ParaId) -> ChainSpec { - // Give your base currency a unit name and decimal places - let mut properties = sc_chain_spec::Properties::new(); - properties.insert("tokenSymbol".into(), "UNIT".into()); - properties.insert("tokenDecimals".into(), 12.into()); - - ChainSpec::from_genesis( - // Name - "Development", - // ID - "dev", - ChainType::Local, - move || { - testnet_genesis( - get_account_id_from_seed::("Alice"), - vec![get_from_seed::("Alice"), get_from_seed::("Bob")], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - ], - id, - ) - }, - vec![], - None, - None, - None, - None, - Extensions { - relay_chain: "rococo-local".into(), // You MUST set this to the correct network! - para_id: id.into(), - }, - ) -} - -pub fn local_testnet_config(id: ParaId) -> ChainSpec { - // Give your base currency a unit name and decimal places - let mut properties = sc_chain_spec::Properties::new(); - properties.insert("tokenSymbol".into(), "UNIT".into()); - properties.insert("tokenDecimals".into(), 12.into()); - - ChainSpec::from_genesis( - // Name - "Local Testnet", - // ID - "local_testnet", - ChainType::Local, - move || { - testnet_genesis( - get_account_id_from_seed::("Alice"), - vec![get_from_seed::("Alice"), get_from_seed::("Bob")], - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ], - id, - ) - }, - Vec::new(), - None, - None, - None, - None, - Extensions { - relay_chain: "rococo-local".into(), // You MUST set this to the correct network! 
- para_id: id.into(), - }, - ) -} - -fn testnet_genesis( - root_key: AccountId, - initial_authorities: Vec, - endowed_accounts: Vec, - id: ParaId, -) -> rialto_parachain_runtime::GenesisConfig { - rialto_parachain_runtime::GenesisConfig { - system: rialto_parachain_runtime::SystemConfig { - code: rialto_parachain_runtime::WASM_BINARY - .expect("WASM binary was not build, please build it!") - .to_vec(), - }, - balances: rialto_parachain_runtime::BalancesConfig { - balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 60)).collect(), - }, - sudo: rialto_parachain_runtime::SudoConfig { key: Some(root_key) }, - parachain_info: rialto_parachain_runtime::ParachainInfoConfig { parachain_id: id }, - aura: rialto_parachain_runtime::AuraConfig { authorities: initial_authorities }, - aura_ext: Default::default(), - // parachain_system: Default::default(), - } -} diff --git a/polkadot/bridges/bin/rialto-parachain/node/src/cli.rs b/polkadot/bridges/bin/rialto-parachain/node/src/cli.rs deleted file mode 100644 index 89d049f022e..00000000000 --- a/polkadot/bridges/bin/rialto-parachain/node/src/cli.rs +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2020-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::chain_spec; -use clap::Parser; -use std::path::PathBuf; - -/// Sub-commands supported by the collator. -#[derive(Debug, Parser)] -pub enum Subcommand { - /// Export the genesis state of the parachain. - #[clap(name = "export-genesis-state")] - ExportGenesisState(ExportGenesisStateCommand), - - /// Export the genesis wasm of the parachain. - #[clap(name = "export-genesis-wasm")] - ExportGenesisWasm(ExportGenesisWasmCommand), - - /// Build a chain specification. - BuildSpec(sc_cli::BuildSpecCmd), - - /// Validate blocks. - CheckBlock(sc_cli::CheckBlockCmd), - - /// Export blocks. - ExportBlocks(sc_cli::ExportBlocksCmd), - - /// Export the state of a given block into a chain spec. - ExportState(sc_cli::ExportStateCmd), - - /// Import blocks. - ImportBlocks(sc_cli::ImportBlocksCmd), - - /// Remove the whole chain. - PurgeChain(cumulus_client_cli::PurgeChainCmd), - - /// Revert the chain to a previous state. - Revert(sc_cli::RevertCmd), - - /// The custom benchmark subcommmand benchmarking runtime pallets. - #[clap(name = "benchmark", about = "Benchmark runtime pallets.")] - Benchmark(frame_benchmarking_cli::BenchmarkCmd), -} - -/// Command for exporting the genesis state of the parachain -#[derive(Debug, Parser)] -pub struct ExportGenesisStateCommand { - /// Output file name or stdout if unspecified. - #[clap(parse(from_os_str))] - pub output: Option, - - /// Id of the parachain this state is for. - /// - /// Default: 100 - #[clap(long, conflicts_with = "chain")] - pub parachain_id: Option, - - /// Write output in binary. Default is to write in hex. 
- #[clap(short, long)] - pub raw: bool, - - /// The name of the chain for that the genesis state should be exported. - #[clap(long, conflicts_with = "parachain-id")] - pub chain: Option, -} - -/// Command for exporting the genesis wasm file. -#[derive(Debug, Parser)] -pub struct ExportGenesisWasmCommand { - /// Output file name or stdout if unspecified. - #[clap(parse(from_os_str))] - pub output: Option, - - /// Write output in binary. Default is to write in hex. - #[clap(short, long)] - pub raw: bool, - - /// The name of the chain for that the genesis wasm file should be exported. - #[clap(long)] - pub chain: Option, -} - -#[derive(Debug, Parser)] -#[clap( - propagate_version = true, - args_conflicts_with_subcommands = true, - subcommand_negates_reqs = true -)] -pub struct Cli { - #[clap(subcommand)] - pub subcommand: Option, - - #[clap(long)] - pub parachain_id: Option, - - #[clap(flatten)] - pub run: cumulus_client_cli::RunCmd, - - /// Relaychain arguments - #[clap(raw = true)] - pub relaychain_args: Vec, -} - -#[derive(Debug)] -pub struct RelayChainCli { - /// The actual relay chain cli object. - pub base: polkadot_cli::RunCmd, - - /// Optional chain id that should be passed to the relay chain. - pub chain_id: Option, - - /// The base path that should be used by the relay chain. - pub base_path: Option, -} - -impl RelayChainCli { - /// Parse the relay chain CLI parameters using the para chain `Configuration`. - pub fn new<'a>( - para_config: &sc_service::Configuration, - relay_chain_args: impl Iterator, - ) -> Self { - let extension = chain_spec::Extensions::try_get(&*para_config.chain_spec); - let chain_id = extension.map(|e| e.relay_chain.clone()); - let base_path = para_config.base_path.as_ref().map(|x| x.path().join("rialto-bridge-node")); - Self { base_path, chain_id, base: polkadot_cli::RunCmd::parse_from(relay_chain_args) } - } -} diff --git a/polkadot/bridges/bin/rialto-parachain/node/src/command.rs b/polkadot/bridges/bin/rialto-parachain/node/src/command.rs deleted file mode 100644 index 99ff6d2af9e..00000000000 --- a/polkadot/bridges/bin/rialto-parachain/node/src/command.rs +++ /dev/null @@ -1,441 +0,0 @@ -// Copyright 2020-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -use crate::{ - chain_spec, - cli::{Cli, RelayChainCli, Subcommand}, - service::{new_partial, ParachainRuntimeExecutor}, -}; -use codec::Encode; -use cumulus_client_service::genesis::generate_genesis_block; -use cumulus_primitives_core::ParaId; -use log::info; -use rialto_parachain_runtime::{Block, RuntimeApi}; -use sc_cli::{ - ChainSpec, CliConfiguration, DefaultConfigurationValues, ImportParams, KeystoreParams, - NetworkParams, Result, RuntimeVersion, SharedParams, SubstrateCli, -}; -use sc_service::config::{BasePath, PrometheusConfig}; -use sp_core::hexdisplay::HexDisplay; -use sp_runtime::traits::{Block as BlockT, AccountIdConversion}; -use std::{io::Write, net::SocketAddr}; - -fn load_spec( - id: &str, - para_id: ParaId, -) -> std::result::Result, String> { - Ok(match id { - "dev" => Box::new(chain_spec::development_config(para_id)), - "" | "local" => Box::new(chain_spec::local_testnet_config(para_id)), - path => Box::new(chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(path))?), - }) -} - -impl SubstrateCli for Cli { - fn impl_name() -> String { - "Parachain Collator Template".into() - } - - fn impl_version() -> String { - env!("SUBSTRATE_CLI_IMPL_VERSION").into() - } - - fn description() -> String { - format!( - "Parachain Collator Template\n\nThe command-line arguments provided first will be \ - passed to the parachain node, while the arguments provided after -- will be passed \ - to the relaychain node.\n\n\ - {} [parachain-args] -- [relaychain-args]", - Self::executable_name() - ) - } - - fn author() -> String { - env!("CARGO_PKG_AUTHORS").into() - } - - fn support_url() -> String { - "https://github.com/substrate-developer-hub/substrate-parachain-template/issues/new".into() - } - - fn copyright_start_year() -> i32 { - 2017 - } - - fn load_spec(&self, id: &str) -> std::result::Result, String> { - load_spec(id, self.parachain_id.unwrap_or(2000).into()) - } - - fn native_runtime_version(_: &Box) -> &'static RuntimeVersion { - &rialto_parachain_runtime::VERSION - } -} - -impl SubstrateCli for RelayChainCli { - fn impl_name() -> String { - "Parachain Collator Template".into() - } - - fn impl_version() -> String { - env!("SUBSTRATE_CLI_IMPL_VERSION").into() - } - - fn description() -> String { - "Parachain Collator Template\n\nThe command-line arguments provided first will be \ - passed to the parachain node, while the arguments provided after -- will be passed \ - to the relaychain node.\n\n\ - parachain-collator [parachain-args] -- [relaychain-args]" - .into() - } - - fn author() -> String { - env!("CARGO_PKG_AUTHORS").into() - } - - fn support_url() -> String { - "https://github.com/substrate-developer-hub/substrate-parachain-template/issues/new".into() - } - - fn copyright_start_year() -> i32 { - 2017 - } - - fn load_spec(&self, id: &str) -> std::result::Result, String> { - polkadot_cli::Cli::from_iter([RelayChainCli::executable_name()].iter()).load_spec(id) - } - - fn native_runtime_version(chain_spec: &Box) -> &'static RuntimeVersion { - polkadot_cli::Cli::native_runtime_version(chain_spec) - } -} - -fn extract_genesis_wasm(chain_spec: &dyn sc_service::ChainSpec) -> Result> { - let mut storage = chain_spec.build_storage()?; - - storage - .top - .remove(sp_core::storage::well_known_keys::CODE) - .ok_or_else(|| "Could not find wasm file in genesis state!".into()) -} - -macro_rules! 
construct_async_run { - (|$components:ident, $cli:ident, $cmd:ident, $config:ident| $( $code:tt )* ) => {{ - let runner = $cli.create_runner($cmd)?; - runner.async_run(|$config| { - let $components = new_partial::< - RuntimeApi, - ParachainRuntimeExecutor, - _ - >( - &$config, - crate::service::parachain_build_import_queue, - )?; - let task_manager = $components.task_manager; - { $( $code )* }.map(|v| (v, task_manager)) - }) - }} -} - -/// Parse command line arguments into service configuration. -pub fn run() -> Result<()> { - let cli = Cli::from_args(); - sp_core::crypto::set_default_ss58_version(sp_core::crypto::Ss58AddressFormat::custom( - rialto_parachain_runtime::SS58Prefix::get() as u16, - )); - - match &cli.subcommand { - Some(Subcommand::BuildSpec(cmd)) => { - let runner = cli.create_runner(cmd)?; - runner.sync_run(|config| cmd.run(config.chain_spec, config.network)) - }, - Some(Subcommand::CheckBlock(cmd)) => { - construct_async_run!(|components, cli, cmd, config| { - Ok(cmd.run(components.client, components.import_queue)) - }) - }, - Some(Subcommand::ExportBlocks(cmd)) => { - construct_async_run!(|components, cli, cmd, config| Ok( - cmd.run(components.client, config.database) - )) - }, - Some(Subcommand::ExportState(cmd)) => { - construct_async_run!(|components, cli, cmd, config| Ok( - cmd.run(components.client, config.chain_spec) - )) - }, - Some(Subcommand::ImportBlocks(cmd)) => { - construct_async_run!(|components, cli, cmd, config| { - Ok(cmd.run(components.client, components.import_queue)) - }) - }, - Some(Subcommand::PurgeChain(cmd)) => { - let runner = cli.create_runner(cmd)?; - - runner.sync_run(|config| { - let polkadot_cli = RelayChainCli::new( - &config, - [RelayChainCli::executable_name()].iter().chain(cli.relaychain_args.iter()), - ); - - let polkadot_config = SubstrateCli::create_configuration( - &polkadot_cli, - &polkadot_cli, - config.tokio_handle.clone(), - ) - .map_err(|err| format!("Relay chain argument error: {}", err))?; - - cmd.run(config, polkadot_config) - }) - }, - Some(Subcommand::Revert(cmd)) => { - construct_async_run!(|components, cli, cmd, config| Ok( - cmd.run(components.client, components.backend) - )) - }, - Some(Subcommand::ExportGenesisState(params)) => { - let mut builder = sc_cli::LoggerBuilder::new(""); - builder.with_profiling(sc_tracing::TracingReceiver::Log, ""); - let _ = builder.init(); - - let spec = load_spec( - ¶ms.chain.clone().unwrap_or_default(), - params.parachain_id.expect("Missing ParaId").into(), - )?; - let state_version = Cli::native_runtime_version(&spec).state_version(); - let block: Block = generate_genesis_block(&spec, state_version)?; - let raw_header = block.header().encode(); - let output_buf = if params.raw { - raw_header - } else { - format!("0x{:?}", HexDisplay::from(&block.header().encode())).into_bytes() - }; - - if let Some(output) = ¶ms.output { - std::fs::write(output, output_buf)?; - } else { - std::io::stdout().write_all(&output_buf)?; - } - - Ok(()) - }, - Some(Subcommand::ExportGenesisWasm(params)) => { - let mut builder = sc_cli::LoggerBuilder::new(""); - builder.with_profiling(sc_tracing::TracingReceiver::Log, ""); - let _ = builder.init(); - - let raw_wasm_blob = - extract_genesis_wasm(&*cli.load_spec(¶ms.chain.clone().unwrap_or_default())?)?; - let output_buf = if params.raw { - raw_wasm_blob - } else { - format!("0x{:?}", HexDisplay::from(&raw_wasm_blob)).into_bytes() - }; - - if let Some(output) = ¶ms.output { - std::fs::write(output, output_buf)?; - } else { - 
std::io::stdout().write_all(&output_buf)?; - } - - Ok(()) - }, - Some(Subcommand::Benchmark(cmd)) => - if cfg!(feature = "runtime-benchmarks") { - let runner = cli.create_runner(cmd)?; - - runner.sync_run(|config| cmd.run::(config)) - } else { - Err("Benchmarking wasn't enabled when building the node. \ - You can enable it with `--features runtime-benchmarks`." - .into()) - }, - None => { - let runner = cli.create_runner(&cli.run.normalize())?; - let collator_options = cli.run.collator_options(); - - runner.run_node_until_exit(|config| async move { - let para_id = - chain_spec::Extensions::try_get(&*config.chain_spec).map(|e| e.para_id); - - let polkadot_cli = RelayChainCli::new( - &config, - [RelayChainCli::executable_name()].iter().chain(cli.relaychain_args.iter()), - ); - - let id = ParaId::from(cli.parachain_id.or(para_id).expect("Missing ParaId")); - - let parachain_account = - AccountIdConversion::::into_account(&id); - - let state_version = - RelayChainCli::native_runtime_version(&config.chain_spec).state_version(); - let block: Block = generate_genesis_block(&config.chain_spec, state_version) - .map_err(|e| format!("{:?}", e))?; - let genesis_state = format!("0x{:?}", HexDisplay::from(&block.header().encode())); - - let polkadot_config = SubstrateCli::create_configuration( - &polkadot_cli, - &polkadot_cli, - config.tokio_handle.clone(), - ) - .map_err(|err| format!("Relay chain argument error: {}", err))?; - - info!("Parachain id: {:?}", id); - info!("Parachain Account: {}", parachain_account); - info!("Parachain genesis state: {}", genesis_state); - info!("Is collating: {}", if config.role.is_authority() { "yes" } else { "no" }); - - crate::service::start_node(config, polkadot_config, collator_options, id) - .await - .map(|r| r.0) - .map_err(Into::into) - }) - }, - } -} - -impl DefaultConfigurationValues for RelayChainCli { - fn p2p_listen_port() -> u16 { - 30334 - } - - fn rpc_ws_listen_port() -> u16 { - 9945 - } - - fn rpc_http_listen_port() -> u16 { - 9934 - } - - fn prometheus_listen_port() -> u16 { - 9616 - } -} - -impl CliConfiguration for RelayChainCli { - fn shared_params(&self) -> &SharedParams { - self.base.base.shared_params() - } - - fn import_params(&self) -> Option<&ImportParams> { - self.base.base.import_params() - } - - fn network_params(&self) -> Option<&NetworkParams> { - self.base.base.network_params() - } - - fn keystore_params(&self) -> Option<&KeystoreParams> { - self.base.base.keystore_params() - } - - fn base_path(&self) -> Result> { - Ok(self - .shared_params() - .base_path() - .or_else(|| self.base_path.clone().map(Into::into))) - } - - fn rpc_http(&self, default_listen_port: u16) -> Result> { - self.base.base.rpc_http(default_listen_port) - } - - fn rpc_ipc(&self) -> Result> { - self.base.base.rpc_ipc() - } - - fn rpc_ws(&self, default_listen_port: u16) -> Result> { - self.base.base.rpc_ws(default_listen_port) - } - - fn prometheus_config( - &self, - default_listen_port: u16, - chain_spec: &Box, - ) -> Result> { - self.base.base.prometheus_config(default_listen_port, chain_spec) - } - - fn init( - &self, - _support_url: &String, - _impl_version: &String, - _logger_hook: F, - _config: &sc_service::Configuration, - ) -> Result<()> - where - F: FnOnce(&mut sc_cli::LoggerBuilder, &sc_service::Configuration), - { - unreachable!("PolkadotCli is never initialized; qed"); - } - - fn chain_id(&self, is_dev: bool) -> Result { - let chain_id = self.base.base.chain_id(is_dev)?; - - Ok(if chain_id.is_empty() { self.chain_id.clone().unwrap_or_default() } else { 
chain_id }) - } - - fn role(&self, is_dev: bool) -> Result { - self.base.base.role(is_dev) - } - - fn transaction_pool(&self) -> Result { - self.base.base.transaction_pool() - } - - fn state_cache_child_ratio(&self) -> Result> { - self.base.base.state_cache_child_ratio() - } - - fn rpc_methods(&self) -> Result { - self.base.base.rpc_methods() - } - - fn rpc_ws_max_connections(&self) -> Result> { - self.base.base.rpc_ws_max_connections() - } - - fn rpc_cors(&self, is_dev: bool) -> Result>> { - self.base.base.rpc_cors(is_dev) - } - - fn default_heap_pages(&self) -> Result> { - self.base.base.default_heap_pages() - } - - fn force_authoring(&self) -> Result { - self.base.base.force_authoring() - } - - fn disable_grandpa(&self) -> Result { - self.base.base.disable_grandpa() - } - - fn max_runtime_instances(&self) -> Result> { - self.base.base.max_runtime_instances() - } - - fn announce_block(&self) -> Result { - self.base.base.announce_block() - } - - fn telemetry_endpoints( - &self, - chain_spec: &Box, - ) -> Result> { - self.base.base.telemetry_endpoints(chain_spec) - } -} diff --git a/polkadot/bridges/bin/rialto-parachain/node/src/lib.rs b/polkadot/bridges/bin/rialto-parachain/node/src/lib.rs deleted file mode 100644 index 3ec291596b7..00000000000 --- a/polkadot/bridges/bin/rialto-parachain/node/src/lib.rs +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2020-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -pub mod chain_spec; -pub mod service; diff --git a/polkadot/bridges/bin/rialto-parachain/node/src/main.rs b/polkadot/bridges/bin/rialto-parachain/node/src/main.rs deleted file mode 100644 index 2b4e0b438d1..00000000000 --- a/polkadot/bridges/bin/rialto-parachain/node/src/main.rs +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2020-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! 
Substrate Parachain Node Template CLI - -#![warn(missing_docs)] - -mod chain_spec; -#[macro_use] -mod service; -mod cli; -mod command; - -fn main() -> sc_cli::Result<()> { - command::run() -} diff --git a/polkadot/bridges/bin/rialto-parachain/node/src/service.rs b/polkadot/bridges/bin/rialto-parachain/node/src/service.rs deleted file mode 100644 index a2299e17457..00000000000 --- a/polkadot/bridges/bin/rialto-parachain/node/src/service.rs +++ /dev/null @@ -1,521 +0,0 @@ -// Copyright 2020-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Rialto parachain node service. -//! -//! The code is mostly copy of `polkadot-parachains/src/service.rs` file from Cumulus -//! repository with some parts removed. We have added two RPC extensions to the original -//! service: `pallet_transaction_payment_rpc::TransactionPaymentApi` and -//! `substrate_frame_rpc_system::SystemApi`. - -// std -use std::{sync::Arc, time::Duration}; - -// Local Runtime Types -use rialto_parachain_runtime::RuntimeApi; - -// Cumulus Imports -use cumulus_client_cli::CollatorOptions; -use cumulus_client_consensus_aura::{AuraConsensus, BuildAuraConsensusParams, SlotProportion}; -use cumulus_client_consensus_common::ParachainConsensus; -use cumulus_client_network::BlockAnnounceValidator; -use cumulus_client_service::{ - prepare_node_config, start_collator, start_full_node, StartCollatorParams, StartFullNodeParams, -}; -use cumulus_primitives_core::ParaId; -use cumulus_relay_chain_inprocess_interface::build_inprocess_relay_chain; -use cumulus_relay_chain_interface::{RelayChainError, RelayChainInterface}; - -// Substrate Imports -use sc_client_api::ExecutorProvider; -use sc_executor::{NativeElseWasmExecutor, NativeExecutionDispatch}; -use sc_network::NetworkService; -use sc_service::{Configuration, PartialComponents, Role, TFullBackend, TFullClient, TaskManager}; -use sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerHandle}; -use sp_api::ConstructRuntimeApi; -use sp_keystore::SyncCryptoStorePtr; -use sp_runtime::traits::BlakeTwo256; -use substrate_prometheus_endpoint::Registry; - -// Runtime type overrides -type BlockNumber = u32; -type Header = sp_runtime::generic::Header; -pub type Block = sp_runtime::generic::Block; -type Hash = sp_core::H256; - -pub type ParachainRuntimeExecutor = ExecutorDispatch; - -// Our native executor instance. -pub struct ExecutorDispatch; - -impl NativeExecutionDispatch for ExecutorDispatch { - type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; - - fn dispatch(method: &str, data: &[u8]) -> Option> { - rialto_parachain_runtime::api::dispatch(method, data) - } - - fn native_version() -> sc_executor::NativeVersion { - rialto_parachain_runtime::native_version() - } -} - -/// Starts a `ServiceBuilder` for a full service. 
-/// -/// Use this macro if you don't actually need the full service, but just the builder in order to -/// be able to perform chain operations. -#[allow(clippy::type_complexity)] -pub fn new_partial( - config: &Configuration, - build_import_queue: BIQ, -) -> Result< - PartialComponents< - TFullClient>, - TFullBackend, - (), - sc_consensus::DefaultImportQueue< - Block, - TFullClient>, - >, - sc_transaction_pool::FullPool< - Block, - TFullClient>, - >, - (Option, Option), - >, - sc_service::Error, -> -where - RuntimeApi: ConstructRuntimeApi>> - + Send - + Sync - + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt< - Block, - StateBackend = sc_client_api::StateBackendFor, Block>, - > + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder, - sc_client_api::StateBackendFor, Block>: sp_api::StateBackend, - Executor: NativeExecutionDispatch + 'static, - BIQ: FnOnce( - Arc>>, - &Configuration, - Option, - &TaskManager, - ) -> Result< - sc_consensus::DefaultImportQueue< - Block, - TFullClient>, - >, - sc_service::Error, - >, -{ - let telemetry = config - .telemetry_endpoints - .clone() - .filter(|x| !x.is_empty()) - .map(|endpoints| -> Result<_, sc_telemetry::Error> { - let worker = TelemetryWorker::new(16)?; - let telemetry = worker.handle().new_telemetry(endpoints); - Ok((worker, telemetry)) - }) - .transpose()?; - - let executor = sc_executor::NativeElseWasmExecutor::::new( - config.wasm_method, - config.default_heap_pages, - config.max_runtime_instances, - config.runtime_cache_size, - ); - - let (client, backend, keystore_container, task_manager) = - sc_service::new_full_parts::( - config, - telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), - executor, - )?; - let client = Arc::new(client); - - let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle()); - - let telemetry = telemetry.map(|(worker, telemetry)| { - task_manager.spawn_handle().spawn("telemetry", None, worker.run()); - telemetry - }); - - let transaction_pool = sc_transaction_pool::BasicPool::new_full( - config.transaction_pool.clone(), - config.role.is_authority().into(), - config.prometheus_registry(), - task_manager.spawn_essential_handle(), - client.clone(), - ); - - let import_queue = build_import_queue( - client.clone(), - config, - telemetry.as_ref().map(|telemetry| telemetry.handle()), - &task_manager, - )?; - - let params = PartialComponents { - backend, - client, - import_queue, - keystore_container, - task_manager, - transaction_pool, - select_chain: (), - other: (telemetry, telemetry_worker_handle), - }; - - Ok(params) -} - -/// Start a node with the given parachain `Configuration` and relay chain `Configuration`. -/// -/// This is the actual implementation that is abstract over the executor and the runtime api. 
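Both `new_partial` above and `start_node_impl` below are written against type parameters (`RuntimeApi`, `Executor`, plus builder closures) rather than concrete types, so a single body serves any runtime/executor pair that satisfies the bounds. A toy, std-only illustration of that shape, with ordinary trait bounds standing in for the runtime-API bounds:

    use std::fmt::Debug;

    // One generic body reused for different concrete types, the same way the
    // node service functions are generic over the runtime API and executor.
    fn start_with<Api: Debug, Exec: Fn(&Api) -> String>(api: Api, exec: Exec) -> String {
        format!("{:?} -> {}", api, exec(&api))
    }

    fn main() {
        println!("{}", start_with(42_u32, |n| format!("executed {n}")));
        println!("{}", start_with("rialto", |name| name.to_uppercase()));
    }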
-#[sc_tracing::logging::prefix_logs_with("Parachain")] -async fn start_node_impl( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - id: ParaId, - rpc_ext_builder: RB, - build_import_queue: BIQ, - build_consensus: BIC, -) -> sc_service::error::Result<( - TaskManager, - Arc>>, -)> -where - RuntimeApi: ConstructRuntimeApi>> - + Send - + Sync - + 'static, - RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue - + sp_api::Metadata - + sp_session::SessionKeys - + sp_api::ApiExt< - Block, - StateBackend = sc_client_api::StateBackendFor, Block>, - > + sp_offchain::OffchainWorkerApi - + sp_block_builder::BlockBuilder - + cumulus_primitives_core::CollectCollationInfo, - sc_client_api::StateBackendFor, Block>: sp_api::StateBackend, - Executor: NativeExecutionDispatch + 'static, - RB: Fn( - sc_rpc_api::DenyUnsafe, - Arc>>, - Arc< - sc_transaction_pool::FullPool< - Block, - TFullClient>, - >, - >, - ) -> jsonrpc_core::IoHandler - + Send - + 'static, - BIQ: FnOnce( - Arc>>, - &Configuration, - Option, - &TaskManager, - ) -> Result< - sc_consensus::DefaultImportQueue< - Block, - TFullClient>, - >, - sc_service::Error, - >, - BIC: FnOnce( - Arc>>, - Option<&Registry>, - Option, - &TaskManager, - Arc, - Arc< - sc_transaction_pool::FullPool< - Block, - TFullClient>, - >, - >, - Arc>, - SyncCryptoStorePtr, - bool, - ) -> Result>, sc_service::Error>, -{ - if matches!(parachain_config.role, Role::Light) { - return Err("Light client not supported!".into()) - } - - let parachain_config = prepare_node_config(parachain_config); - - let params = new_partial::(¶chain_config, build_import_queue)?; - let (mut telemetry, telemetry_worker_handle) = params.other; - - let mut task_manager = params.task_manager; - let (relay_chain_interface, collator_key) = build_inprocess_relay_chain( - polkadot_config, - ¶chain_config, - telemetry_worker_handle, - &mut task_manager, - ) - .map_err(|e| match e { - RelayChainError::ServiceError(polkadot_service::Error::Sub(x)) => x, - s => s.to_string().into(), - })?; - - let client = params.client.clone(); - let backend = params.backend.clone(); - let block_announce_validator = BlockAnnounceValidator::new(relay_chain_interface.clone(), id); - - let force_authoring = parachain_config.force_authoring; - let validator = parachain_config.role.is_authority(); - let prometheus_registry = parachain_config.prometheus_registry().cloned(); - let transaction_pool = params.transaction_pool.clone(); - let import_queue = cumulus_client_service::SharedImportQueue::new(params.import_queue); - let (network, system_rpc_tx, start_network) = - sc_service::build_network(sc_service::BuildNetworkParams { - config: ¶chain_config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - spawn_handle: task_manager.spawn_handle(), - import_queue: import_queue.clone(), - block_announce_validator_builder: Some(Box::new(|_| { - Box::new(block_announce_validator) - })), - warp_sync: None, - })?; - - let rpc_client = client.clone(); - let rpc_transaction_pool = transaction_pool.clone(); - let rpc_extensions_builder = Box::new(move |deny_unsafe, _| { - Ok(rpc_ext_builder(deny_unsafe, rpc_client.clone(), rpc_transaction_pool.clone())) - }); - - sc_service::spawn_tasks(sc_service::SpawnTasksParams { - rpc_extensions_builder, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - task_manager: &mut task_manager, - config: parachain_config, - keystore: params.keystore_container.sync_keystore(), - backend: 
backend.clone(), - network: network.clone(), - system_rpc_tx, - telemetry: telemetry.as_mut(), - })?; - - let announce_block = { - let network = network.clone(); - Arc::new(move |hash, data| network.announce_block(hash, data)) - }; - - let relay_chain_slot_duration = Duration::from_secs(6); - - if validator { - let parachain_consensus = build_consensus( - client.clone(), - prometheus_registry.as_ref(), - telemetry.as_ref().map(|t| t.handle()), - &task_manager, - relay_chain_interface.clone(), - transaction_pool, - network, - params.keystore_container.sync_keystore(), - force_authoring, - )?; - - let spawner = task_manager.spawn_handle(); - - let params = StartCollatorParams { - para_id: id, - block_status: client.clone(), - announce_block, - client: client.clone(), - task_manager: &mut task_manager, - relay_chain_interface, - spawner, - parachain_consensus, - import_queue, - collator_key: collator_key.expect("Command line arguments do not allow this. qed"), - relay_chain_slot_duration, - }; - - start_collator(params).await?; - } else { - let params = StartFullNodeParams { - client: client.clone(), - announce_block, - task_manager: &mut task_manager, - para_id: id, - relay_chain_interface, - relay_chain_slot_duration, - import_queue, - collator_options, - }; - - start_full_node(params)?; - } - - start_network.start_network(); - - Ok((task_manager, client)) -} - -/// Build the import queue for the the parachain runtime. -#[allow(clippy::type_complexity)] -pub fn parachain_build_import_queue( - client: Arc>>, - config: &Configuration, - telemetry: Option, - task_manager: &TaskManager, -) -> Result< - sc_consensus::DefaultImportQueue< - Block, - TFullClient>, - >, - sc_service::Error, -> { - let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; - - cumulus_client_consensus_aura::import_queue::< - sp_consensus_aura::sr25519::AuthorityPair, - _, - _, - _, - _, - _, - _, - >(cumulus_client_consensus_aura::ImportQueueParams { - block_import: client.clone(), - client: client.clone(), - create_inherent_data_providers: move |_, _| async move { - let time = sp_timestamp::InherentDataProvider::from_system_time(); - - let slot = - sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - *time, - slot_duration, - ); - - Ok((time, slot)) - }, - registry: config.prometheus_registry(), - can_author_with: sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()), - spawner: &task_manager.spawn_essential_handle(), - telemetry, - }) - .map_err(Into::into) -} - -/// Start a normal parachain node. 
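`parachain_build_import_queue` above (and the consensus builder further down) derive the current Aura slot from the wall-clock timestamp and the slot duration; `from_timestamp_and_slot_duration` is essentially an integer division of the two. A plain-std sketch of that relationship, assuming the 12-second slot duration this runtime configures:

    use std::time::{SystemTime, UNIX_EPOCH};

    // Slot number = Unix timestamp in milliseconds / slot duration in milliseconds.
    fn slot_for(now_ms: u64, slot_duration_ms: u64) -> u64 {
        now_ms / slot_duration_ms
    }

    fn main() {
        let now_ms = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("system clock is set after the Unix epoch")
            .as_millis() as u64;
        println!("current 12s slot: {}", slot_for(now_ms, 12_000));
    }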
-pub async fn start_node( - parachain_config: Configuration, - polkadot_config: Configuration, - collator_options: CollatorOptions, - id: ParaId, -) -> sc_service::error::Result<( - TaskManager, - Arc>>, -)> { - start_node_impl::( - parachain_config, - polkadot_config, - collator_options, - id, - |deny_unsafe, client, pool| { - use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApi}; - use substrate_frame_rpc_system::{FullSystem, SystemApi}; - - let mut io = jsonrpc_core::IoHandler::default(); - io.extend_with(SystemApi::to_delegate(FullSystem::new( - client.clone(), - pool, - deny_unsafe, - ))); - io.extend_with(TransactionPaymentApi::to_delegate(TransactionPayment::new(client))); - io - }, - parachain_build_import_queue, - |client, - prometheus_registry, - telemetry, - task_manager, - relay_chain_interface, - transaction_pool, - sync_oracle, - keystore, - force_authoring| { - let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?; - - let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording( - task_manager.spawn_handle(), - client.clone(), - transaction_pool, - prometheus_registry, - telemetry.clone(), - ); - - Ok(AuraConsensus::build::( - BuildAuraConsensusParams { - proposer_factory, - create_inherent_data_providers: move |_, (relay_parent, validation_data)| { - let relay_chain_interface = relay_chain_interface.clone(); - async move { - let parachain_inherent = - cumulus_primitives_parachain_inherent::ParachainInherentData::create_at( - relay_parent, - &relay_chain_interface, - &validation_data, - id, - ).await; - let time = sp_timestamp::InherentDataProvider::from_system_time(); - - let slot = sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - *time, - slot_duration, - ); - - let parachain_inherent = parachain_inherent.ok_or_else(|| { - Box::::from( - "Failed to create parachain inherent", - ) - })?; - Ok((time, slot, parachain_inherent)) - } - }, - block_import: client.clone(), - para_client: client, - backoff_authoring_blocks: Option::<()>::None, - sync_oracle, - keystore, - force_authoring, - slot_duration, - // We got around 500ms for proposing - block_proposal_slot_portion: SlotProportion::new(1f32 / 24f32), - telemetry, - max_block_proposal_slot_portion: None, - }, - )) - }, - ) - .await -} diff --git a/polkadot/bridges/bin/rialto-parachain/runtime/Cargo.toml b/polkadot/bridges/bin/rialto-parachain/runtime/Cargo.toml deleted file mode 100644 index 1d0870fcbcd..00000000000 --- a/polkadot/bridges/bin/rialto-parachain/runtime/Cargo.toml +++ /dev/null @@ -1,122 +0,0 @@ -[package] -name = "rialto-parachain-runtime" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -homepage = "https://substrate.dev" -repository = "https://github.com/paritytech/parity-bridges-common/" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[build-dependencies] -substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", branch = "master" } - -[dependencies] -codec = { package = 'parity-scale-codec', version = '3.0.0', default-features = false, features = ['derive']} -log = { version = "0.4.14", default-features = false } -scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } -serde = { version = '1.0', optional = true, features = ['derive'] } - -# Bridge depedencies - -bp-rialto-parachain = { path = "../../../primitives/chain-rialto-parachain", default-features = false } - -# Substrate Dependencies -## Substrate Primitive 
Dependencies -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-offchain = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } - -## Substrate FRAME Dependencies -frame-executive = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } -frame-support = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "master" } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-system-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } -frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } - -## Substrate Pallet Dependencies -pallet-aura = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-sudo = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } - -# Cumulus Dependencies -cumulus-pallet-aura-ext = { git = "https://github.com/paritytech/cumulus", branch = "master", default-features = false } -cumulus-pallet-parachain-system = { git = "https://github.com/paritytech/cumulus", branch = "master", default-features = false } -cumulus-pallet-dmp-queue = { git = "https://github.com/paritytech/cumulus", branch = "master", default-features = false } -cumulus-pallet-xcm = { git = "https://github.com/paritytech/cumulus", branch = "master", default-features = false } 
-cumulus-pallet-xcmp-queue = { git = "https://github.com/paritytech/cumulus", branch = "master", default-features = false } -cumulus-primitives-core = { git = "https://github.com/paritytech/cumulus", branch = "master", default-features = false } -cumulus-primitives-timestamp = { git = "https://github.com/paritytech/cumulus", branch = "master", default-features = false } -cumulus-primitives-utility = { git = "https://github.com/paritytech/cumulus", branch = "master", default-features = false } -parachain-info = { git = "https://github.com/paritytech/cumulus", branch = "master", default-features = false } - -# Polkadot Dependencies -polkadot-parachain = { git = "https://github.com/paritytech/polkadot", branch = "master", default-features = false } -xcm = { git = "https://github.com/paritytech/polkadot", branch = "master", default-features = false } -xcm-builder = { git = "https://github.com/paritytech/polkadot", branch = "master", default-features = false } -xcm-executor = { git = "https://github.com/paritytech/polkadot", branch = "master", default-features = false } -pallet-xcm = { git = "https://github.com/paritytech/polkadot", branch = "master", default-features = false } - -[features] -default = ['std'] -runtime-benchmarks = [ - 'sp-runtime/runtime-benchmarks', - 'frame-benchmarking', - 'frame-support/runtime-benchmarks', - 'frame-system-benchmarking', - 'frame-system/runtime-benchmarks', - 'pallet-balances/runtime-benchmarks', - 'pallet-timestamp/runtime-benchmarks', -] -std = [ - "bp-rialto-parachain/std", - "codec/std", - "log/std", - "scale-info/std", - "serde", - "sp-api/std", - "sp-std/std", - "sp-io/std", - "sp-core/std", - "sp-runtime/std", - "sp-version/std", - "sp-offchain/std", - "sp-session/std", - "sp-block-builder/std", - "sp-transaction-pool/std", - "sp-inherents/std", - "frame-support/std", - "frame-executive/std", - "frame-system/std", - "pallet-balances/std", - "pallet-randomness-collective-flip/std", - "pallet-timestamp/std", - "pallet-sudo/std", - "pallet-transaction-payment/std", - "parachain-info/std", - "cumulus-pallet-aura-ext/std", - "cumulus-pallet-parachain-system/std", - "cumulus-pallet-xcmp-queue/std", - "cumulus-pallet-xcm/std", - "cumulus-primitives-core/std", - "cumulus-primitives-timestamp/std", - "cumulus-primitives-utility/std", - "xcm/std", - "xcm-builder/std", - "xcm-executor/std", - "pallet-aura/std", - "sp-consensus-aura/std", -] diff --git a/polkadot/bridges/bin/rialto-parachain/runtime/build.rs b/polkadot/bridges/bin/rialto-parachain/runtime/build.rs deleted file mode 100644 index 65095bd1b7e..00000000000 --- a/polkadot/bridges/bin/rialto-parachain/runtime/build.rs +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
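The `std` feature list above lets the same crate build both natively and as a `no_std` Wasm runtime: the runtime's `std` feature fans out to every dependency's `std` feature, and the source gates std-only items on it. A minimal sketch of the Rust side of that pattern (item names are illustrative):

    #![cfg_attr(not(feature = "std"), no_std)]

    // Compiled only for native builds, i.e. with `--features std`.
    #[cfg(feature = "std")]
    pub fn build_flavour() -> &'static str {
        "native (std)"
    }

    // Wasm (`no_std`) builds compile this body instead.
    #[cfg(not(feature = "std"))]
    pub fn build_flavour() -> &'static str {
        "wasm (no_std)"
    }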
- -use substrate_wasm_builder::WasmBuilder; - -fn main() { - WasmBuilder::new() - .with_current_project() - .export_heap_base() - .import_memory() - .build() -} diff --git a/polkadot/bridges/bin/rialto-parachain/runtime/src/lib.rs b/polkadot/bridges/bin/rialto-parachain/runtime/src/lib.rs deleted file mode 100644 index 555db03a9c4..00000000000 --- a/polkadot/bridges/bin/rialto-parachain/runtime/src/lib.rs +++ /dev/null @@ -1,663 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! The Rialto parachain runtime. This can be compiled with `#[no_std]`, ready for Wasm. -//! -//! Originally a copy of runtime from https://github.com/substrate-developer-hub/substrate-parachain-template. - -#![cfg_attr(not(feature = "std"), no_std)] -// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. -#![recursion_limit = "256"] - -// Make the WASM binary available. -#[cfg(feature = "std")] -include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); - -use sp_api::impl_runtime_apis; -use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; -use sp_runtime::{ - create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdLookup, Block as BlockT}, - transaction_validity::{TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, -}; - -use sp_std::prelude::*; -#[cfg(feature = "std")] -use sp_version::NativeVersion; -use sp_version::RuntimeVersion; - -// A few exports that help ease life for downstream crates. 
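The build.rs just above compiles the runtime to Wasm, and this lib.rs pulls the result back in through `OUT_DIR` via the `include!` near the top of the file. A stripped-down sketch of that build-script hand-off, generating a plain constant instead of a Wasm blob (file and constant names are illustrative):

    // build.rs
    use std::{env, fs, path::Path};

    fn main() {
        let out_dir = env::var("OUT_DIR").expect("cargo always sets OUT_DIR for build scripts");
        let dest = Path::new(&out_dir).join("generated.rs");
        fs::write(&dest, "pub const GENERATED_BY_BUILD_RS: &str = \"hello from build.rs\";")
            .expect("build scripts may write into OUT_DIR");
    }

    // src/main.rs
    include!(concat!(env!("OUT_DIR"), "/generated.rs"));

    fn main() {
        println!("{GENERATED_BY_BUILD_RS}");
    }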
-pub use frame_support::{ - construct_runtime, match_types, parameter_types, - traits::{Everything, IsInVec, Randomness}, - weights::{ - constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND}, - DispatchClass, IdentityFee, Weight, - }, - StorageValue, -}; -pub use frame_system::{Call as SystemCall, EnsureRoot}; -pub use pallet_balances::Call as BalancesCall; -pub use pallet_timestamp::Call as TimestampCall; -pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; -#[cfg(any(feature = "std", test))] -pub use sp_runtime::BuildStorage; -pub use sp_runtime::{MultiAddress, Perbill, Permill}; - -pub use bp_rialto_parachain::{ - AccountId, Balance, BlockLength, BlockNumber, BlockWeights, Hash, Hasher as Hashing, Header, - Index, Signature, MAXIMUM_BLOCK_WEIGHT, -}; - -// Polkadot & XCM imports -use pallet_xcm::XcmPassthrough; -use polkadot_parachain::primitives::Sibling; -use xcm::latest::prelude::*; -use xcm_builder::{ - AccountId32Aliases, AllowTopLevelPaidExecutionFrom, AllowUnpaidExecutionFrom, CurrencyAdapter, - EnsureXcmOrigin, FixedWeightBounds, IsConcrete, LocationInverter, NativeAsset, - ParentAsSuperuser, ParentIsPreset, RelayChainAsNative, SiblingParachainAsNative, - SiblingParachainConvertsVia, SignedAccountId32AsNative, SignedToAccountId32, - SovereignSignedViaLocation, TakeWeightCredit, UsingComponents, -}; -use xcm_executor::{Config, XcmExecutor}; - -/// The address format for describing accounts. -pub type Address = MultiAddress; -/// Block type as expected by this runtime. -pub type Block = generic::Block; -/// A Block signed with a Justification -pub type SignedBlock = generic::SignedBlock; -/// BlockId type as expected by this runtime. -pub type BlockId = generic::BlockId; -/// The SignedExtension to the basic transaction logic. -pub type SignedExtra = ( - frame_system::CheckNonZeroSender, - frame_system::CheckSpecVersion, - frame_system::CheckGenesis, - frame_system::CheckEra, - frame_system::CheckNonce, - frame_system::CheckWeight, - pallet_transaction_payment::ChargeTransactionPayment, -); -/// Unchecked extrinsic type as expected by this runtime. -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; -/// Extrinsic type that has already been checked. -pub type CheckedExtrinsic = generic::CheckedExtrinsic; -/// Executive: handles dispatch to the various modules. -pub type Executive = frame_executive::Executive< - Runtime, - Block, - frame_system::ChainContext, - Runtime, - AllPalletsWithSystem, ->; - -impl_opaque_keys! { - pub struct SessionKeys { - pub aura: Aura, - } -} - -/// This runtime version. -#[sp_version::runtime_version] -pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("template-parachain"), - impl_name: create_runtime_str!("template-parachain"), - authoring_version: 1, - spec_version: 1, - impl_version: 0, - apis: RUNTIME_API_VERSIONS, - transaction_version: 1, - state_version: 0, -}; - -/// This determines the average expected block time that we are targeting. -/// Blocks will be produced at a minimum duration defined by `SLOT_DURATION`. -/// `SLOT_DURATION` is picked up by `pallet_timestamp` which is in turn picked -/// up by `pallet_aura` to implement `fn slot_duration()`. -/// -/// Change this to adjust the block time. -pub const MILLISECS_PER_BLOCK: u64 = 12000; - -pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; - -pub const EPOCH_DURATION_IN_BLOCKS: u32 = 10 * MINUTES; - -// Time is measured by number of blocks. 
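The time constants that follow express minutes, hours and days in blocks, given the 12-second target block time declared just above (`MILLISECS_PER_BLOCK = 12000`). The same conversion as a few lines of const arithmetic:

    // 12-second blocks: 5 per minute, 300 per hour, 7_200 per day.
    const MILLISECS_PER_BLOCK: u64 = 12_000;
    const MINUTES: u32 = (60_000 / MILLISECS_PER_BLOCK) as u32;
    const HOURS: u32 = MINUTES * 60;
    const DAYS: u32 = HOURS * 24;

    fn main() {
        assert_eq!((MINUTES, HOURS, DAYS), (5, 300, 7_200));
        println!("blocks per minute/hour/day: {MINUTES}/{HOURS}/{DAYS}");
    }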
-pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber); -pub const HOURS: BlockNumber = MINUTES * 60; -pub const DAYS: BlockNumber = HOURS * 24; - -// Unit = the base number of indivisible units for balances -pub const UNIT: Balance = 1_000_000_000_000; -pub const MILLIUNIT: Balance = 1_000_000_000; -pub const MICROUNIT: Balance = 1_000_000; - -// 1 in 4 blocks (on average, not counting collisions) will be primary babe blocks. -pub const PRIMARY_PROBABILITY: (u64, u64) = (1, 4); - -/// The version information used to identify this runtime when compiled natively. -#[cfg(feature = "std")] -pub fn native_version() -> NativeVersion { - NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } -} - -parameter_types! { - pub const BlockHashCount: BlockNumber = 250; - pub const Version: RuntimeVersion = VERSION; - pub const SS58Prefix: u8 = 48; -} - -// Configure FRAME pallets to include in runtime. - -impl frame_system::Config for Runtime { - /// The identifier used to distinguish between accounts. - type AccountId = AccountId; - /// The aggregated dispatch type that is available for extrinsics. - type Call = Call; - /// The lookup mechanism to get account ID from whatever is passed in dispatchers. - type Lookup = AccountIdLookup; - /// The index type for storing how many extrinsics an account has signed. - type Index = Index; - /// The index type for blocks. - type BlockNumber = BlockNumber; - /// The type for hashing blocks and tries. - type Hash = Hash; - /// The hashing algorithm used. - type Hashing = Hashing; - /// The header type. - type Header = generic::Header; - /// The ubiquitous event type. - type Event = Event; - /// The ubiquitous origin type. - type Origin = Origin; - /// Maximum number of block number to block hash mappings to keep (oldest pruned first). - type BlockHashCount = BlockHashCount; - /// Runtime version. - type Version = Version; - /// Converts a module to an index of this module in the runtime. - type PalletInfo = PalletInfo; - type AccountData = pallet_balances::AccountData; - /// What to do if a new account is created. - type OnNewAccount = (); - /// What to do if an account is fully reaped from the system. - type OnKilledAccount = (); - /// The weight of database operations that the runtime can invoke. - type DbWeight = (); - /// The basic call filter to use in dispatchable. - type BaseCallFilter = Everything; - /// Weight information for the extrinsics of this pallet. - type SystemWeightInfo = (); - /// Block & extrinsics weights: base values and limits. - type BlockWeights = BlockWeights; - /// The maximum length of a block (in bytes). - type BlockLength = BlockLength; - /// This is used as an identifier of the chain. 42 is the generic substrate prefix. - type SS58Prefix = SS58Prefix; - /// The action to take on a Runtime Upgrade - type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; - type MaxConsumers = frame_support::traits::ConstU32<16>; -} - -parameter_types! { - pub const MinimumPeriod: u64 = SLOT_DURATION / 2; -} - -impl pallet_timestamp::Config for Runtime { - /// A timestamp: milliseconds since the Unix epoch. - type Moment = u64; - type OnTimestampSet = (); - type MinimumPeriod = MinimumPeriod; - type WeightInfo = (); -} - -parameter_types! 
{ - pub const ExistentialDeposit: u128 = MILLIUNIT; - pub const TransferFee: u128 = MILLIUNIT; - pub const CreationFee: u128 = MILLIUNIT; - pub const TransactionByteFee: u128 = MICROUNIT; - pub const OperationalFeeMultiplier: u8 = 5; - pub const MaxLocks: u32 = 50; - pub const MaxReserves: u32 = 50; -} - -impl pallet_balances::Config for Runtime { - /// The type for recording an account's balance. - type Balance = Balance; - /// The ubiquitous event type. - type Event = Event; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - type WeightInfo = pallet_balances::weights::SubstrateWeight; - type MaxLocks = MaxLocks; - type MaxReserves = MaxReserves; - type ReserveIdentifier = [u8; 8]; -} - -impl pallet_transaction_payment::Config for Runtime { - type Event = Event; - type OnChargeTransaction = pallet_transaction_payment::CurrencyAdapter; - type TransactionByteFee = TransactionByteFee; - type OperationalFeeMultiplier = OperationalFeeMultiplier; - type WeightToFee = IdentityFee; - type FeeMultiplierUpdate = (); -} - -impl pallet_sudo::Config for Runtime { - type Call = Call; - type Event = Event; -} - -parameter_types! { - pub const ReservedXcmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT / 4; - pub const ReservedDmpWeight: Weight = MAXIMUM_BLOCK_WEIGHT / 4; -} - -impl cumulus_pallet_parachain_system::Config for Runtime { - type Event = Event; - type OnSystemEvent = (); - type SelfParaId = parachain_info::Pallet; - type OutboundXcmpMessageSource = XcmpQueue; - type DmpMessageHandler = DmpQueue; - type ReservedDmpWeight = ReservedDmpWeight; - type XcmpMessageHandler = XcmpQueue; - type ReservedXcmpWeight = ReservedXcmpWeight; -} - -impl parachain_info::Config for Runtime {} - -impl cumulus_pallet_aura_ext::Config for Runtime {} - -impl pallet_randomness_collective_flip::Config for Runtime {} - -parameter_types! { - pub const RelayLocation: MultiLocation = MultiLocation::parent(); - pub const RelayNetwork: NetworkId = NetworkId::Polkadot; - pub RelayOrigin: Origin = cumulus_pallet_xcm::Origin::Relay.into(); - pub Ancestry: MultiLocation = Parachain(ParachainInfo::parachain_id().into()).into(); -} - -/// Type for specifying how a `MultiLocation` can be converted into an `AccountId`. This is used -/// when determining ownership of accounts for asset transacting and when attempting to use XCM -/// `Transact` in order to determine the dispatch Origin. -pub type LocationToAccountId = ( - // The parent (Relay-chain) origin converts to the default `AccountId`. - ParentIsPreset, - // Sibling parachain origins convert to AccountId via the `ParaId::into`. - SiblingParachainConvertsVia, - // Straight up local `AccountId32` origins just alias directly to `AccountId`. - AccountId32Aliases, -); - -/// Means for transacting assets on this chain. -pub type LocalAssetTransactor = CurrencyAdapter< - // Use this currency: - Balances, - // Use this currency when it is a fungible asset matching the given location or name: - IsConcrete, - // Do a simple punn to convert an AccountId32 MultiLocation into a native chain account ID: - LocationToAccountId, - // Our chain's account ID type (we can't get away without mentioning it explicitly): - AccountId, - // We don't track any teleports. - (), ->; - -/// This is the type we use to convert an (incoming) XCM origin into a local `Origin` instance, -/// ready for dispatching a transaction with XCM `Transact`. There is an `OriginKind` which can -/// biases the kind of local `Origin` it will become. 
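The converter and barrier tuples defined next are tried left to right: each element either claims the incoming origin or defers to the next one, and the first match wins. A dependency-free sketch of that dispatch pattern (the string-based converters are purely illustrative, not the XCM types):

    // Each converter either accepts the input (Some) or passes (None).
    type OriginConverter = fn(&str) -> Option<String>;

    fn relay_converter(s: &str) -> Option<String> {
        if s == "parent" { Some("Relay origin".to_string()) } else { None }
    }

    fn sibling_converter(s: &str) -> Option<String> {
        s.strip_prefix("sibling:").map(|id| format!("SiblingPara({id}) origin"))
    }

    fn convert_origin(input: &str, converters: &[OriginConverter]) -> Option<String> {
        // The first converter that returns Some wins.
        converters.iter().find_map(|convert| convert(input))
    }

    fn main() {
        let converters: [OriginConverter; 2] = [relay_converter, sibling_converter];
        assert_eq!(
            convert_origin("sibling:2000", &converters),
            Some("SiblingPara(2000) origin".to_string())
        );
        assert_eq!(convert_origin("unknown", &converters), None);
    }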
-pub type XcmOriginToTransactDispatchOrigin = ( - // Sovereign account converter; this attempts to derive an `AccountId` from the origin location - // using `LocationToAccountId` and then turn that into the usual `Signed` origin. Useful for - // foreign chains who want to have a local sovereign account on this chain which they control. - SovereignSignedViaLocation, - // Native converter for Relay-chain (Parent) location; will converts to a `Relay` origin when - // recognised. - RelayChainAsNative, - // Native converter for sibling Parachains; will convert to a `SiblingPara` origin when - // recognised. - SiblingParachainAsNative, - // Superuser converter for the Relay-chain (Parent) location. This will allow it to issue a - // transaction from the Root origin. - ParentAsSuperuser, - // Native signed account converter; this just converts an `AccountId32` origin into a normal - // `Origin::Signed` origin of the same 32-byte value. - SignedAccountId32AsNative, - // Xcm origins can be represented natively under the Xcm pallet's Xcm origin. - XcmPassthrough, -); - -parameter_types! { - // One XCM operation is 1_000_000 weight - almost certainly a conservative estimate. - pub UnitWeightCost: Weight = 1_000_000; - // One UNIT buys 1 second of weight. - pub const WeightPrice: (MultiLocation, u128) = (MultiLocation::parent(), UNIT); - pub const MaxInstructions: u32 = 100; - pub const MaxAuthorities: u32 = 100_000; -} - -match_types! { - pub type ParentOrParentsUnitPlurality: impl Contains = { - MultiLocation { parents: 1, interior: Here } | - MultiLocation { parents: 1, interior: X1(Plurality { id: BodyId::Unit, .. }) } - }; -} - -pub type Barrier = ( - TakeWeightCredit, - AllowTopLevelPaidExecutionFrom, - AllowUnpaidExecutionFrom, - // ^^^ Parent & its unit plurality gets free execution -); - -pub struct XcmConfig; -impl Config for XcmConfig { - type Call = Call; - type XcmSender = XcmRouter; - // How to withdraw and deposit an asset. - type AssetTransactor = LocalAssetTransactor; - type OriginConverter = XcmOriginToTransactDispatchOrigin; - type IsReserve = NativeAsset; - type IsTeleporter = NativeAsset; // <- should be enough to allow teleportation of UNIT - type LocationInverter = LocationInverter; - type Barrier = Barrier; - type Weigher = FixedWeightBounds; - type Trader = UsingComponents, RelayLocation, AccountId, Balances, ()>; - type ResponseHandler = PolkadotXcm; - type AssetTrap = PolkadotXcm; - type AssetClaims = PolkadotXcm; - type SubscriptionService = PolkadotXcm; -} - -/// No local origins on this chain are allowed to dispatch XCM sends/executions. -pub type LocalOriginToLocation = SignedToAccountId32; - -/// The means for routing XCM messages which are not for local execution into the right message -/// queues. -pub type XcmRouter = ( - // Two routers - use UMP to communicate with the relay chain: - cumulus_primitives_utility::ParentAsUmp, - // ..and XCMP to communicate with the sibling chains. 
- XcmpQueue, -); - -impl pallet_xcm::Config for Runtime { - type Event = Event; - type SendXcmOrigin = EnsureXcmOrigin; - type XcmRouter = XcmRouter; - type ExecuteXcmOrigin = EnsureXcmOrigin; - type XcmExecuteFilter = Everything; - type XcmExecutor = XcmExecutor; - type XcmTeleportFilter = Everything; - type XcmReserveTransferFilter = Everything; - type Weigher = FixedWeightBounds; - type LocationInverter = LocationInverter; - type Origin = Origin; - type Call = Call; - const VERSION_DISCOVERY_QUEUE_SIZE: u32 = 100; - type AdvertisedXcmVersion = pallet_xcm::CurrentXcmVersion; -} - -impl cumulus_pallet_xcm::Config for Runtime { - type Event = Event; - type XcmExecutor = XcmExecutor; -} - -impl cumulus_pallet_xcmp_queue::Config for Runtime { - type Event = Event; - type XcmExecutor = XcmExecutor; - type ChannelInfo = ParachainSystem; - type VersionWrapper = (); - type ExecuteOverweightOrigin = EnsureRoot; - type ControllerOrigin = EnsureRoot; - type ControllerOriginConverter = XcmOriginToTransactDispatchOrigin; - type WeightInfo = (); -} - -impl cumulus_pallet_dmp_queue::Config for Runtime { - type Event = Event; - type XcmExecutor = XcmExecutor; - type ExecuteOverweightOrigin = frame_system::EnsureRoot; -} - -impl pallet_aura::Config for Runtime { - type AuthorityId = AuraId; - type DisabledValidators = (); - type MaxAuthorities = MaxAuthorities; -} - -// /// Configure the pallet template in pallets/template. -// impl template::Config for Runtime { -// type Event = Event; -// } - -// Create the runtime by composing the FRAME pallets that were previously configured. -construct_runtime!( - pub enum Runtime where - Block = Block, - NodeBlock = generic::Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { - System: frame_system::{Pallet, Call, Storage, Config, Event}, - Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, - Sudo: pallet_sudo::{Pallet, Call, Storage, Config, Event}, - RandomnessCollectiveFlip: pallet_randomness_collective_flip::{Pallet, Storage}, - TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event}, - - ParachainSystem: cumulus_pallet_parachain_system::{Pallet, Call, Storage, Inherent, Event} = 20, - ParachainInfo: parachain_info::{Pallet, Storage, Config} = 21, - - Balances: pallet_balances::{Pallet, Call, Storage, Config, Event} = 30, - - Aura: pallet_aura::{Pallet, Config}, - AuraExt: cumulus_pallet_aura_ext::{Pallet, Config}, - - // XCM helpers. - XcmpQueue: cumulus_pallet_xcmp_queue::{Pallet, Call, Storage, Event} = 50, - PolkadotXcm: pallet_xcm::{Pallet, Call, Event, Origin} = 51, - CumulusXcm: cumulus_pallet_xcm::{Pallet, Call, Event, Origin} = 52, - DmpQueue: cumulus_pallet_dmp_queue::{Pallet, Call, Storage, Event} = 53, - - // //Template - // TemplatePallet: template::{Pallet, Call, Storage, Event}, - } -); - -#[cfg(feature = "runtime-benchmarks")] -#[macro_use] -extern crate frame_benchmarking; - -#[cfg(feature = "runtime-benchmarks")] -mod benches { - define_benchmarks!( - [frame_system, SystemBench::] - [pallet_balances, Balances] - [pallet_timestamp, Timestamp] - ); -} - -impl_runtime_apis! 
{ - impl sp_api::Core for Runtime { - fn version() -> RuntimeVersion { - VERSION - } - - fn execute_block(block: Block) { - Executive::execute_block(block) - } - - fn initialize_block(header: &::Header) { - Executive::initialize_block(header) - } - } - - impl sp_api::Metadata for Runtime { - fn metadata() -> OpaqueMetadata { - OpaqueMetadata::new(Runtime::metadata().into()) - } - } - - impl sp_block_builder::BlockBuilder for Runtime { - fn apply_extrinsic( - extrinsic: ::Extrinsic, - ) -> ApplyExtrinsicResult { - Executive::apply_extrinsic(extrinsic) - } - - fn finalize_block() -> ::Header { - Executive::finalize_block() - } - - fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { - data.create_extrinsics() - } - - fn check_inherents( - block: Block, - data: sp_inherents::InherentData, - ) -> sp_inherents::CheckInherentsResult { - data.check_extrinsics(&block) - } - } - - impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { - fn validate_transaction( - source: TransactionSource, - tx: ::Extrinsic, - block_hash: ::Hash, - ) -> TransactionValidity { - Executive::validate_transaction(source, tx, block_hash) - } - } - - impl sp_offchain::OffchainWorkerApi for Runtime { - fn offchain_worker(header: &::Header) { - Executive::offchain_worker(header) - } - } - - impl sp_session::SessionKeys for Runtime { - fn decode_session_keys( - encoded: Vec, - ) -> Option, KeyTypeId)>> { - SessionKeys::decode_into_raw_public_keys(&encoded) - } - - fn generate_session_keys(seed: Option>) -> Vec { - SessionKeys::generate(seed) - } - } - - impl sp_consensus_aura::AuraApi for Runtime { - fn slot_duration() -> sp_consensus_aura::SlotDuration { - sp_consensus_aura::SlotDuration::from_millis(Aura::slot_duration()) - } - - fn authorities() -> Vec { - Aura::authorities().to_vec() - } - } - - impl cumulus_primitives_core::CollectCollationInfo for Runtime { - fn collect_collation_info(header: &::Header) -> cumulus_primitives_core::CollationInfo { - ParachainSystem::collect_collation_info(header) - } - } - - impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { - fn account_nonce(account: AccountId) -> Index { - System::account_nonce(account) - } - } - - impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi for Runtime { - fn query_info( - uxt: ::Extrinsic, - len: u32, - ) -> pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo { - TransactionPayment::query_info(uxt, len) - } - fn query_fee_details( - uxt: ::Extrinsic, - len: u32, - ) -> pallet_transaction_payment::FeeDetails { - TransactionPayment::query_fee_details(uxt, len) - } - } - - #[cfg(feature = "runtime-benchmarks")] - impl frame_benchmarking::Benchmark for Runtime { - fn dispatch_benchmark( - config: frame_benchmarking::BenchmarkConfig - ) -> Result, sp_runtime::RuntimeString> { - use frame_benchmarking::{Benchmarking, BenchmarkBatch, TrackedStorageKey}; - - use frame_system_benchmarking::Pallet as SystemBench; - impl frame_system_benchmarking::Config for Runtime {} - - let whitelist: Vec = vec![ - // Block Number - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef702a5c1b19ab7a04f536c519aca4983ac").to_vec().into(), - // Total Issuance - hex_literal::hex!("c2261276cc9d1f8598ea4b6a74b15c2f57c875e4cff74148e4628f264b974c80").to_vec().into(), - // Execution Phase - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7ff553b5a9862a516939d82b3d3d8661a").to_vec().into(), - // Event Count - 
hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef70a98fdbe9ce6c55837576c60c7af3850").to_vec().into(), - // System Events - hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), - ]; - - let mut batches = Vec::::new(); - let params = (&config, &whitelist); - add_benchmarks!(params, batches); - - Ok(batches) - } - } -} - -struct CheckInherents; - -impl cumulus_pallet_parachain_system::CheckInherents for CheckInherents { - fn check_inherents( - block: &Block, - relay_state_proof: &cumulus_pallet_parachain_system::RelayChainStateProof, - ) -> sp_inherents::CheckInherentsResult { - let relay_chain_slot = relay_state_proof - .read_slot() - .expect("Could not read the relay chain slot from the proof"); - - let inherent_data = - cumulus_primitives_timestamp::InherentDataProvider::from_relay_chain_slot_and_duration( - relay_chain_slot, - sp_std::time::Duration::from_secs(6), - ) - .create_inherent_data() - .expect("Could not create the timestamp inherent data"); - - inherent_data.check_extrinsics(block) - } -} - -cumulus_pallet_parachain_system::register_validate_block!( - Runtime = Runtime, - BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, - CheckInherents = CheckInherents, -); diff --git a/polkadot/bridges/bin/rialto/node/Cargo.toml b/polkadot/bridges/bin/rialto/node/Cargo.toml deleted file mode 100644 index 512aa3ba710..00000000000 --- a/polkadot/bridges/bin/rialto/node/Cargo.toml +++ /dev/null @@ -1,92 +0,0 @@ -[package] -name = "rialto-bridge-node" -description = "Substrate node compatible with Rialto runtime" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -build = "build.rs" -homepage = "https://substrate.dev" -repository = "https://github.com/paritytech/parity-bridges-common/" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -clap = { version = "3.1", features = ["derive"] } -futures = "0.3" -jsonrpc-core = "18.0" -kvdb = "0.11" -kvdb-rocksdb = "0.15" -lru = "0.7" -serde_json = "1.0.59" -thiserror = "1.0" - -# Bridge dependencies - -bp-runtime = { path = "../../../primitives/runtime" } -bp-rialto = { path = "../../../primitives/chain-rialto" } -pallet-bridge-messages = { path = "../../../modules/messages" } -rialto-runtime = { path = "../runtime" } - -# Substrate Dependencies - -beefy-gadget = { git = "https://github.com/paritytech/substrate", branch = "master" } -beefy-gadget-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } -beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "master" } -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" } -frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "master" } -frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master" } -node-inspect = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-mmr-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-basic-authorship = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-cli = { git = "https://github.com/paritytech/substrate", branch = 
"master", features = ["wasmtime"] } -sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-consensus-babe = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-consensus-slots = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-consensus-uncles = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-executor = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-finality-grandpa-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-mmr-primitives = { git = "https://github.com/paritytech/substrate", branch = "master"} -sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-service = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-telemetry = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-authorship = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-consensus-babe = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-offchain = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-session = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master" } -substrate-frame-rpc-system = { git = "https://github.com/paritytech/substrate", branch = "master" } -substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate", branch = "master" } - -# Polkadot Dependencies - -polkadot-node-core-pvf = { git = "https://github.com/paritytech/polkadot", branch = "master" } -polkadot-primitives = { git = "https://github.com/paritytech/polkadot", branch = "master" } -polkadot-runtime-parachains = { git = "https://github.com/paritytech/polkadot", branch = "master" } -polkadot-service = { git = "https://github.com/paritytech/polkadot", branch = "master", default-features = false, features = [ "full-node", "polkadot-native" ] } - -[build-dependencies] -substrate-build-script-utils = { git = "https://github.com/paritytech/substrate", branch = "master" } -frame-benchmarking-cli = { git = 
"https://github.com/paritytech/substrate", branch = "master" } - -[features] -default = [] -runtime-benchmarks = [ - "rialto-runtime/runtime-benchmarks", -] diff --git a/polkadot/bridges/bin/rialto/node/build.rs b/polkadot/bridges/bin/rialto/node/build.rs deleted file mode 100644 index d9b50049e26..00000000000 --- a/polkadot/bridges/bin/rialto/node/build.rs +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed}; - -fn main() { - generate_cargo_keys(); - - rerun_if_git_head_changed(); -} diff --git a/polkadot/bridges/bin/rialto/node/src/chain_spec.rs b/polkadot/bridges/bin/rialto/node/src/chain_spec.rs deleted file mode 100644 index 10315e33c85..00000000000 --- a/polkadot/bridges/bin/rialto/node/src/chain_spec.rs +++ /dev/null @@ -1,302 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use beefy_primitives::crypto::AuthorityId as BeefyId; -use bp_rialto::derive_account_from_millau_id; -use polkadot_primitives::v2::{AssignmentId, ValidatorId}; -use rialto_runtime::{ - AccountId, BabeConfig, BalancesConfig, BeefyConfig, BridgeMillauMessagesConfig, - ConfigurationConfig, GenesisConfig, GrandpaConfig, SessionConfig, SessionKeys, Signature, - SudoConfig, SystemConfig, WASM_BINARY, -}; -use serde_json::json; -use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; -use sp_consensus_babe::AuthorityId as BabeId; -use sp_core::{sr25519, Pair, Public}; -use sp_finality_grandpa::AuthorityId as GrandpaId; -use sp_runtime::traits::{IdentifyAccount, Verify}; - -/// Specialized `ChainSpec`. This is a specialization of the general Substrate ChainSpec type. -pub type ChainSpec = - sc_service::GenericChainSpec; - -/// The chain specification option. This is expected to come in from the CLI and -/// is little more than one of a number of alternatives which can easily be converted -/// from a string (`--chain=...`) into a `ChainSpec`. -#[derive(Clone, Debug)] -pub enum Alternative { - /// Whatever the current runtime is, with just Alice as an auth. 
- Development, - /// Whatever the current runtime is, with simple Alice/Bob/Charlie/Dave/Eve auths. - LocalTestnet, -} - -/// Helper function to generate a crypto pair from seed -pub fn get_from_seed(seed: &str) -> ::Public { - TPublic::Pair::from_string(&format!("//{}", seed), None) - .expect("static values are valid; qed") - .public() -} - -type AccountPublic = ::Signer; - -/// Helper function to generate an account ID from seed -pub fn get_account_id_from_seed(seed: &str) -> AccountId -where - AccountPublic: From<::Public>, -{ - AccountPublic::from(get_from_seed::(seed)).into_account() -} - -/// Helper function to generate authority keys. -pub fn get_authority_keys_from_seed( - s: &str, -) -> (AccountId, BabeId, BeefyId, GrandpaId, ValidatorId, AssignmentId, AuthorityDiscoveryId) { - ( - get_account_id_from_seed::(s), - get_from_seed::(s), - get_from_seed::(s), - get_from_seed::(s), - get_from_seed::(s), - get_from_seed::(s), - get_from_seed::(s), - ) -} - -impl Alternative { - /// Get an actual chain config from one of the alternatives. - pub(crate) fn load(self) -> ChainSpec { - let properties = Some( - json!({ - "tokenDecimals": 9, - "tokenSymbol": "RLT" - }) - .as_object() - .expect("Map given; qed") - .clone(), - ); - match self { - Alternative::Development => ChainSpec::from_genesis( - "Rialto Development", - "rialto_dev", - sc_service::ChainType::Development, - || { - testnet_genesis( - vec![get_authority_keys_from_seed("Alice")], - get_account_id_from_seed::("Alice"), - endowed_accounts(), - true, - ) - }, - vec![], - None, - None, - None, - properties, - Default::default(), - ), - Alternative::LocalTestnet => ChainSpec::from_genesis( - "Rialto Local", - "rialto_local", - sc_service::ChainType::Local, - || { - testnet_genesis( - vec![ - get_authority_keys_from_seed("Alice"), - get_authority_keys_from_seed("Bob"), - get_authority_keys_from_seed("Charlie"), - get_authority_keys_from_seed("Dave"), - get_authority_keys_from_seed("Eve"), - ], - get_account_id_from_seed::("Alice"), - endowed_accounts(), - true, - ) - }, - vec![], - None, - None, - None, - properties, - Default::default(), - ), - } - } -} - -/// We're using the same set of endowed accounts on all Millau chains (dev/local) to make -/// sure that all accounts, required for bridge to be functional (e.g. relayers fund account, -/// accounts used by relayers in our test deployments, accounts used for demonstration -/// purposes), are all available on these chains. 
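The dev-seed helpers used by this chain spec follow the usual Substrate pattern: a `//Alice`-style URI is fed to `Pair::from_string`, and the resulting sr25519 public key is converted into an `AccountId`. A self-contained sketch, assuming only the sp-core and sp-runtime crates:

```rust
// Sketch of the seed-derivation helpers, with generic parameters written out.
use sp_core::{sr25519, Pair, Public};
use sp_runtime::{
    traits::{IdentifyAccount, Verify},
    MultiSignature,
};

type Signature = MultiSignature;
type AccountPublic = <Signature as Verify>::Signer;
type AccountId = <AccountPublic as IdentifyAccount>::AccountId;

/// Derive a public key of type `TPublic` from a dev seed such as "Alice".
fn get_from_seed<TPublic: Public>(seed: &str) -> <TPublic::Pair as Pair>::Public {
    TPublic::Pair::from_string(&format!("//{}", seed), None)
        .expect("static values are valid; qed")
        .public()
}

/// Turn the derived public key into an account identifier.
fn get_account_id_from_seed<TPublic: Public>(seed: &str) -> AccountId
where
    AccountPublic: From<<TPublic::Pair as Pair>::Public>,
{
    AccountPublic::from(get_from_seed::<TPublic>(seed)).into_account()
}

fn main() {
    let alice = get_account_id_from_seed::<sr25519::Public>("Alice");
    println!("Alice dev account: {}", alice);
}
```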
-fn endowed_accounts() -> Vec { - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("George"), - get_account_id_from_seed::("Harry"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - get_account_id_from_seed::("George//stash"), - get_account_id_from_seed::("Harry//stash"), - get_account_id_from_seed::("MillauMessagesOwner"), - get_account_id_from_seed::("WithMillauTokenSwap"), - pallet_bridge_messages::relayer_fund_account_id::< - bp_rialto::AccountId, - bp_rialto::AccountIdConverter, - >(), - derive_account_from_millau_id(bp_runtime::SourceAccount::Account( - get_account_id_from_seed::("Alice"), - )), - derive_account_from_millau_id(bp_runtime::SourceAccount::Account( - get_account_id_from_seed::("Bob"), - )), - derive_account_from_millau_id(bp_runtime::SourceAccount::Account( - get_account_id_from_seed::("Charlie"), - )), - derive_account_from_millau_id(bp_runtime::SourceAccount::Account( - get_account_id_from_seed::("Dave"), - )), - derive_account_from_millau_id(bp_runtime::SourceAccount::Account( - get_account_id_from_seed::("Eve"), - )), - derive_account_from_millau_id(bp_runtime::SourceAccount::Account( - get_account_id_from_seed::("Ferdie"), - )), - ] -} - -fn session_keys( - babe: BabeId, - beefy: BeefyId, - grandpa: GrandpaId, - para_validator: ValidatorId, - para_assignment: AssignmentId, - authority_discovery: AuthorityDiscoveryId, -) -> SessionKeys { - SessionKeys { babe, beefy, grandpa, para_validator, para_assignment, authority_discovery } -} - -fn testnet_genesis( - initial_authorities: Vec<( - AccountId, - BabeId, - BeefyId, - GrandpaId, - ValidatorId, - AssignmentId, - AuthorityDiscoveryId, - )>, - root_key: AccountId, - endowed_accounts: Vec, - _enable_println: bool, -) -> GenesisConfig { - GenesisConfig { - system: SystemConfig { - code: WASM_BINARY.expect("Rialto development WASM not available").to_vec(), - }, - balances: BalancesConfig { - balances: endowed_accounts.iter().cloned().map(|k| (k, 1 << 50)).collect(), - }, - babe: BabeConfig { - authorities: Vec::new(), - epoch_config: Some(rialto_runtime::BABE_GENESIS_EPOCH_CONFIG), - }, - beefy: BeefyConfig { authorities: Vec::new() }, - grandpa: GrandpaConfig { authorities: Vec::new() }, - sudo: SudoConfig { key: Some(root_key) }, - session: SessionConfig { - keys: initial_authorities - .iter() - .map(|x| { - ( - x.0.clone(), - x.0.clone(), - session_keys( - x.1.clone(), - x.2.clone(), - x.3.clone(), - x.4.clone(), - x.5.clone(), - x.6.clone(), - ), - ) - }) - .collect::>(), - }, - authority_discovery: Default::default(), - hrmp: Default::default(), - // this configuration is exact copy of configuration from Polkadot repo - // (see /node/service/src/chain_spec.rs:default_parachains_host_configuration) - configuration: ConfigurationConfig { - config: polkadot_runtime_parachains::configuration::HostConfiguration { - validation_upgrade_cooldown: 2u32, - validation_upgrade_delay: 2, - code_retention_period: 1200, - max_code_size: polkadot_primitives::v2::MAX_CODE_SIZE, - max_pov_size: polkadot_primitives::v2::MAX_POV_SIZE, - max_head_data_size: 32 * 1024, - group_rotation_frequency: 20, - 
chain_availability_period: 4, - thread_availability_period: 4, - max_upward_queue_count: 8, - max_upward_queue_size: 1024 * 1024, - max_downward_message_size: 1024 * 1024, - ump_service_total_weight: 100_000_000_000, - max_upward_message_size: 50 * 1024, - max_upward_message_num_per_candidate: 5, - hrmp_sender_deposit: 0, - hrmp_recipient_deposit: 0, - hrmp_channel_max_capacity: 8, - hrmp_channel_max_total_size: 8 * 1024, - hrmp_max_parachain_inbound_channels: 4, - hrmp_max_parathread_inbound_channels: 4, - hrmp_channel_max_message_size: 1024 * 1024, - hrmp_max_parachain_outbound_channels: 4, - hrmp_max_parathread_outbound_channels: 4, - hrmp_max_message_num_per_candidate: 5, - dispute_period: 6, - no_show_slots: 2, - n_delay_tranches: 25, - needed_approvals: 2, - relay_vrf_modulo_samples: 2, - zeroth_delay_tranche_width: 0, - minimum_validation_upgrade_delay: 5, - ..Default::default() - }, - }, - paras: Default::default(), - bridge_millau_messages: BridgeMillauMessagesConfig { - owner: Some(get_account_id_from_seed::("MillauMessagesOwner")), - ..Default::default() - }, - } -} - -#[test] -fn derived_dave_account_is_as_expected() { - let dave = get_account_id_from_seed::("Dave"); - let derived: AccountId = - derive_account_from_millau_id(bp_runtime::SourceAccount::Account(dave)); - assert_eq!(derived.to_string(), "5HZhdv53gSJmWWtD8XR5Ypu4PgbT5JNWwGw2mkE75cN61w9t".to_string()); -} diff --git a/polkadot/bridges/bin/rialto/node/src/cli.rs b/polkadot/bridges/bin/rialto/node/src/cli.rs deleted file mode 100644 index bb7f54998dd..00000000000 --- a/polkadot/bridges/bin/rialto/node/src/cli.rs +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use clap::Parser; -use sc_cli::RunCmd; - -#[derive(Debug, Parser)] -pub struct Cli { - #[structopt(subcommand)] - pub subcommand: Option, - - #[structopt(flatten)] - pub run: RunCmd, -} - -/// Possible subcommands of the main binary. -#[derive(Debug, Parser)] -pub enum Subcommand { - /// Key management CLI utilities - #[clap(subcommand)] - Key(sc_cli::KeySubcommand), - - /// Verify a signature for a message, provided on `STDIN`, with a given (public or secret) key. - Verify(sc_cli::VerifyCmd), - - /// Generate a seed that provides a vanity address. - Vanity(sc_cli::VanityCmd), - - /// Sign a message, with a given (secret) key. - Sign(sc_cli::SignCmd), - - /// Build a chain specification. - BuildSpec(sc_cli::BuildSpecCmd), - - /// Validate blocks. - CheckBlock(sc_cli::CheckBlockCmd), - - /// Export blocks. - ExportBlocks(sc_cli::ExportBlocksCmd), - - /// Export the state of a given block into a chain spec. - ExportState(sc_cli::ExportStateCmd), - - /// Import blocks. - ImportBlocks(sc_cli::ImportBlocksCmd), - - /// Remove the whole chain. - PurgeChain(sc_cli::PurgeChainCmd), - - /// Revert the chain to a previous state. 
- Revert(sc_cli::RevertCmd), - - /// Inspect blocks or extrinsics. - Inspect(node_inspect::cli::InspectCmd), - - /// Benchmark runtime pallets. - Benchmark(frame_benchmarking_cli::BenchmarkCmd), - - /// FOR INTERNAL USE: analog of the "prepare-worker" command of the polkadot binary. - #[clap(name = "prepare-worker", hide = true)] - PvfPrepareWorker(ValidationWorkerCommand), - - /// FOR INTERNAL USE: analog of the "execute-worker" command of the polkadot binary. - #[clap(name = "execute-worker", hide = true)] - PvfExecuteWorker(ValidationWorkerCommand), -} - -/// Validation worker command. -#[derive(Debug, Parser)] -pub struct ValidationWorkerCommand { - /// The path to the validation host's socket. - pub socket_path: String, -} diff --git a/polkadot/bridges/bin/rialto/node/src/command.rs b/polkadot/bridges/bin/rialto/node/src/command.rs deleted file mode 100644 index da92837f06c..00000000000 --- a/polkadot/bridges/bin/rialto/node/src/command.rs +++ /dev/null @@ -1,221 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::cli::{Cli, Subcommand}; -use rialto_runtime::{Block, RuntimeApi}; -use sc_cli::{ChainSpec, Role, RuntimeVersion, SubstrateCli}; - -impl SubstrateCli for Cli { - fn impl_name() -> String { - "Rialto Bridge Node".into() - } - - fn impl_version() -> String { - env!("CARGO_PKG_VERSION").into() - } - - fn description() -> String { - "Rialto Bridge Node".into() - } - - fn author() -> String { - "Parity Technologies".into() - } - - fn support_url() -> String { - "https://github.com/paritytech/parity-bridges-common/".into() - } - - fn copyright_start_year() -> i32 { - 2019 - } - - fn executable_name() -> String { - "rialto-bridge-node".into() - } - - fn native_runtime_version(_: &Box) -> &'static RuntimeVersion { - &rialto_runtime::VERSION - } - - fn load_spec(&self, id: &str) -> Result, String> { - Ok(Box::new( - match id { - "" | "dev" => crate::chain_spec::Alternative::Development, - "local" => crate::chain_spec::Alternative::LocalTestnet, - _ => return Err(format!("Unsupported chain specification: {}", id)), - } - .load(), - )) - } -} - -// Rialto native executor instance. 
-pub struct ExecutorDispatch; - -impl sc_executor::NativeExecutionDispatch for ExecutorDispatch { - type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; - - fn dispatch(method: &str, data: &[u8]) -> Option> { - rialto_runtime::api::dispatch(method, data) - } - - fn native_version() -> sc_executor::NativeVersion { - rialto_runtime::native_version() - } -} - -/// Parse and run command line arguments -pub fn run() -> sc_cli::Result<()> { - let cli = Cli::from_args(); - sp_core::crypto::set_default_ss58_version(sp_core::crypto::Ss58AddressFormat::custom( - rialto_runtime::SS58Prefix::get() as u16, - )); - - match &cli.subcommand { - Some(Subcommand::Benchmark(cmd)) => - if cfg!(feature = "runtime-benchmarks") { - let runner = cli.create_runner(cmd)?; - - runner.sync_run(|config| cmd.run::(config)) - } else { - println!( - "Benchmarking wasn't enabled when building the node. \ - You can enable it with `--features runtime-benchmarks`." - ); - Ok(()) - }, - Some(Subcommand::Key(cmd)) => cmd.run(&cli), - Some(Subcommand::Sign(cmd)) => cmd.run(), - Some(Subcommand::Verify(cmd)) => cmd.run(), - Some(Subcommand::Vanity(cmd)) => cmd.run(), - Some(Subcommand::BuildSpec(cmd)) => { - let runner = cli.create_runner(cmd)?; - runner.sync_run(|config| cmd.run(config.chain_spec, config.network)) - }, - Some(Subcommand::CheckBlock(cmd)) => { - let runner = cli.create_runner(cmd)?; - runner.async_run(|mut config| { - let (client, _, import_queue, task_manager) = - polkadot_service::new_chain_ops(&mut config, None).map_err(service_error)?; - Ok((cmd.run(client, import_queue), task_manager)) - }) - }, - Some(Subcommand::ExportBlocks(cmd)) => { - let runner = cli.create_runner(cmd)?; - runner.async_run(|mut config| { - let (client, _, _, task_manager) = - polkadot_service::new_chain_ops(&mut config, None).map_err(service_error)?; - Ok((cmd.run(client, config.database), task_manager)) - }) - }, - Some(Subcommand::ExportState(cmd)) => { - let runner = cli.create_runner(cmd)?; - runner.async_run(|mut config| { - let (client, _, _, task_manager) = - polkadot_service::new_chain_ops(&mut config, None).map_err(service_error)?; - Ok((cmd.run(client, config.chain_spec), task_manager)) - }) - }, - Some(Subcommand::ImportBlocks(cmd)) => { - let runner = cli.create_runner(cmd)?; - runner.async_run(|mut config| { - let (client, _, import_queue, task_manager) = - polkadot_service::new_chain_ops(&mut config, None).map_err(service_error)?; - Ok((cmd.run(client, import_queue), task_manager)) - }) - }, - Some(Subcommand::PurgeChain(cmd)) => { - let runner = cli.create_runner(cmd)?; - runner.sync_run(|config| cmd.run(config.database)) - }, - Some(Subcommand::Revert(cmd)) => { - let runner = cli.create_runner(cmd)?; - runner.async_run(|mut config| { - let (client, backend, _, task_manager) = - polkadot_service::new_chain_ops(&mut config, None).map_err(service_error)?; - Ok((cmd.run(client, backend), task_manager)) - }) - }, - Some(Subcommand::Inspect(cmd)) => { - let runner = cli.create_runner(cmd)?; - runner.sync_run(|config| cmd.run::(config)) - }, - Some(Subcommand::PvfPrepareWorker(cmd)) => { - let mut builder = sc_cli::LoggerBuilder::new(""); - builder.with_colors(false); - let _ = builder.init(); - - polkadot_node_core_pvf::prepare_worker_entrypoint(&cmd.socket_path); - Ok(()) - }, - Some(crate::cli::Subcommand::PvfExecuteWorker(cmd)) => { - let mut builder = sc_cli::LoggerBuilder::new(""); - builder.with_colors(false); - let _ = builder.init(); - - 
polkadot_node_core_pvf::execute_worker_entrypoint(&cmd.socket_path); - Ok(()) - }, - None => { - let runner = cli.create_runner(&cli.run)?; - - // some parameters that are used by polkadot nodes, but that are not used by our binary - // let jaeger_agent = None; - // let grandpa_pause = None; - // let no_beefy = true; - // let telemetry_worker_handler = None; - // let is_collator = crate::service::IsCollator::No; - let overseer_gen = polkadot_service::overseer::RealOverseerGen; - runner.run_node_until_exit(|config| async move { - match config.role { - Role::Light => Err(sc_cli::Error::Service(sc_service::Error::Other( - "Light client is not supported by this node".into(), - ))), - _ => { - let is_collator = polkadot_service::IsCollator::No; - let grandpa_pause = None; - let enable_beefy = true; - let jaeger_agent = None; - let telemetry_worker_handle = None; - let program_path = None; - let overseer_enable_anyways = false; - - polkadot_service::new_full::( - config, - is_collator, - grandpa_pause, - enable_beefy, - jaeger_agent, - telemetry_worker_handle, - program_path, - overseer_enable_anyways, - overseer_gen, - ) - .map(|full| full.task_manager) - .map_err(service_error) - }, - } - }) - }, - } -} - -// We don't want to change 'service.rs' too much to ease future updates => it'll keep using -// its own error enum like original polkadot service does. -fn service_error(err: polkadot_service::Error) -> sc_cli::Error { - sc_cli::Error::Application(Box::new(err)) -} diff --git a/polkadot/bridges/bin/rialto/node/src/main.rs b/polkadot/bridges/bin/rialto/node/src/main.rs deleted file mode 100644 index 6dea84a309b..00000000000 --- a/polkadot/bridges/bin/rialto/node/src/main.rs +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Rialto bridge node. 
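Near the top of `run()` above, the node sets the default SS58 prefix to the runtime's `SS58Prefix` (48) so that addresses print in Rialto format. A small illustration of how the same public key renders under different prefixes, assuming only the sp-core crate:

```rust
// Sketch: the same 32-byte key rendered with the generic Substrate prefix (42)
// and with the custom prefix 48 used by the Rialto runtime.
use sp_core::crypto::{AccountId32, Ss58AddressFormat, Ss58Codec};

fn main() {
    let account = AccountId32::new([0u8; 32]);
    // Default format (prefix 42 unless a default has been set elsewhere).
    let generic = account.to_ss58check();
    // Rialto's configured prefix.
    let rialto = account.to_ss58check_with_version(Ss58AddressFormat::custom(48));
    println!("prefix 42: {generic}");
    println!("prefix 48: {rialto}");
}
```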
- -#![warn(missing_docs)] - -mod chain_spec; -mod cli; -mod command; - -/// Run the Rialto Node -fn main() -> sc_cli::Result<()> { - command::run() -} diff --git a/polkadot/bridges/bin/rialto/runtime/Cargo.toml b/polkadot/bridges/bin/rialto/runtime/Cargo.toml deleted file mode 100644 index a64376efb22..00000000000 --- a/polkadot/bridges/bin/rialto/runtime/Cargo.toml +++ /dev/null @@ -1,146 +0,0 @@ -[package] -name = "rialto-runtime" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -homepage = "https://substrate.dev" -repository = "https://github.com/paritytech/parity-bridges-common/" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -hex-literal = "0.3" -libsecp256k1 = { version = "0.7", optional = true, default-features = false, features = ["hmac"] } -log = { version = "0.4.14", default-features = false } -scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } -serde = { version = "1.0", optional = true, features = ["derive"] } - -# Bridge dependencies - -bp-header-chain = { path = "../../../primitives/header-chain", default-features = false } -bp-message-dispatch = { path = "../../../primitives/message-dispatch", default-features = false } -bp-messages = { path = "../../../primitives/messages", default-features = false } -bp-millau = { path = "../../../primitives/chain-millau", default-features = false } -bp-rialto = { path = "../../../primitives/chain-rialto", default-features = false } -bp-runtime = { path = "../../../primitives/runtime", default-features = false } -bridge-runtime-common = { path = "../../runtime-common", default-features = false } -pallet-bridge-dispatch = { path = "../../../modules/dispatch", default-features = false } -pallet-bridge-grandpa = { path = "../../../modules/grandpa", default-features = false } -pallet-bridge-messages = { path = "../../../modules/messages", default-features = false } -pallet-shift-session-manager = { path = "../../../modules/shift-session-manager", default-features = false } - -# Substrate Dependencies - -beefy-primitives = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } -frame-executive = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-beefy = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-beefy-mmr = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-grandpa = { git = 
"https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-mmr = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-sudo = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-consensus-babe = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-mmr-primitives = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-offchain = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } - -# Polkadot (parachain) Dependencies - -polkadot-primitives = { git = "https://github.com/paritytech/polkadot", branch = "master", default-features = false } -polkadot-runtime-common = { git = "https://github.com/paritytech/polkadot", branch = "master", default-features = false } -polkadot-runtime-parachains = { git = "https://github.com/paritytech/polkadot", branch = "master", default-features = false } - -[dev-dependencies] -bridge-runtime-common = { path = "../../runtime-common", features = ["integrity-test"] } -libsecp256k1 = { version = "0.7", features = ["hmac"] } -static_assertions = "1.1" - -[build-dependencies] -substrate-wasm-builder = { git = "https://github.com/paritytech/substrate", branch = "master" } - -[features] -default = ["std"] -std = [ - "beefy-primitives/std", - "bp-header-chain/std", - "bp-message-dispatch/std", - "bp-messages/std", - "bp-millau/std", - "bp-rialto/std", - "bp-runtime/std", - 
"bridge-runtime-common/std", - "codec/std", - "frame-benchmarking/std", - "frame-executive/std", - "frame-support/std", - "frame-system-rpc-runtime-api/std", - "frame-system/std", - "log/std", - "pallet-authority-discovery/std", - "pallet-babe/std", - "pallet-balances/std", - "pallet-beefy/std", - "pallet-beefy-mmr/std", - "pallet-bridge-dispatch/std", - "pallet-bridge-grandpa/std", - "pallet-bridge-messages/std", - "pallet-grandpa/std", - "pallet-mmr/std", - "pallet-shift-session-manager/std", - "pallet-sudo/std", - "pallet-timestamp/std", - "pallet-transaction-payment-rpc-runtime-api/std", - "pallet-transaction-payment/std", - "polkadot-primitives/std", - "polkadot-runtime-common/std", - "polkadot-runtime-parachains/std", - "scale-info/std", - "serde", - "sp-api/std", - "sp-authority-discovery/std", - "sp-block-builder/std", - "sp-consensus-babe/std", - "sp-core/std", - "sp-finality-grandpa/std", - "sp-inherents/std", - "sp-io/std", - "sp-mmr-primitives/std", - "sp-offchain/std", - "sp-runtime/std", - "sp-session/std", - "sp-std/std", - "sp-transaction-pool/std", - "sp-trie/std", - "sp-version/std", -] -runtime-benchmarks = [ - "bridge-runtime-common/runtime-benchmarks", - "frame-benchmarking/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "libsecp256k1", - "pallet-bridge-messages/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", -] diff --git a/polkadot/bridges/bin/rialto/runtime/build.rs b/polkadot/bridges/bin/rialto/runtime/build.rs deleted file mode 100644 index cc865704327..00000000000 --- a/polkadot/bridges/bin/rialto/runtime/build.rs +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use substrate_wasm_builder::WasmBuilder; - -fn main() { - WasmBuilder::new() - .with_current_project() - .import_memory() - .export_heap_base() - .build() -} diff --git a/polkadot/bridges/bin/rialto/runtime/src/lib.rs b/polkadot/bridges/bin/rialto/runtime/src/lib.rs deleted file mode 100644 index 851ee3d5127..00000000000 --- a/polkadot/bridges/bin/rialto/runtime/src/lib.rs +++ /dev/null @@ -1,998 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! The Rialto runtime. This can be compiled with `#[no_std]`, ready for Wasm. - -#![cfg_attr(not(feature = "std"), no_std)] -// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. -#![recursion_limit = "256"] -// Runtime-generated enums -#![allow(clippy::large_enum_variant)] -// From construct_runtime macro -#![allow(clippy::from_over_into)] - -// Make the WASM binary available. -#[cfg(feature = "std")] -include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); - -pub mod millau_messages; -pub mod parachains; - -use crate::millau_messages::{ToMillauMessagePayload, WithMillauMessageBridge}; - -use beefy_primitives::{crypto::AuthorityId as BeefyId, mmr::{MmrLeafVersion}, ValidatorSet}; -use bridge_runtime_common::messages::{ - source::estimate_message_dispatch_and_delivery_fee, MessageBridge, -}; -use pallet_grandpa::{ - fg_primitives, AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList, -}; -use sp_mmr_primitives::{ - DataOrHash, EncodableOpaqueLeaf, Error as MmrError, LeafDataProvider, - BatchProof as MmrBatchProof, Proof as MmrProof, LeafIndex as MmrLeafIndex -}; -use pallet_transaction_payment::{FeeDetails, Multiplier, RuntimeDispatchInfo}; -use sp_api::impl_runtime_apis; -use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; -use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; -use sp_runtime::{ - create_runtime_str, generic, impl_opaque_keys, - traits::{AccountIdLookup, Block as BlockT, Keccak256, NumberFor, OpaqueKeys}, - transaction_validity::{TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, FixedPointNumber, FixedU128, MultiSignature, MultiSigner, Perquintill, -}; -use sp_std::{collections::btree_map::BTreeMap, prelude::*}; -#[cfg(feature = "std")] -use sp_version::NativeVersion; -use sp_version::RuntimeVersion; - -// A few exports that help ease life for downstream crates. -pub use frame_support::{ - construct_runtime, parameter_types, - traits::{Currency, ExistenceRequirement, Imbalance, KeyOwnerProofSystem}, - weights::{constants::WEIGHT_PER_SECOND, DispatchClass, IdentityFee, RuntimeDbWeight, Weight}, - StorageValue, -}; - -pub use frame_system::Call as SystemCall; -pub use pallet_balances::Call as BalancesCall; -pub use pallet_bridge_grandpa::Call as BridgeGrandpaMillauCall; -pub use pallet_bridge_messages::Call as MessagesCall; -pub use pallet_sudo::Call as SudoCall; -pub use pallet_timestamp::Call as TimestampCall; - -#[cfg(any(feature = "std", test))] -pub use sp_runtime::BuildStorage; -pub use sp_runtime::{Perbill, Permill}; - -/// An index to a block. -pub type BlockNumber = bp_rialto::BlockNumber; - -/// Alias to 512-bit hash when used in the context of a transaction signature on the chain. -pub type Signature = bp_rialto::Signature; - -/// Some way of identifying an account on the chain. We intentionally make it equivalent -/// to the public key of our transaction signing scheme. -pub type AccountId = bp_rialto::AccountId; - -/// The type for looking up accounts. We don't expect more than 4 billion of them, but you -/// never know... -pub type AccountIndex = u32; - -/// Balance of an account. -pub type Balance = bp_rialto::Balance; - -/// Index of a transaction in the chain. -pub type Index = bp_rialto::Index; - -/// A hash of some data used by the chain. -pub type Hash = bp_rialto::Hash; - -/// Hashing algorithm used by the chain. 
-pub type Hashing = bp_rialto::Hasher; - -/// Opaque types. These are used by the CLI to instantiate machinery that don't need to know -/// the specifics of the runtime. They can then be made to be agnostic over specific formats -/// of data like extrinsics, allowing for them to continue syncing the network through upgrades -/// to even the core data structures. -pub mod opaque { - use super::*; - - pub use sp_runtime::OpaqueExtrinsic as UncheckedExtrinsic; - - /// Opaque block header type. - pub type Header = generic::Header; - /// Opaque block type. - pub type Block = generic::Block; - /// Opaque block identifier type. - pub type BlockId = generic::BlockId; -} - -impl_opaque_keys! { - pub struct SessionKeys { - pub babe: Babe, - pub grandpa: Grandpa, - pub beefy: Beefy, - pub para_validator: Initializer, - pub para_assignment: SessionInfo, - pub authority_discovery: AuthorityDiscovery, - } -} - -/// This runtime version. -pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("rialto-runtime"), - impl_name: create_runtime_str!("rialto-runtime"), - authoring_version: 1, - spec_version: 1, - impl_version: 1, - apis: RUNTIME_API_VERSIONS, - transaction_version: 1, - state_version: 1, -}; - -/// The version information used to identify this runtime when compiled natively. -#[cfg(feature = "std")] -pub fn native_version() -> NativeVersion { - NativeVersion { runtime_version: VERSION, can_author_with: Default::default() } -} - -parameter_types! { - pub const BlockHashCount: BlockNumber = 250; - pub const Version: RuntimeVersion = VERSION; - pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { - read: 60_000_000, // ~0.06 ms = ~60 µs - write: 200_000_000, // ~0.2 ms = 200 µs - }; - pub const SS58Prefix: u8 = 48; -} - -impl frame_system::Config for Runtime { - /// The basic call filter to use in dispatchable. - type BaseCallFilter = frame_support::traits::Everything; - /// The identifier used to distinguish between accounts. - type AccountId = AccountId; - /// The aggregated dispatch type that is available for extrinsics. - type Call = Call; - /// The lookup mechanism to get account ID from whatever is passed in dispatchers. - type Lookup = AccountIdLookup; - /// The index type for storing how many extrinsics an account has signed. - type Index = Index; - /// The index type for blocks. - type BlockNumber = BlockNumber; - /// The type for hashing blocks and tries. - type Hash = Hash; - /// The hashing algorithm used. - type Hashing = Hashing; - /// The header type. - type Header = generic::Header; - /// The ubiquitous event type. - type Event = Event; - /// The ubiquitous origin type. - type Origin = Origin; - /// Maximum number of block number to block hash mappings to keep (oldest pruned first). - type BlockHashCount = BlockHashCount; - /// Version of the runtime. - type Version = Version; - /// Provides information about the pallet setup in the runtime. - type PalletInfo = PalletInfo; - /// What to do if a new account is created. - type OnNewAccount = (); - /// What to do if an account is fully reaped from the system. - type OnKilledAccount = (); - /// The data to be stored in an account. - type AccountData = pallet_balances::AccountData; - // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) - /// Weight information for the extrinsics of this pallet. - type SystemWeightInfo = (); - /// Block and extrinsics weights: base values and limits. 
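The `DbWeight` comments above can be sanity-checked against the convention used here, where one second of execution corresponds to 10^12 weight units (the `WEIGHT_PER_SECOND` constant this runtime re-exports):

```rust
// Sanity check of the DbWeight comments: 60_000_000 units is ~60 µs (~0.06 ms)
// and 200_000_000 units is ~200 µs (~0.2 ms), given 10^12 units per second.
fn main() {
    const WEIGHT_PER_SECOND: u64 = 1_000_000_000_000; // value of the constant at the time
    const WEIGHT_PER_MICROS: u64 = WEIGHT_PER_SECOND / 1_000_000;

    let read: u64 = 60_000_000;
    let write: u64 = 200_000_000;

    assert_eq!(read / WEIGHT_PER_MICROS, 60);
    assert_eq!(write / WEIGHT_PER_MICROS, 200);
    println!("read ≈ {} µs, write ≈ {} µs", read / WEIGHT_PER_MICROS, write / WEIGHT_PER_MICROS);
}
```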
- type BlockWeights = bp_rialto::BlockWeights; - /// The maximum length of a block (in bytes). - type BlockLength = bp_rialto::BlockLength; - /// The weight of database operations that the runtime can invoke. - type DbWeight = DbWeight; - /// The designated `SS58` prefix of this chain. - type SS58Prefix = SS58Prefix; - /// The set code logic, just the default since we're not a parachain. - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; -} - -/// The BABE epoch configuration at genesis. -pub const BABE_GENESIS_EPOCH_CONFIG: sp_consensus_babe::BabeEpochConfiguration = - sp_consensus_babe::BabeEpochConfiguration { - c: bp_rialto::time_units::PRIMARY_PROBABILITY, - allowed_slots: sp_consensus_babe::AllowedSlots::PrimaryAndSecondaryVRFSlots, - }; - -parameter_types! { - pub const EpochDuration: u64 = bp_rialto::EPOCH_DURATION_IN_SLOTS as u64; - pub const ExpectedBlockTime: bp_rialto::Moment = bp_rialto::time_units::MILLISECS_PER_BLOCK; - pub const MaxAuthorities: u32 = 10; -} - -impl pallet_babe::Config for Runtime { - type EpochDuration = EpochDuration; - type ExpectedBlockTime = ExpectedBlockTime; - type MaxAuthorities = MaxAuthorities; - - // session module is the trigger - type EpochChangeTrigger = pallet_babe::ExternalTrigger; - - // equivocation related configuration - we don't expect any equivocations in our testnets - type KeyOwnerProofSystem = (); - type KeyOwnerProof = >::Proof; - type KeyOwnerIdentification = >::IdentificationTuple; - type HandleEquivocation = (); - - type DisabledValidators = (); - type WeightInfo = (); -} - -impl pallet_beefy::Config for Runtime { - type BeefyId = BeefyId; - type MaxAuthorities = MaxAuthorities; -} - -impl pallet_bridge_dispatch::Config for Runtime { - type Event = Event; - type BridgeMessageId = (bp_messages::LaneId, bp_messages::MessageNonce); - type Call = Call; - type CallFilter = frame_support::traits::Everything; - type EncodedCall = crate::millau_messages::FromMillauEncodedCall; - type SourceChainAccountId = bp_millau::AccountId; - type TargetChainAccountPublic = MultiSigner; - type TargetChainSignature = MultiSignature; - type AccountIdConverter = bp_rialto::AccountIdConverter; -} - -impl pallet_grandpa::Config for Runtime { - type Event = Event; - type Call = Call; - type MaxAuthorities = MaxAuthorities; - type KeyOwnerProofSystem = (); - type KeyOwnerProof = - >::Proof; - type KeyOwnerIdentification = >::IdentificationTuple; - type HandleEquivocation = (); - // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) - type WeightInfo = (); - type MaxAuthorities = MaxAuthorities; -} - -type MmrHash = ::Output; - -impl pallet_mmr::Config for Runtime { - const INDEXING_PREFIX: &'static [u8] = b"mmr"; - type Hashing = Keccak256; - type Hash = MmrHash; - type OnNewRoot = pallet_beefy_mmr::DepositBeefyDigest; - type WeightInfo = (); - type LeafData = pallet_beefy_mmr::Pallet; -} - -parameter_types! { - /// Version of the produced MMR leaf. - /// - /// The version consists of two parts; - /// - `major` (3 bits) - /// - `minor` (5 bits) - /// - /// `major` should be updated only if decoding the previous MMR Leaf format from the payload - /// is not possible (i.e. backward incompatible change). - /// `minor` should be updated if fields are added to the previous MMR Leaf, which given SCALE - /// encoding does not prevent old leafs from being decoded. - /// - /// Hence we expect `major` to be changed really rarely (think never). - /// See [`MmrLeafVersion`] type documentation for more details. 
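The `major`/`minor` split described above fits in a single byte. Assuming the packing the comment implies (major in the top 3 bits, minor in the low 5 bits), the version byte can be reconstructed like this:

```rust
// Sketch of the MMR leaf version packing: 3 bits of `major`, 5 bits of `minor`.
fn pack_leaf_version(major: u8, minor: u8) -> u8 {
    assert!(major < 8, "major is a 3-bit value");
    assert!(minor < 32, "minor is a 5-bit value");
    (major << 5) | minor
}

fn main() {
    // LeafVersion::new(0, 0) from the parameter_types! block above.
    assert_eq!(pack_leaf_version(0, 0), 0);
    // A hypothetical version 1.4 would encode as 0b001_00100.
    assert_eq!(pack_leaf_version(1, 4), 0b0010_0100);
}
```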
- pub LeafVersion: MmrLeafVersion = MmrLeafVersion::new(0, 0); -} - -impl pallet_beefy_mmr::Config for Runtime { - type LeafVersion = LeafVersion; - type BeefyAuthorityToMerkleLeaf = pallet_beefy_mmr::BeefyEcdsaToEthereum; - type BeefyDataProvider = (); -} - -parameter_types! { - pub const MinimumPeriod: u64 = bp_rialto::SLOT_DURATION / 2; -} - -impl pallet_timestamp::Config for Runtime { - /// A timestamp: milliseconds since the UNIX epoch. - type Moment = bp_rialto::Moment; - type OnTimestampSet = Babe; - type MinimumPeriod = MinimumPeriod; - // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) - type WeightInfo = (); -} - -parameter_types! { - pub const ExistentialDeposit: bp_rialto::Balance = 500; - // For weight estimation, we assume that the most locks on an individual account will be 50. - // This number may need to be adjusted in the future if this assumption no longer holds true. - pub const MaxLocks: u32 = 50; - pub const MaxReserves: u32 = 50; -} - -impl pallet_balances::Config for Runtime { - /// The type for recording an account's balance. - type Balance = Balance; - /// The ubiquitous event type. - type Event = Event; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) - type WeightInfo = (); - type MaxLocks = MaxLocks; - type MaxReserves = MaxReserves; - type ReserveIdentifier = [u8; 8]; -} - -parameter_types! { - pub const TransactionBaseFee: Balance = 0; - pub const TransactionByteFee: Balance = 1; - pub const OperationalFeeMultiplier: u8 = 5; - // values for following parameters are copied from polkadot repo, but it is fine - // not to sync them - we're not going to make Rialto a full copy of one of Polkadot-like chains - pub const TargetBlockFullness: Perquintill = Perquintill::from_percent(25); - pub AdjustmentVariable: Multiplier = Multiplier::saturating_from_rational(3, 100_000); - pub MinimumMultiplier: Multiplier = Multiplier::saturating_from_rational(1, 1_000_000u128); -} - -impl pallet_transaction_payment::Config for Runtime { - type Event = Event; - type OnChargeTransaction = pallet_transaction_payment::CurrencyAdapter; - type TransactionByteFee = TransactionByteFee; - type OperationalFeeMultiplier = OperationalFeeMultiplier; - type WeightToFee = bp_rialto::WeightToFee; - type FeeMultiplierUpdate = pallet_transaction_payment::TargetedFeeAdjustment< - Runtime, - TargetBlockFullness, - AdjustmentVariable, - MinimumMultiplier, - >; -} - -impl pallet_sudo::Config for Runtime { - type Event = Event; - type Call = Call; -} - -impl pallet_session::Config for Runtime { - type Event = Event; - type ValidatorId = ::AccountId; - type ValidatorIdOf = (); - type ShouldEndSession = Babe; - type NextSessionRotation = Babe; - type SessionManager = pallet_shift_session_manager::Pallet; - type SessionHandler = ::KeyTypeIdProviders; - type Keys = SessionKeys; - // TODO: update me (https://github.com/paritytech/parity-bridges-common/issues/78) - type WeightInfo = (); -} - -impl pallet_authority_discovery::Config for Runtime { - type MaxAuthorities = MaxAuthorities; -} - -parameter_types! { - /// This is a pretty unscientific cap. - /// - /// Note that once this is hit the pallet will essentially throttle incoming requests down to one - /// call per block. - pub const MaxRequests: u32 = 50; - - /// Number of headers to keep. 
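The fee parameters above feed `TargetedFeeAdjustment`, the slow-adjusting multiplier mechanism: after each block the multiplier is nudged according to how full the block was relative to `TargetBlockFullness`, roughly `m' = m * (1 + v*(s - s*) + v^2*(s - s*)^2 / 2)`, floored at `MinimumMultiplier`. A rough floating-point illustration (the pallet itself uses fixed-point arithmetic):

```rust
// Rough sketch of the slow-adjusting fee multiplier with the parameters above:
// v = AdjustmentVariable = 3/100_000, s* = TargetBlockFullness = 25%,
// floor = MinimumMultiplier = 1/1_000_000.
fn next_multiplier(m: f64, block_fullness: f64) -> f64 {
    let v = 3.0 / 100_000.0;
    let s_target = 0.25;
    let diff = block_fullness - s_target;
    (m * (1.0 + v * diff + (v * diff).powi(2) / 2.0)).max(1.0 / 1_000_000.0)
}

fn main() {
    let mut m = 1.0;
    // A run of completely full blocks slowly pushes fees up...
    for _ in 0..1_000 {
        m = next_multiplier(m, 1.0);
    }
    println!("multiplier after 1000 full blocks: {m:.5}");
    // ...while empty blocks pull it back down, never below the minimum.
    for _ in 0..1_000 {
        m = next_multiplier(m, 0.0);
    }
    println!("multiplier after 1000 empty blocks: {m:.5}");
}
```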
- /// - /// Assuming the worst case of every header being finalized, we will keep headers at least for a - /// week. - pub const HeadersToKeep: u32 = 7 * bp_rialto::DAYS as u32; -} - -pub type MillauGrandpaInstance = (); -impl pallet_bridge_grandpa::Config for Runtime { - type BridgedChain = bp_millau::Millau; - type MaxRequests = MaxRequests; - type HeadersToKeep = HeadersToKeep; - type WeightInfo = pallet_bridge_grandpa::weights::MillauWeight; -} - -impl pallet_shift_session_manager::Config for Runtime {} - -parameter_types! { - pub const MaxMessagesToPruneAtOnce: bp_messages::MessageNonce = 8; - pub const MaxUnrewardedRelayerEntriesAtInboundLane: bp_messages::MessageNonce = - bp_millau::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; - pub const MaxUnconfirmedMessagesAtInboundLane: bp_messages::MessageNonce = - bp_millau::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; - // `IdentityFee` is used by Rialto => we may use weight directly - pub const GetDeliveryConfirmationTransactionFee: Balance = - bp_rialto::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT as _; - pub const RootAccountForPayments: Option = None; - pub const BridgedChainId: bp_runtime::ChainId = bp_runtime::MILLAU_CHAIN_ID; -} - -/// Instance of the messages pallet used to relay messages to/from Millau chain. -pub type WithMillauMessagesInstance = (); - -impl pallet_bridge_messages::Config for Runtime { - type Event = Event; - type WeightInfo = pallet_bridge_messages::weights::MillauWeight; - type Parameter = millau_messages::RialtoToMillauMessagesParameter; - type MaxMessagesToPruneAtOnce = MaxMessagesToPruneAtOnce; - type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; - type MaxUnconfirmedMessagesAtInboundLane = MaxUnconfirmedMessagesAtInboundLane; - - type OutboundPayload = crate::millau_messages::ToMillauMessagePayload; - type OutboundMessageFee = Balance; - - type InboundPayload = crate::millau_messages::FromMillauMessagePayload; - type InboundMessageFee = bp_millau::Balance; - type InboundRelayer = bp_millau::AccountId; - - type AccountIdConverter = bp_rialto::AccountIdConverter; - - type TargetHeaderChain = crate::millau_messages::Millau; - type LaneMessageVerifier = crate::millau_messages::ToMillauMessageVerifier; - type MessageDeliveryAndDispatchPayment = - pallet_bridge_messages::instant_payments::InstantCurrencyPayments< - Runtime, - WithMillauMessagesInstance, - pallet_balances::Pallet, - GetDeliveryConfirmationTransactionFee, - >; - type OnMessageAccepted = (); - type OnDeliveryConfirmed = (); - - type SourceHeaderChain = crate::millau_messages::Millau; - type MessageDispatch = crate::millau_messages::FromMillauMessageDispatch; - type BridgedChainId = BridgedChainId; -} - -construct_runtime!( - pub enum Runtime where - Block = Block, - NodeBlock = opaque::Block, - UncheckedExtrinsic = UncheckedExtrinsic - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Sudo: pallet_sudo::{Pallet, Call, Config, Storage, Event}, - - // Must be before session. - Babe: pallet_babe::{Pallet, Call, Storage, Config, ValidateUnsigned}, - - Timestamp: pallet_timestamp::{Pallet, Call, Storage, Inherent}, - Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, - TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event}, - - // Consensus support. 
- AuthorityDiscovery: pallet_authority_discovery::{Pallet, Config}, - Session: pallet_session::{Pallet, Call, Storage, Event, Config}, - Grandpa: pallet_grandpa::{Pallet, Call, Storage, Config, Event}, - ShiftSessionManager: pallet_shift_session_manager::{Pallet}, - - // BEEFY Bridges support. - Beefy: pallet_beefy::{Pallet, Storage, Config}, - Mmr: pallet_mmr::{Pallet, Storage}, - MmrLeaf: pallet_beefy_mmr::{Pallet, Storage}, - - // Millau bridge modules. - BridgeMillauGrandpa: pallet_bridge_grandpa::{Pallet, Call, Storage}, - BridgeDispatch: pallet_bridge_dispatch::{Pallet, Event}, - BridgeMillauMessages: pallet_bridge_messages::{Pallet, Call, Storage, Event, Config}, - - // Parachain modules. - ParachainsOrigin: polkadot_runtime_parachains::origin::{Pallet, Origin}, - Configuration: polkadot_runtime_parachains::configuration::{Pallet, Call, Storage, Config}, - Shared: polkadot_runtime_parachains::shared::{Pallet, Call, Storage}, - Inclusion: polkadot_runtime_parachains::inclusion::{Pallet, Call, Storage, Event}, - ParasInherent: polkadot_runtime_parachains::paras_inherent::{Pallet, Call, Storage, Inherent}, - Scheduler: polkadot_runtime_parachains::scheduler::{Pallet, Storage}, - Paras: polkadot_runtime_parachains::paras::{Pallet, Call, Storage, Event, Config}, - Initializer: polkadot_runtime_parachains::initializer::{Pallet, Call, Storage}, - Dmp: polkadot_runtime_parachains::dmp::{Pallet, Call, Storage}, - Ump: polkadot_runtime_parachains::ump::{Pallet, Call, Storage, Event}, - Hrmp: polkadot_runtime_parachains::hrmp::{Pallet, Call, Storage, Event, Config}, - SessionInfo: polkadot_runtime_parachains::session_info::{Pallet, Storage}, - - // Parachain Onboarding Pallets - Registrar: polkadot_runtime_common::paras_registrar::{Pallet, Call, Storage, Event}, - Slots: polkadot_runtime_common::slots::{Pallet, Call, Storage, Event}, - ParasSudoWrapper: polkadot_runtime_common::paras_sudo_wrapper::{Pallet, Call}, - } -); - -/// The address format for describing accounts. -pub type Address = sp_runtime::MultiAddress; -/// Block header type as expected by this runtime. -pub type Header = generic::Header; -/// Block type as expected by this runtime. -pub type Block = generic::Block; -/// A Block signed with a Justification -pub type SignedBlock = generic::SignedBlock; -/// `BlockId` type as expected by this runtime. -pub type BlockId = generic::BlockId; -/// The `SignedExtension` to the basic transaction logic. -pub type SignedExtra = ( - frame_system::CheckNonZeroSender, - frame_system::CheckSpecVersion, - frame_system::CheckTxVersion, - frame_system::CheckGenesis, - frame_system::CheckEra, - frame_system::CheckNonce, - frame_system::CheckWeight, - pallet_transaction_payment::ChargeTransactionPayment, -); -/// The payload being signed in transactions. -pub type SignedPayload = generic::SignedPayload; -/// Unchecked extrinsic type as expected by this runtime. -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; -/// Extrinsic type that has already been checked. -pub type CheckedExtrinsic = generic::CheckedExtrinsic; -/// Executive: handles dispatch to the various modules. 
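One property of the `SignedExtra` tuple above worth keeping in mind: its members are SCALE-encoded positionally into the signed payload, so reordering them changes the bytes being signed and breaks compatibility with existing signers. A toy demonstration using the same `codec` (parity-scale-codec) alias as this runtime's Cargo.toml:

```rust
// Sketch: tuple order changes the SCALE encoding. The two values stand in for
// hypothetical signed-extension data (a compact nonce and a compact tip).
use codec::Encode;

fn main() {
    let payload_a = (codec::Compact(7u32), codec::Compact(0u128)).encode();
    let payload_b = (codec::Compact(0u128), codec::Compact(7u32)).encode();
    assert_ne!(payload_a, payload_b);
    println!("(nonce, tip) -> {:?}", payload_a);
    println!("(tip, nonce) -> {:?}", payload_b);
}
```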
-pub type Executive = frame_executive::Executive< - Runtime, - Block, - frame_system::ChainContext, - Runtime, - AllPalletsWithSystem, ->; - -#[cfg(feature = "runtime-benchmarks")] -#[macro_use] -extern crate frame_benchmarking; - -#[cfg(feature = "runtime-benchmarks")] -mod benches { - define_benchmarks!( - [pallet_bridge_messages, - MessagesBench::] - [pallet_bridge_grandpa, BridgeMillauGrandpa] - ); -} -pub type MmrHashing = ::Hashing; - -impl_runtime_apis! { - impl sp_api::Core for Runtime { - fn version() -> RuntimeVersion { - VERSION - } - - fn execute_block(block: Block) { - Executive::execute_block(block); - } - - fn initialize_block(header: &::Header) { - Executive::initialize_block(header) - } - } - - impl sp_api::Metadata for Runtime { - fn metadata() -> OpaqueMetadata { - OpaqueMetadata::new(Runtime::metadata().into()) - } - } - - impl sp_block_builder::BlockBuilder for Runtime { - fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { - Executive::apply_extrinsic(extrinsic) - } - - fn finalize_block() -> ::Header { - Executive::finalize_block() - } - - fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { - data.create_extrinsics() - } - - fn check_inherents( - block: Block, - data: sp_inherents::InherentData, - ) -> sp_inherents::CheckInherentsResult { - data.check_extrinsics(&block) - } - } - - impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { - fn account_nonce(account: AccountId) -> Index { - System::account_nonce(account) - } - } - - impl beefy_primitives::BeefyApi for Runtime { - fn validator_set() -> Option> { - Beefy::validator_set() - } - } - - impl sp_mmr_primitives::MmrApi for Runtime { - fn generate_proof(leaf_index: u64) - -> Result<(EncodableOpaqueLeaf, MmrProof), MmrError> - { - Mmr::generate_batch_proof(vec![leaf_index]) - .and_then(|(leaves, proof)| Ok(( - EncodableOpaqueLeaf::from_leaf(&leaves[0]), - MmrBatchProof::into_single_leaf_proof(proof)? 
- ))) - } - - fn verify_proof(leaf: EncodableOpaqueLeaf, proof: MmrProof) - -> Result<(), MmrError> - { - - pub type MmrLeaf = <::LeafData as LeafDataProvider>::LeafData; - let leaf: MmrLeaf = leaf - .into_opaque_leaf() - .try_decode() - .ok_or(MmrError::Verify)?; - Mmr::verify_leaves(vec![leaf], MmrProof::into_batch_proof(proof)) - } - - fn verify_proof_stateless( - root: Hash, - leaf: EncodableOpaqueLeaf, - proof: MmrProof - ) -> Result<(), MmrError> { - let node = DataOrHash::Data(leaf.into_opaque_leaf()); - pallet_mmr::verify_leaves_proof::(root, vec![node], MmrProof::into_batch_proof(proof)) - } - - fn generate_batch_proof(leaf_indices: Vec) - -> Result<(Vec, MmrBatchProof), MmrError> - { - Mmr::generate_batch_proof(leaf_indices) - .map(|(leaves, proof)| (leaves.into_iter().map(|leaf| EncodableOpaqueLeaf::from_leaf(&leaf)).collect(), proof)) - } - - fn verify_batch_proof(leaves: Vec, proof: MmrBatchProof) - -> Result<(), MmrError> - { - pub type MmrLeaf = <::LeafData as LeafDataProvider>::LeafData; - let leaves = leaves.into_iter().map(|leaf| - leaf.into_opaque_leaf() - .try_decode() - .ok_or(MmrError::Verify)).collect::, MmrError>>()?; - Mmr::verify_leaves(leaves, proof) - } - - fn verify_batch_proof_stateless( - root: Hash, - leaves: Vec, - proof: MmrBatchProof - ) -> Result<(), MmrError> { - let nodes = leaves.into_iter().map(|leaf|DataOrHash::Data(leaf.into_opaque_leaf())).collect(); - pallet_mmr::verify_leaves_proof::(root, nodes, proof) - } - } - - impl bp_millau::MillauFinalityApi for Runtime { - fn best_finalized() -> (bp_millau::BlockNumber, bp_millau::Hash) { - let header = BridgeMillauGrandpa::best_finalized(); - (header.number, header.hash()) - } - } - - impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { - fn validate_transaction( - source: TransactionSource, - tx: ::Extrinsic, - block_hash: ::Hash, - ) -> TransactionValidity { - Executive::validate_transaction(source, tx, block_hash) - } - } - - impl sp_offchain::OffchainWorkerApi for Runtime { - fn offchain_worker(header: &::Header) { - Executive::offchain_worker(header) - } - } - - impl sp_consensus_babe::BabeApi for Runtime { - fn configuration() -> sp_consensus_babe::BabeGenesisConfiguration { - // The choice of `c` parameter (where `1 - c` represents the - // probability of a slot being empty), is done in accordance to the - // slot duration and expected target block time, for safely - // resisting network delays of maximum two seconds. 
- // - sp_consensus_babe::BabeGenesisConfiguration { - slot_duration: Babe::slot_duration(), - epoch_length: EpochDuration::get(), - c: BABE_GENESIS_EPOCH_CONFIG.c, - genesis_authorities: Babe::authorities().to_vec(), - randomness: Babe::randomness(), - allowed_slots: BABE_GENESIS_EPOCH_CONFIG.allowed_slots, - } - } - - fn current_epoch_start() -> sp_consensus_babe::Slot { - Babe::current_epoch_start() - } - - fn current_epoch() -> sp_consensus_babe::Epoch { - Babe::current_epoch() - } - - fn next_epoch() -> sp_consensus_babe::Epoch { - Babe::next_epoch() - } - - fn generate_key_ownership_proof( - _slot: sp_consensus_babe::Slot, - _authority_id: sp_consensus_babe::AuthorityId, - ) -> Option { - None - } - - fn submit_report_equivocation_unsigned_extrinsic( - equivocation_proof: sp_consensus_babe::EquivocationProof<::Header>, - key_owner_proof: sp_consensus_babe::OpaqueKeyOwnershipProof, - ) -> Option<()> { - let key_owner_proof = key_owner_proof.decode()?; - - Babe::submit_unsigned_equivocation_report( - equivocation_proof, - key_owner_proof, - ) - } - } - - impl polkadot_primitives::runtime_api::ParachainHost for Runtime { - fn validators() -> Vec { - polkadot_runtime_parachains::runtime_api_impl::v2::validators::() - } - - fn validator_groups() -> (Vec>, polkadot_primitives::v2::GroupRotationInfo) { - polkadot_runtime_parachains::runtime_api_impl::v2::validator_groups::() - } - - fn availability_cores() -> Vec> { - polkadot_runtime_parachains::runtime_api_impl::v2::availability_cores::() - } - - fn persisted_validation_data(para_id: polkadot_primitives::v2::Id, assumption: polkadot_primitives::v2::OccupiedCoreAssumption) - -> Option> { - polkadot_runtime_parachains::runtime_api_impl::v2::persisted_validation_data::(para_id, assumption) - } - - fn assumed_validation_data( - para_id: polkadot_primitives::v2::Id, - expected_persisted_validation_data_hash: Hash, - ) -> Option<(polkadot_primitives::v2::PersistedValidationData, polkadot_primitives::v2::ValidationCodeHash)> { - polkadot_runtime_parachains::runtime_api_impl::v2::assumed_validation_data::( - para_id, - expected_persisted_validation_data_hash, - ) - } - - fn check_validation_outputs( - para_id: polkadot_primitives::v2::Id, - outputs: polkadot_primitives::v2::CandidateCommitments, - ) -> bool { - polkadot_runtime_parachains::runtime_api_impl::v2::check_validation_outputs::(para_id, outputs) - } - - fn session_index_for_child() -> polkadot_primitives::v2::SessionIndex { - polkadot_runtime_parachains::runtime_api_impl::v2::session_index_for_child::() - } - - fn validation_code(para_id: polkadot_primitives::v2::Id, assumption: polkadot_primitives::v2::OccupiedCoreAssumption) - -> Option { - polkadot_runtime_parachains::runtime_api_impl::v2::validation_code::(para_id, assumption) - } - - fn candidate_pending_availability(para_id: polkadot_primitives::v2::Id) -> Option> { - polkadot_runtime_parachains::runtime_api_impl::v2::candidate_pending_availability::(para_id) - } - - fn candidate_events() -> Vec> { - polkadot_runtime_parachains::runtime_api_impl::v2::candidate_events::(|ev| { - match ev { - Event::Inclusion(ev) => { - Some(ev) - } - _ => None, - } - }) - } - - fn session_info(index: polkadot_primitives::v2::SessionIndex) -> Option { - polkadot_runtime_parachains::runtime_api_impl::v2::session_info::(index) - } - - fn dmq_contents(recipient: polkadot_primitives::v2::Id) -> Vec> { - polkadot_runtime_parachains::runtime_api_impl::v2::dmq_contents::(recipient) - } - - fn inbound_hrmp_channels_contents( - recipient: 
polkadot_primitives::v2::Id - ) -> BTreeMap>> { - polkadot_runtime_parachains::runtime_api_impl::v2::inbound_hrmp_channels_contents::(recipient) - } - - fn validation_code_by_hash(hash: polkadot_primitives::v2::ValidationCodeHash) -> Option { - polkadot_runtime_parachains::runtime_api_impl::v2::validation_code_by_hash::(hash) - } - - fn on_chain_votes() -> Option> { - polkadot_runtime_parachains::runtime_api_impl::v2::on_chain_votes::() - } - - fn submit_pvf_check_statement(stmt: polkadot_primitives::v2::PvfCheckStatement, signature: polkadot_primitives::v2::ValidatorSignature) { - polkadot_runtime_parachains::runtime_api_impl::v2::submit_pvf_check_statement::(stmt, signature) - } - - fn pvfs_require_precheck() -> Vec { - polkadot_runtime_parachains::runtime_api_impl::v2::pvfs_require_precheck::() - } - - fn validation_code_hash(para_id: polkadot_primitives::v2::Id, assumption: polkadot_primitives::v2::OccupiedCoreAssumption) - -> Option - { - polkadot_runtime_parachains::runtime_api_impl::v2::validation_code_hash::(para_id, assumption) - } - } - - impl sp_authority_discovery::AuthorityDiscoveryApi for Runtime { - fn authorities() -> Vec { - polkadot_runtime_parachains::runtime_api_impl::v2::relevant_authority_ids::() - } - } - - impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi< - Block, - Balance, - > for Runtime { - fn query_info(uxt: ::Extrinsic, len: u32) -> RuntimeDispatchInfo { - TransactionPayment::query_info(uxt, len) - } - fn query_fee_details(uxt: ::Extrinsic, len: u32) -> FeeDetails { - TransactionPayment::query_fee_details(uxt, len) - } - } - - impl sp_session::SessionKeys for Runtime { - fn generate_session_keys(seed: Option>) -> Vec { - SessionKeys::generate(seed) - } - - fn decode_session_keys( - encoded: Vec, - ) -> Option, sp_core::crypto::KeyTypeId)>> { - SessionKeys::decode_into_raw_public_keys(&encoded) - } - } - - impl fg_primitives::GrandpaApi for Runtime { - fn current_set_id() -> fg_primitives::SetId { - Grandpa::current_set_id() - } - - fn grandpa_authorities() -> GrandpaAuthorityList { - Grandpa::grandpa_authorities() - } - - fn submit_report_equivocation_unsigned_extrinsic( - equivocation_proof: fg_primitives::EquivocationProof< - ::Hash, - NumberFor, - >, - key_owner_proof: fg_primitives::OpaqueKeyOwnershipProof, - ) -> Option<()> { - let key_owner_proof = key_owner_proof.decode()?; - - Grandpa::submit_unsigned_equivocation_report( - equivocation_proof, - key_owner_proof, - ) - } - - fn generate_key_ownership_proof( - _set_id: fg_primitives::SetId, - _authority_id: GrandpaId, - ) -> Option { - // NOTE: this is the only implementation possible since we've - // defined our key owner proof type as a bottom type (i.e. a type - // with no values). - None - } - } - - impl bp_millau::ToMillauOutboundLaneApi for Runtime { - fn estimate_message_delivery_and_dispatch_fee( - _lane_id: bp_messages::LaneId, - payload: ToMillauMessagePayload, - millau_to_this_conversion_rate: Option, - ) -> Option { - estimate_message_dispatch_and_delivery_fee::( - &payload, - WithMillauMessageBridge::RELAYER_FEE_PERCENT, - millau_to_this_conversion_rate, - ).ok() - } - - fn message_details( - lane: bp_messages::LaneId, - begin: bp_messages::MessageNonce, - end: bp_messages::MessageNonce, - ) -> Vec> { - bridge_runtime_common::messages_api::outbound_message_details::< - Runtime, - WithMillauMessagesInstance, - WithMillauMessageBridge, - >(lane, begin, end) - } - } -} - -/// Millau account ownership digest from Rialto. 
-/// -/// The byte vector returned by this function should be signed with a Millau account private key. -/// This way, the owner of `rialto_account_id` on Rialto proves that the 'millau' account private -/// key is also under his control. -pub fn rialto_to_millau_account_ownership_digest( - millau_call: &Call, - rialto_account_id: AccountId, - millau_spec_version: SpecVersion, -) -> sp_std::vec::Vec -where - Call: codec::Encode, - AccountId: codec::Encode, - SpecVersion: codec::Encode, -{ - pallet_bridge_dispatch::account_ownership_digest( - millau_call, - rialto_account_id, - millau_spec_version, - bp_runtime::RIALTO_CHAIN_ID, - bp_runtime::MILLAU_CHAIN_ID, - ) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn call_size() { - const BRIDGES_PALLETS_MAX_CALL_SIZE: usize = 200; - assert!( - core::mem::size_of::>() <= - BRIDGES_PALLETS_MAX_CALL_SIZE - ); - assert!( - core::mem::size_of::>() <= - BRIDGES_PALLETS_MAX_CALL_SIZE - ); - // Largest inner Call is `pallet_session::Call` with a size of 224 bytes. This size is a - // result of large `SessionKeys` struct. - // Total size of Rialto runtime Call is 232. - const MAX_CALL_SIZE: usize = 232; - assert!(core::mem::size_of::() <= MAX_CALL_SIZE); - } -} diff --git a/polkadot/bridges/bin/rialto/runtime/src/millau_messages.rs b/polkadot/bridges/bin/rialto/runtime/src/millau_messages.rs deleted file mode 100644 index fba6605c358..00000000000 --- a/polkadot/bridges/bin/rialto/runtime/src/millau_messages.rs +++ /dev/null @@ -1,558 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Everything required to serve Millau <-> Rialto messages. - -use crate::Runtime; - -use bp_messages::{ - source_chain::{SenderOrigin, TargetHeaderChain}, - target_chain::{ProvedMessages, SourceHeaderChain}, - InboundLaneData, LaneId, Message, MessageNonce, Parameter as MessagesParameter, -}; -use bp_runtime::{Chain, ChainId, MILLAU_CHAIN_ID, RIALTO_CHAIN_ID}; -use bridge_runtime_common::messages::{self, MessageBridge, MessageTransaction}; -use codec::{Decode, Encode}; -use frame_support::{ - parameter_types, - weights::{DispatchClass, Weight}, - RuntimeDebug, -}; -use scale_info::TypeInfo; -use sp_runtime::{traits::Saturating, FixedPointNumber, FixedU128}; -use sp_std::{convert::TryFrom, ops::RangeInclusive}; - -/// Initial value of `MillauToRialtoConversionRate` parameter. -pub const INITIAL_MILLAU_TO_RIALTO_CONVERSION_RATE: FixedU128 = - FixedU128::from_inner(FixedU128::DIV); -/// Initial value of `MillauFeeMultiplier` parameter. -pub const INITIAL_MILLAU_FEE_MULTIPLIER: FixedU128 = FixedU128::from_inner(FixedU128::DIV); - -parameter_types! { - /// Millau to Rialto conversion rate. Initially we treat both tokens as equal. 
- pub storage MillauToRialtoConversionRate: FixedU128 = INITIAL_MILLAU_TO_RIALTO_CONVERSION_RATE; - /// Fee multiplier value at Millau chain. - pub storage MillauFeeMultiplier: FixedU128 = INITIAL_MILLAU_FEE_MULTIPLIER; -} - -/// Message payload for Rialto -> Millau messages. -pub type ToMillauMessagePayload = - messages::source::FromThisChainMessagePayload; - -/// Message verifier for Rialto -> Millau messages. -pub type ToMillauMessageVerifier = - messages::source::FromThisChainMessageVerifier; - -/// Message payload for Millau -> Rialto messages. -pub type FromMillauMessagePayload = - messages::target::FromBridgedChainMessagePayload; - -/// Encoded Rialto Call as it comes from Millau. -pub type FromMillauEncodedCall = messages::target::FromBridgedChainEncodedMessageCall; - -/// Call-dispatch based message dispatch for Millau -> Rialto messages. -pub type FromMillauMessageDispatch = messages::target::FromBridgedChainMessageDispatch< - WithMillauMessageBridge, - crate::Runtime, - pallet_balances::Pallet, - (), ->; - -/// Messages proof for Millau -> Rialto messages. -pub type FromMillauMessagesProof = messages::target::FromBridgedChainMessagesProof; - -/// Messages delivery proof for Rialto -> Millau messages. -pub type ToMillauMessagesDeliveryProof = - messages::source::FromBridgedChainMessagesDeliveryProof; - -/// Millau <-> Rialto message bridge. -#[derive(RuntimeDebug, Clone, Copy)] -pub struct WithMillauMessageBridge; - -impl MessageBridge for WithMillauMessageBridge { - const RELAYER_FEE_PERCENT: u32 = 10; - const THIS_CHAIN_ID: ChainId = RIALTO_CHAIN_ID; - const BRIDGED_CHAIN_ID: ChainId = MILLAU_CHAIN_ID; - const BRIDGED_MESSAGES_PALLET_NAME: &'static str = bp_rialto::WITH_RIALTO_MESSAGES_PALLET_NAME; - - type ThisChain = Rialto; - type BridgedChain = Millau; - - fn bridged_balance_to_this_balance( - bridged_balance: bp_millau::Balance, - bridged_to_this_conversion_rate_override: Option, - ) -> bp_rialto::Balance { - let conversion_rate = bridged_to_this_conversion_rate_override - .unwrap_or_else(|| MillauToRialtoConversionRate::get()); - bp_rialto::Balance::try_from(conversion_rate.saturating_mul_int(bridged_balance)) - .unwrap_or(bp_rialto::Balance::MAX) - } -} - -/// Rialto chain from message lane point of view. 
-#[derive(RuntimeDebug, Clone, Copy)] -pub struct Rialto; - -impl messages::ChainWithMessages for Rialto { - type Hash = bp_rialto::Hash; - type AccountId = bp_rialto::AccountId; - type Signer = bp_rialto::AccountSigner; - type Signature = bp_rialto::Signature; - type Weight = Weight; - type Balance = bp_rialto::Balance; -} - -impl messages::ThisChainWithMessages for Rialto { - type Origin = crate::Origin; - type Call = crate::Call; - - fn is_message_accepted(send_origin: &Self::Origin, lane: &LaneId) -> bool { - send_origin.linked_account().is_some() && (*lane == [0, 0, 0, 0] || *lane == [0, 0, 0, 1]) - } - - fn maximal_pending_messages_at_outbound_lane() -> MessageNonce { - MessageNonce::MAX - } - - fn estimate_delivery_confirmation_transaction() -> MessageTransaction { - let inbound_data_size = InboundLaneData::::encoded_size_hint( - bp_rialto::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, - 1, - 1, - ) - .unwrap_or(u32::MAX); - - MessageTransaction { - dispatch_weight: bp_rialto::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT, - size: inbound_data_size - .saturating_add(bp_millau::EXTRA_STORAGE_PROOF_SIZE) - .saturating_add(bp_rialto::TX_EXTRA_BYTES), - } - } - - fn transaction_payment(transaction: MessageTransaction) -> bp_rialto::Balance { - // `transaction` may represent transaction from the future, when multiplier value will - // be larger, so let's use slightly increased value - let multiplier = FixedU128::saturating_from_rational(110, 100) - .saturating_mul(pallet_transaction_payment::Pallet::::next_fee_multiplier()); - // in our testnets, both per-byte fee and weight-to-fee are 1:1 - messages::transaction_payment( - bp_rialto::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic, - 1, - multiplier, - |weight| weight as _, - transaction, - ) - } -} - -/// Millau chain from message lane point of view. 
-#[derive(RuntimeDebug, Clone, Copy)] -pub struct Millau; - -impl messages::ChainWithMessages for Millau { - type Hash = bp_millau::Hash; - type AccountId = bp_millau::AccountId; - type Signer = bp_millau::AccountSigner; - type Signature = bp_millau::Signature; - type Weight = Weight; - type Balance = bp_millau::Balance; -} - -impl messages::BridgedChainWithMessages for Millau { - fn maximal_extrinsic_size() -> u32 { - bp_millau::Millau::max_extrinsic_size() - } - - fn message_weight_limits(_message_payload: &[u8]) -> RangeInclusive { - // we don't want to relay too large messages + keep reserve for future upgrades - let upper_limit = messages::target::maximal_incoming_message_dispatch_weight( - bp_millau::Millau::max_extrinsic_weight(), - ); - - // we're charging for payload bytes in `WithMillauMessageBridge::transaction_payment` - // function - // - // this bridge may be used to deliver all kind of messages, so we're not making any - // assumptions about minimal dispatch weight here - - 0..=upper_limit - } - - fn estimate_delivery_transaction( - message_payload: &[u8], - include_pay_dispatch_fee_cost: bool, - message_dispatch_weight: Weight, - ) -> MessageTransaction { - let message_payload_len = u32::try_from(message_payload.len()).unwrap_or(u32::MAX); - let extra_bytes_in_payload = Weight::from(message_payload_len) - .saturating_sub(pallet_bridge_messages::EXPECTED_DEFAULT_MESSAGE_LENGTH.into()); - - MessageTransaction { - dispatch_weight: extra_bytes_in_payload - .saturating_mul(bp_millau::ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT) - .saturating_add(bp_millau::DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT) - .saturating_sub(if include_pay_dispatch_fee_cost { - 0 - } else { - bp_millau::PAY_INBOUND_DISPATCH_FEE_WEIGHT - }) - .saturating_add(message_dispatch_weight), - size: message_payload_len - .saturating_add(bp_rialto::EXTRA_STORAGE_PROOF_SIZE) - .saturating_add(bp_millau::TX_EXTRA_BYTES), - } - } - - fn transaction_payment(transaction: MessageTransaction) -> bp_millau::Balance { - // we don't have a direct access to the value of multiplier at Millau chain - // => it is a messages module parameter - let multiplier = MillauFeeMultiplier::get(); - // in our testnets, both per-byte fee and weight-to-fee are 1:1 - messages::transaction_payment( - bp_millau::BlockWeights::get().get(DispatchClass::Normal).base_extrinsic, - 1, - multiplier, - |weight| weight as _, - transaction, - ) - } -} - -impl TargetHeaderChain for Millau { - type Error = &'static str; - // The proof is: - // - hash of the header this proof has been created with; - // - the storage proof of one or several keys; - // - id of the lane we prove state of. - type MessagesDeliveryProof = ToMillauMessagesDeliveryProof; - - fn verify_message(payload: &ToMillauMessagePayload) -> Result<(), Self::Error> { - messages::source::verify_chain_message::(payload) - } - - fn verify_messages_delivery_proof( - proof: Self::MessagesDeliveryProof, - ) -> Result<(LaneId, InboundLaneData), Self::Error> { - messages::source::verify_messages_delivery_proof::< - WithMillauMessageBridge, - Runtime, - crate::MillauGrandpaInstance, - >(proof) - } -} - -impl SourceHeaderChain for Millau { - type Error = &'static str; - // The proof is: - // - hash of the header this proof has been created with; - // - the storage proof of one or several keys; - // - id of the lane we prove messages for; - // - inclusive range of messages nonces that are proved. 
- type MessagesProof = FromMillauMessagesProof; - - fn verify_messages_proof( - proof: Self::MessagesProof, - messages_count: u32, - ) -> Result>, Self::Error> { - messages::target::verify_messages_proof::< - WithMillauMessageBridge, - Runtime, - crate::MillauGrandpaInstance, - >(proof, messages_count) - } -} - -impl SenderOrigin for crate::Origin { - fn linked_account(&self) -> Option { - match self.caller { - crate::OriginCaller::system(frame_system::RawOrigin::Signed(ref submitter)) => - Some(submitter.clone()), - crate::OriginCaller::system(frame_system::RawOrigin::Root) | - crate::OriginCaller::system(frame_system::RawOrigin::None) => - crate::RootAccountForPayments::get(), - _ => None, - } - } -} - -/// Rialto -> Millau message lane pallet parameters. -#[derive(RuntimeDebug, Clone, Encode, Decode, PartialEq, Eq, TypeInfo)] -pub enum RialtoToMillauMessagesParameter { - /// The conversion formula we use is: `RialtoTokens = MillauTokens * conversion_rate`. - MillauToRialtoConversionRate(FixedU128), -} - -impl MessagesParameter for RialtoToMillauMessagesParameter { - fn save(&self) { - match *self { - RialtoToMillauMessagesParameter::MillauToRialtoConversionRate(ref conversion_rate) => - MillauToRialtoConversionRate::set(conversion_rate), - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - AccountId, Call, DbWeight, ExistentialDeposit, MillauGrandpaInstance, Runtime, SystemCall, - SystemConfig, WithMillauMessagesInstance, VERSION, - }; - use bp_message_dispatch::CallOrigin; - use bp_messages::{ - target_chain::{DispatchMessage, DispatchMessageData, MessageDispatch}, - MessageKey, - }; - use bp_runtime::{derive_account_id, messages::DispatchFeePayment, Chain, SourceAccount}; - use bridge_runtime_common::{ - assert_complete_bridge_types, - integrity::{ - assert_complete_bridge_constants, AssertBridgeMessagesPalletConstants, - AssertBridgePalletNames, AssertChainConstants, AssertCompleteBridgeConstants, - }, - messages::target::{FromBridgedChainEncodedMessageCall, FromBridgedChainMessagePayload}, - }; - use frame_support::{ - traits::Currency, - weights::{GetDispatchInfo, WeightToFeePolynomial}, - }; - use sp_runtime::traits::Convert; - - #[test] - fn transfer_happens_when_dispatch_fee_is_paid_at_target_chain() { - // this test actually belongs to the `bridge-runtime-common` crate, but there we have no - // mock runtime. 
Making another one there just for this test, given that both crates - // live n single repo is an overkill - let mut ext: sp_io::TestExternalities = - SystemConfig::default().build_storage::().unwrap().into(); - ext.execute_with(|| { - let bridge = MILLAU_CHAIN_ID; - let call: Call = SystemCall::set_heap_pages { pages: 64 }.into(); - let dispatch_weight = call.get_dispatch_info().weight; - let dispatch_fee = ::WeightToFee::weight_to_fee( - &dispatch_weight, - ); - assert!(dispatch_fee > 0); - - // create relayer account with minimal balance - let relayer_account: AccountId = [1u8; 32].into(); - let initial_amount = ExistentialDeposit::get(); - let _ = as Currency>::deposit_creating( - &relayer_account, - initial_amount, - ); - - // create dispatch account with minimal balance + dispatch fee - let dispatch_account = derive_account_id::< - ::SourceChainAccountId, - >(bridge, SourceAccount::Root); - let dispatch_account = - ::AccountIdConverter::convert( - dispatch_account, - ); - let _ = as Currency>::deposit_creating( - &dispatch_account, - initial_amount + dispatch_fee, - ); - - // dispatch message with intention to pay dispatch fee at the target chain - FromMillauMessageDispatch::dispatch( - &relayer_account, - DispatchMessage { - key: MessageKey { lane_id: Default::default(), nonce: 0 }, - data: DispatchMessageData { - payload: Ok(FromBridgedChainMessagePayload:: { - spec_version: VERSION.spec_version, - weight: dispatch_weight, - origin: CallOrigin::SourceRoot, - dispatch_fee_payment: DispatchFeePayment::AtTargetChain, - call: FromBridgedChainEncodedMessageCall::new(call.encode()), - }), - fee: 1, - }, - }, - ); - - // ensure that fee has been transferred from dispatch to relayer account - assert_eq!( - as Currency>::free_balance( - &relayer_account - ), - initial_amount + dispatch_fee, - ); - assert_eq!( - as Currency>::free_balance( - &dispatch_account - ), - initial_amount, - ); - }); - } - - #[test] - fn ensure_rialto_message_lane_weights_are_correct() { - type Weights = pallet_bridge_messages::weights::MillauWeight; - - pallet_bridge_messages::ensure_weights_are_correct::( - bp_rialto::DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT, - bp_rialto::ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT, - bp_rialto::MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT, - bp_rialto::PAY_INBOUND_DISPATCH_FEE_WEIGHT, - DbWeight::get(), - ); - - let max_incoming_message_proof_size = bp_millau::EXTRA_STORAGE_PROOF_SIZE.saturating_add( - messages::target::maximal_incoming_message_size(bp_rialto::Rialto::max_extrinsic_size()), - ); - pallet_bridge_messages::ensure_able_to_receive_message::( - bp_rialto::Rialto::max_extrinsic_size(), - bp_rialto::Rialto::max_extrinsic_weight(), - max_incoming_message_proof_size, - messages::target::maximal_incoming_message_dispatch_weight( - bp_rialto::Rialto::max_extrinsic_weight(), - ), - ); - - let max_incoming_inbound_lane_data_proof_size = - bp_messages::InboundLaneData::<()>::encoded_size_hint( - bp_rialto::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, - bp_rialto::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX as _, - bp_rialto::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX as _, - ) - .unwrap_or(u32::MAX); - pallet_bridge_messages::ensure_able_to_receive_confirmation::( - bp_rialto::Rialto::max_extrinsic_size(), - bp_rialto::Rialto::max_extrinsic_weight(), - max_incoming_inbound_lane_data_proof_size, - bp_rialto::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, - bp_rialto::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, - DbWeight::get(), - ); - } - - #[test] - fn ensure_bridge_integrity() { - 
assert_complete_bridge_types!( - runtime: Runtime, - with_bridged_chain_grandpa_instance: MillauGrandpaInstance, - with_bridged_chain_messages_instance: WithMillauMessagesInstance, - bridge: WithMillauMessageBridge, - this_chain: bp_rialto::Rialto, - bridged_chain: bp_millau::Millau, - this_chain_account_id_converter: bp_rialto::AccountIdConverter - ); - - assert_complete_bridge_constants::< - Runtime, - MillauGrandpaInstance, - WithMillauMessagesInstance, - WithMillauMessageBridge, - bp_rialto::Rialto, - >(AssertCompleteBridgeConstants { - this_chain_constants: AssertChainConstants { - block_length: bp_rialto::BlockLength::get(), - block_weights: bp_rialto::BlockWeights::get(), - }, - messages_pallet_constants: AssertBridgeMessagesPalletConstants { - max_unrewarded_relayers_in_bridged_confirmation_tx: - bp_millau::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, - max_unconfirmed_messages_in_bridged_confirmation_tx: - bp_millau::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, - bridged_chain_id: bp_runtime::MILLAU_CHAIN_ID, - }, - pallet_names: AssertBridgePalletNames { - with_this_chain_messages_pallet_name: bp_rialto::WITH_RIALTO_MESSAGES_PALLET_NAME, - with_bridged_chain_grandpa_pallet_name: bp_millau::WITH_MILLAU_GRANDPA_PALLET_NAME, - with_bridged_chain_messages_pallet_name: - bp_millau::WITH_MILLAU_MESSAGES_PALLET_NAME, - }, - }); - - assert_eq!( - MillauToRialtoConversionRate::key().to_vec(), - bp_runtime::storage_parameter_key( - bp_rialto::MILLAU_TO_RIALTO_CONVERSION_RATE_PARAMETER_NAME - ) - .0, - ); - } - - #[test] - #[ignore] - fn no_stack_overflow_when_decoding_nested_call_during_dispatch() { - // this test is normally ignored, because it only makes sense to run it in release mode - - let mut ext: sp_io::TestExternalities = - SystemConfig::default().build_storage::().unwrap().into(); - ext.execute_with(|| { - let bridge = MILLAU_CHAIN_ID; - - let mut call: Call = SystemCall::set_heap_pages { pages: 64 }.into(); - - for _i in 0..3000 { - call = Call::Sudo(pallet_sudo::Call::sudo { call: Box::new(call) }); - } - - let dispatch_weight = 500; - let dispatch_fee = ::WeightToFee::weight_to_fee( - &dispatch_weight, - ); - assert!(dispatch_fee > 0); - - // create relayer account with minimal balance - let relayer_account: AccountId = [1u8; 32].into(); - let initial_amount = ExistentialDeposit::get(); - let _ = as Currency>::deposit_creating( - &relayer_account, - initial_amount, - ); - - // create dispatch account with minimal balance + dispatch fee - let dispatch_account = derive_account_id::< - ::SourceChainAccountId, - >(bridge, SourceAccount::Root); - let dispatch_account = - ::AccountIdConverter::convert( - dispatch_account, - ); - let _ = as Currency>::deposit_creating( - &dispatch_account, - initial_amount + dispatch_fee, - ); - - // dispatch message with intention to pay dispatch fee at the target chain - // - // this is where the stack overflow has happened before the fix has been applied - FromMillauMessageDispatch::dispatch( - &relayer_account, - DispatchMessage { - key: MessageKey { lane_id: Default::default(), nonce: 0 }, - data: DispatchMessageData { - payload: Ok(FromBridgedChainMessagePayload:: { - spec_version: VERSION.spec_version, - weight: dispatch_weight, - origin: CallOrigin::SourceRoot, - dispatch_fee_payment: DispatchFeePayment::AtTargetChain, - call: FromBridgedChainEncodedMessageCall::new(call.encode()), - }), - fee: 1, - }, - }, - ); - }); - } -} diff --git a/polkadot/bridges/bin/rialto/runtime/src/parachains.rs 
b/polkadot/bridges/bin/rialto/runtime/src/parachains.rs deleted file mode 100644 index 20a9aeb28c0..00000000000 --- a/polkadot/bridges/bin/rialto/runtime/src/parachains.rs +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Parachains support in Rialto runtime. - -use crate::{ - AccountId, Babe, Balance, Balances, BlockNumber, Call, Event, Origin, Registrar, Runtime, - Slots, UncheckedExtrinsic, -}; - -use frame_support::{parameter_types, weights::Weight}; -use frame_system::EnsureRoot; -use polkadot_primitives::v2::ValidatorIndex; -use polkadot_runtime_common::{paras_registrar, paras_sudo_wrapper, slots}; -use polkadot_runtime_parachains::{ - configuration as parachains_configuration, dmp as parachains_dmp, hrmp as parachains_hrmp, - inclusion as parachains_inclusion, initializer as parachains_initializer, - origin as parachains_origin, paras as parachains_paras, - paras_inherent as parachains_paras_inherent, scheduler as parachains_scheduler, - session_info as parachains_session_info, shared as parachains_shared, ump as parachains_ump, -}; -use sp_runtime::transaction_validity::TransactionPriority; - -impl frame_system::offchain::SendTransactionTypes for Runtime -where - Call: From, -{ - type Extrinsic = UncheckedExtrinsic; - type OverarchingCall = Call; -} - -/// Special `RewardValidators` that does nothing ;) -pub struct RewardValidators; -impl polkadot_runtime_parachains::inclusion::RewardValidators for RewardValidators { - fn reward_backing(_: impl IntoIterator) {} - fn reward_bitfields(_: impl IntoIterator) {} -} - -// all required parachain modules from `polkadot-runtime-parachains` crate - -impl parachains_configuration::Config for Runtime { - type WeightInfo = parachains_configuration::TestWeightInfo; -} - -impl parachains_dmp::Config for Runtime {} - -impl parachains_hrmp::Config for Runtime { - type Event = Event; - type Origin = Origin; - type Currency = Balances; - type WeightInfo = parachains_hrmp::TestWeightInfo; -} - -impl parachains_inclusion::Config for Runtime { - type Event = Event; - type RewardValidators = RewardValidators; - type DisputesHandler = (); -} - -impl parachains_initializer::Config for Runtime { - type Randomness = pallet_babe::RandomnessFromOneEpochAgo; - type ForceOrigin = EnsureRoot; - type WeightInfo = (); -} - -impl parachains_origin::Config for Runtime {} - -parameter_types! 
{ - pub const ParasUnsignedPriority: TransactionPriority = TransactionPriority::max_value(); -} - -impl parachains_paras::Config for Runtime { - type Event = Event; - type WeightInfo = parachains_paras::TestWeightInfo; - type UnsignedPriority = ParasUnsignedPriority; - type NextSessionRotation = Babe; -} - -impl parachains_paras_inherent::Config for Runtime { - type WeightInfo = parachains_paras_inherent::TestWeightInfo; -} - -impl parachains_scheduler::Config for Runtime {} - -impl parachains_session_info::Config for Runtime {} - -impl parachains_shared::Config for Runtime {} - -parameter_types! { - pub const FirstMessageFactorPercent: u64 = 100; -} - -impl parachains_ump::Config for Runtime { - type Event = Event; - type UmpSink = (); - type FirstMessageFactorPercent = FirstMessageFactorPercent; - type ExecuteOverweightOrigin = EnsureRoot; - type WeightInfo = parachains_ump::TestWeightInfo; -} - -// required onboarding pallets. We're not going to use auctions or crowdloans, so they're missing - -parameter_types! { - pub const ParaDeposit: Balance = 0; - pub const DataDepositPerByte: Balance = 0; -} - -impl paras_registrar::Config for Runtime { - type Event = Event; - type Origin = Origin; - type Currency = Balances; - type OnSwap = Slots; - type ParaDeposit = ParaDeposit; - type DataDepositPerByte = DataDepositPerByte; - type WeightInfo = paras_registrar::TestWeightInfo; -} - -parameter_types! { - pub const LeasePeriod: BlockNumber = 10 * bp_rialto::MINUTES; -} - -impl slots::Config for Runtime { - type Event = Event; - type Currency = Balances; - type Registrar = Registrar; - type LeasePeriod = LeasePeriod; - type WeightInfo = slots::TestWeightInfo; - type LeaseOffset = (); - type ForceOrigin = EnsureRoot; -} - -impl paras_sudo_wrapper::Config for Runtime {} - -pub struct ZeroWeights; - -impl polkadot_runtime_common::paras_registrar::WeightInfo for ZeroWeights { - fn reserve() -> Weight { - 0 - } - fn register() -> Weight { - 0 - } - fn force_register() -> Weight { - 0 - } - fn deregister() -> Weight { - 0 - } - fn swap() -> Weight { - 0 - } -} - -impl polkadot_runtime_common::slots::WeightInfo for ZeroWeights { - fn force_lease() -> Weight { - 0 - } - fn manage_lease_period_start(_c: u32, _t: u32) -> Weight { - 0 - } - fn clear_all_leases() -> Weight { - 0 - } - fn trigger_onboard() -> Weight { - 0 - } -} diff --git a/polkadot/bridges/bin/runtime-common/Cargo.toml b/polkadot/bridges/bin/runtime-common/Cargo.toml deleted file mode 100644 index d1ec30c0aa4..00000000000 --- a/polkadot/bridges/bin/runtime-common/Cargo.toml +++ /dev/null @@ -1,73 +0,0 @@ -[package] -name = "bridge-runtime-common" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -homepage = "https://substrate.dev" -repository = "https://github.com/paritytech/parity-bridges-common/" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } -ed25519-dalek = { version = "1.0", default-features = false, optional = true } -hash-db = { version = "0.15.2", default-features = false } -scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } -static_assertions = { version = "1.1", optional = true } - -# Bridge dependencies - -bp-message-dispatch = { path = "../../primitives/message-dispatch", default-features = false } -bp-messages = { path = "../../primitives/messages", default-features = false } -bp-runtime = { path = "../../primitives/runtime", 
default-features = false } -pallet-bridge-dispatch = { path = "../../modules/dispatch", default-features = false } -pallet-bridge-grandpa = { path = "../../modules/grandpa", default-features = false } -pallet-bridge-messages = { path = "../../modules/messages", default-features = false } - -# Substrate dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } -pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } - -[features] -default = ["std"] -std = [ - "bp-message-dispatch/std", - "bp-messages/std", - "bp-runtime/std", - "codec/std", - "frame-support/std", - "frame-system/std", - "hash-db/std", - "scale-info/std", - "pallet-bridge-dispatch/std", - "pallet-bridge-grandpa/std", - "pallet-bridge-messages/std", - "pallet-transaction-payment/std", - "scale-info/std", - "sp-api/std", - "sp-core/std", - "sp-runtime/std", - "sp-state-machine/std", - "sp-std/std", - "sp-trie/std", -] -runtime-benchmarks = [ - "ed25519-dalek/u64_backend", - "pallet-balances", - "pallet-bridge-grandpa/runtime-benchmarks", - "pallet-bridge-messages/runtime-benchmarks", - "sp-state-machine", - "sp-version", -] -integrity-test = [ - "static_assertions", -] diff --git a/polkadot/bridges/bin/runtime-common/README.md b/polkadot/bridges/bin/runtime-common/README.md deleted file mode 100644 index 5f2298cd787..00000000000 --- a/polkadot/bridges/bin/runtime-common/README.md +++ /dev/null @@ -1,181 +0,0 @@ -# Helpers for Messages Module Integration - -The [`messages`](./src/messages.rs) module of this crate contains a bunch of helpers for integrating -messages module into your runtime. Basic prerequisites of these helpers are: -- we're going to bridge Substrate-based chain with another Substrate-based chain; -- both chains have [messages module](../../modules/messages/README.md), Substrate bridge - module and the [call dispatch module](../../modules/dispatch/README.md); -- all message lanes are identical and may be used to transfer the same messages; -- the messages sent over the bridge are dispatched using - [call dispatch module](../../modules/dispatch/README.md); -- the messages are `bp_message_dispatch::MessagePayload` structures, where `call` field is - encoded `Call` of the target chain. This means that the `Call` is opaque to the - [messages module](../../modules/messages/README.md) instance at the source chain. 
- It is pre-encoded by the message submitter; -- all proofs in the [messages module](../../modules/messages/README.md) transactions are - based on the storage proofs from the bridged chain: storage proof of the outbound message (value - from the `pallet_bridge_messages::Store::MessagePayload` map), storage proof of the outbound lane - state (value from the `pallet_bridge_messages::Store::OutboundLanes` map) and storage proof of the - inbound lane state (value from the `pallet_bridge_messages::Store::InboundLanes` map); -- storage proofs are built at the finalized headers of the corresponding chain. So all message lane - transactions with proofs are verifying storage proofs against finalized chain headers from - Substrate bridge module. - -**IMPORTANT NOTE**: after reading this document, you may refer to our test runtimes -([rialto_messages.rs](../millau/runtime/src/rialto_messages.rs) and/or -[millau_messages.rs](../rialto/runtime/src/millau_messages.rs)) to see how to use these helpers. - -## Contents -- [`MessageBridge` Trait](#messagebridge-trait) -- [`ChainWithMessages` Trait ](#ChainWithMessages-trait) -- [Helpers for the Source Chain](#helpers-for-the-source-chain) -- [Helpers for the Target Chain](#helpers-for-the-target-chain) - -## `MessageBridge` Trait - -The essence of your integration will be a struct that implements a `MessageBridge` trait. It has -single method (`MessageBridge::bridged_balance_to_this_balance`), used to convert from bridged chain -tokens into this chain tokens. The bridge also requires two associated types to be specified - -`ThisChain` and `BridgedChain`. - -Worth to say that if you're going to use hardcoded constant (conversion rate) in the -`MessageBridge::bridged_balance_to_this_balance` method (or in any other method of -`ThisChainWithMessages` or `BridgedChainWithMessages` traits), then you should take a -look at the -[messages parameters functionality](../../modules/messages/README.md#Non-Essential-Functionality). -They allow pallet owner to update constants more frequently than runtime upgrade happens. - -## `ChainWithMessages` Trait - -The trait is quite simple and can easily be implemented - you just need to specify types used at the -corresponding chain. There is single exception, though (it may be changed in the future): - -- `ChainWithMessages::MessagesInstance`: this is used to compute runtime storage keys. There - may be several instances of messages pallet, included in the Runtime. Every instance stores - messages and these messages stored under different keys. When we are verifying storage proofs from - the bridged chain, we should know which instance we're talking to. This is fine, but there's - significant inconvenience with that - this chain runtime must have the same messages pallet - instance. This does not necessarily mean that we should use the same instance on both chains - - this instance may be used to bridge with another chain/instance, or may not be used at all. - -## `ThisChainWithMessages` Trait - -This trait represents this chain from bridge point of view. Let's review every method of this trait: - -- `ThisChainWithMessages::is_message_accepted`: is used to check whether given lane accepts - messages. The send-message origin is passed to the function, so you may e.g. verify that only - given pallet is able to send messages over selected lane. **IMPORTANT**: if you assume that the - message must be paid by the sender, you must ensure that the sender origin has linked the account - for paying message delivery and dispatch fee. 
- -- `ThisChainWithMessages::maximal_pending_messages_at_outbound_lane`: you should return maximal - number of pending (undelivered) messages from this function. Returning small values would require - relayers to operate faster and could make message sending logic more complicated. On the other - hand, returning large values could lead to chain state growth. - -- `ThisChainWithMessages::estimate_delivery_confirmation_transaction`: you'll need to return - estimated size and dispatch weight of the delivery confirmation transaction (that happens on - this chain) from this function. - -- `ThisChainWithMessages::transaction_payment`: you'll need to return fee that the submitter - must pay for given transaction on this chain. Normally, you would use transaction payment pallet - for this. However, if your chain has non-zero fee multiplier set, this would mean that the - payment will be computed using current value of this multiplier. But since this transaction - will be submitted in the future, you may want to choose other value instead. Otherwise, - non-altruistic relayer may choose not to submit this transaction until number of transactions - will decrease. - -## `BridgedChainWithMessages` Trait - -This trait represents this chain from bridge point of view. Let's review every method of this trait: - -- `BridgedChainWithMessages::maximal_extrinsic_size`: you will need to return the maximal - extrinsic size of the target chain from this function. - -- `MessageBridge::message_weight_limits`: you'll need to return a range of - dispatch weights that the outbound message may take at the target chain. Please keep in mind that - our helpers assume that the message is an encoded call of the target chain. But we never decode - this call at the source chain. So you can't simply get dispatch weight from pre-dispatch - information. Instead there are two options to prepare this range: if you know which calls are to - be sent over your bridge, then you may just return weight ranges for these particular calls. - Otherwise, if you're going to accept all kinds of calls, you may just return range `[0; maximal - incoming message dispatch weight]`. If you choose the latter, then you shall remember that the - delivery transaction itself has some weight, so you can't accept messages with weight equal to - maximal weight of extrinsic at the target chain. In our test chains, we reject all messages that - have declared dispatch weight larger than 50% of the maximal bridged extrinsic weight. - -- `MessageBridge::estimate_delivery_transaction`: you will need to return estimated dispatch weight and - size of the delivery transaction that delivers a given message to the target chain. The transaction - weight must or must not include the weight of pay-dispatch-fee operation, depending on the value - of `include_pay_dispatch_fee_cost` argument. - -- `MessageBridge::transaction_payment`: you'll need to return fee that the submitter - must pay for given transaction on bridged chain. The best case is when you have the same conversion - formula on both chains - then you may just reuse the `ThisChainWithMessages::transaction_payment` - implementation. Otherwise, you'll need to hardcode this formula into your runtime. - -## Helpers for the Source Chain - -The helpers for the Source Chain reside in the `source` submodule of the -[`messages`](./src/messages.rs) module. The structs are: `FromThisChainMessagePayload`, -`FromBridgedChainMessagesDeliveryProof`, `FromThisChainMessageVerifier`. 
And the helper functions -are: `maximal_message_size`, `verify_chain_message`, `verify_messages_delivery_proof` and -`estimate_message_dispatch_and_delivery_fee`. - -`FromThisChainMessagePayload` is a message that the sender sends through our bridge. It is the -`bp_message_dispatch::MessagePayload`, where `call` field is encoded target chain call. So -at this chain we don't see internals of this call - we just know its size. - -`FromThisChainMessageVerifier` is an implementation of `bp_messages::LaneMessageVerifier`. It -has following checks in its `verify_message` method: - -1. it'll verify that the used outbound lane is enabled in our runtime; - -1. it'll reject messages if there are too many undelivered outbound messages at this lane. The - sender need to wait while relayers will do their work before sending the message again; - -1. it'll reject a message if it has the wrong dispatch origin declared. Like if the submitter is not - the root of this chain, but it tries to dispatch the message at the target chain using - `bp_message_dispatch::CallOrigin::SourceRoot` origin. Or he has provided wrong signature - in the `bp_message_dispatch::CallOrigin::TargetAccount` origin; - -1. it'll reject a message if the delivery and dispatch fee that the submitter wants to pay is lesser - than the fee that is computed using the `estimate_message_dispatch_and_delivery_fee` function. - -`estimate_message_dispatch_and_delivery_fee` returns a minimal fee that the submitter needs to pay -for sending a given message. The fee includes: payment for the delivery transaction at the target -chain, payment for delivery confirmation transaction on this chain, payment for `Call` dispatch at -the target chain and relayer interest. - -`FromBridgedChainMessagesDeliveryProof` holds the lane identifier and the storage proof of this -inbound lane state at the bridged chain. This also holds the hash of the target chain header, that -was used to generate this storage proof. The proof is verified by the -`verify_messages_delivery_proof`, which simply checks that the target chain header is finalized -(using Substrate bridge module) and then reads the inbound lane state from the proof. - -`verify_chain_message` function checks that the message may be delivered to the bridged chain. There -are two main checks: - -1. that the message size is less than or equal to the `2/3` of maximal extrinsic size at the target - chain. We leave `1/3` for signed extras and for the storage proof overhead; - -1. that the message dispatch weight is less than or equal to the `1/2` of maximal normal extrinsic - weight at the target chain. We leave `1/2` for the delivery transaction overhead. - -## Helpers for the Target Chain - -The helpers for the target chain reside in the `target` submodule of the -[`messages`](./src/messages.rs) module. The structs are: `FromBridgedChainMessagePayload`, -`FromBridgedChainMessagesProof`, `FromBridgedChainMessagesProof`. And the helper functions are: -`maximal_incoming_message_dispatch_weight`, `maximal_incoming_message_size` and -`verify_messages_proof`. - -`FromBridgedChainMessagePayload` corresponds to the `FromThisChainMessagePayload` at the bridged -chain. We expect that messages with this payload are stored in the `OutboundMessages` storage map of -the [messages module](../../modules/messages/README.md). This map is used to build -`FromBridgedChainMessagesProof`. 
The proof holds the lane id, range of message nonces included in -the proof, storage proof of `OutboundMessages` entries and the hash of bridged chain header that has -been used to build the proof. Additionally, there's storage proof may contain the proof of outbound -lane state. It may be required to prune `relayers` entries at this chain (see -[messages module documentation](../../modules/messages/README.md#What-about-other-Constants-in-the-Messages-Module-Configuration-Trait) -for details). This proof is verified by the `verify_messages_proof` function. diff --git a/polkadot/bridges/bin/runtime-common/src/integrity.rs b/polkadot/bridges/bin/runtime-common/src/integrity.rs deleted file mode 100644 index ab517566a0f..00000000000 --- a/polkadot/bridges/bin/runtime-common/src/integrity.rs +++ /dev/null @@ -1,331 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Integrity tests for chain constants and pallets configuration. -//! -//! Most of the tests in this module assume that the bridge is using standard (see `crate::messages` -//! module for details) configuration. - -use crate::messages::MessageBridge; - -use bp_messages::MessageNonce; -use bp_runtime::{Chain, ChainId}; -use codec::Encode; -use frame_support::{storage::generator::StorageValue, traits::Get}; -use frame_system::limits; - -/// Macro that ensures that the runtime configuration and chain primitives crate are sharing -/// the same types (index, block number, hash, hasher, account id and header). -#[macro_export] -macro_rules! assert_chain_types( - ( runtime: $r:path, this_chain: $this:path ) => { - { - // if one of asserts fail, then either bridge isn't configured properly (or alternatively - non-standard - // configuration is used), or something has broke existing configuration (meaning that all bridged chains - // and relays will stop functioning) - use frame_system::Config as SystemConfig; - use static_assertions::assert_type_eq_all; - - assert_type_eq_all!(<$r as SystemConfig>::Index, bp_runtime::IndexOf<$this>); - assert_type_eq_all!(<$r as SystemConfig>::BlockNumber, bp_runtime::BlockNumberOf<$this>); - assert_type_eq_all!(<$r as SystemConfig>::Hash, bp_runtime::HashOf<$this>); - assert_type_eq_all!(<$r as SystemConfig>::Hashing, bp_runtime::HasherOf<$this>); - assert_type_eq_all!(<$r as SystemConfig>::AccountId, bp_runtime::AccountIdOf<$this>); - assert_type_eq_all!(<$r as SystemConfig>::Header, bp_runtime::HeaderOf<$this>); - } - } -); - -/// Macro that ensures that the bridge configuration and chain primitives crates are sharing -/// the same types (hash, account id, ...). -#[macro_export] -macro_rules! 
assert_bridge_types(
-    ( bridge: $bridge:path, this_chain: $this:path, bridged_chain: $bridged:path ) => {
-        {
-            // if one of these asserts fails, then all chains bridged with this chain, and the bridge relays, are now broken
-            //
-            // `frame_support::weights::Weight` is used here directly, because all chains we know are using this
-            // primitive (may be changed in the future)
-            use $crate::messages::{
-                AccountIdOf, BalanceOf, BridgedChain, HashOf, SignatureOf, SignerOf, ThisChain, WeightOf,
-            };
-            use static_assertions::assert_type_eq_all;
-
-            assert_type_eq_all!(HashOf<ThisChain<$bridge>>, bp_runtime::HashOf<$this>);
-            assert_type_eq_all!(AccountIdOf<ThisChain<$bridge>>, bp_runtime::AccountIdOf<$this>);
-            assert_type_eq_all!(SignerOf<ThisChain<$bridge>>, bp_runtime::AccountPublicOf<$this>);
-            assert_type_eq_all!(SignatureOf<ThisChain<$bridge>>, bp_runtime::SignatureOf<$this>);
-            assert_type_eq_all!(WeightOf<ThisChain<$bridge>>, frame_support::weights::Weight);
-            assert_type_eq_all!(BalanceOf<ThisChain<$bridge>>, bp_runtime::BalanceOf<$this>);
-
-            assert_type_eq_all!(HashOf<BridgedChain<$bridge>>, bp_runtime::HashOf<$bridged>);
-            assert_type_eq_all!(AccountIdOf<BridgedChain<$bridge>>, bp_runtime::AccountIdOf<$bridged>);
-            assert_type_eq_all!(SignerOf<BridgedChain<$bridge>>, bp_runtime::AccountPublicOf<$bridged>);
-            assert_type_eq_all!(SignatureOf<BridgedChain<$bridge>>, bp_runtime::SignatureOf<$bridged>);
-            assert_type_eq_all!(WeightOf<BridgedChain<$bridge>>, frame_support::weights::Weight);
-            assert_type_eq_all!(BalanceOf<BridgedChain<$bridge>>, bp_runtime::BalanceOf<$bridged>);
-        }
-    }
-);
-
-/// Macro that ensures that the bridge GRANDPA pallet is configured properly to bridge with given
-/// chain.
-#[macro_export]
-macro_rules! assert_bridge_grandpa_pallet_types(
-    ( runtime: $r:path, with_bridged_chain_grandpa_instance: $i:path, bridged_chain: $bridged:path ) => {
-        {
-            // if one of the asserts fails, then either the bridge isn't configured properly (or a non-standard
-            // configuration is used), or something has broken the existing configuration (meaning that all
-            // bridged chains and relays will stop functioning)
-            use pallet_bridge_grandpa::Config as GrandpaConfig;
-            use static_assertions::assert_type_eq_all;
-
-            assert_type_eq_all!(<$r as GrandpaConfig<$i>>::BridgedChain, $bridged);
-        }
-    }
-);
-
-/// Macro that ensures that the bridge messages pallet is configured properly to bridge using given
-/// configuration.
-#[macro_export]
-macro_rules! assert_bridge_messages_pallet_types(
-    (
-        runtime: $r:path,
-        with_bridged_chain_messages_instance: $i:path,
-        bridge: $bridge:path,
-        this_chain_account_id_converter: $this_converter:path
-    ) => {
-        {
-            // if one of the asserts fails, then either the bridge isn't configured properly (or a non-standard
-            // configuration is used), or something has broken the existing configuration (meaning that all
-            // bridged chains and relays will stop functioning)
-            use $crate::messages::{
-                source::FromThisChainMessagePayload,
-                target::FromBridgedChainMessagePayload,
-                AccountIdOf, BalanceOf, BridgedChain, ThisChain, WeightOf,
-            };
-            use pallet_bridge_messages::Config as MessagesConfig;
-            use static_assertions::assert_type_eq_all;
-
-            assert_type_eq_all!(<$r as MessagesConfig<$i>>::OutboundPayload, FromThisChainMessagePayload<$bridge>);
-            assert_type_eq_all!(<$r as MessagesConfig<$i>>::OutboundMessageFee, BalanceOf<ThisChain<$bridge>>);
-
-            assert_type_eq_all!(<$r as MessagesConfig<$i>>::InboundPayload, FromBridgedChainMessagePayload<$bridge>);
-            assert_type_eq_all!(<$r as MessagesConfig<$i>>::InboundMessageFee, BalanceOf<BridgedChain<$bridge>>);
-            assert_type_eq_all!(<$r as MessagesConfig<$i>>::InboundRelayer, AccountIdOf<BridgedChain<$bridge>>);
-
-            assert_type_eq_all!(<$r as MessagesConfig<$i>>::AccountIdConverter, $this_converter);
-
-            assert_type_eq_all!(<$r as MessagesConfig<$i>>::TargetHeaderChain, BridgedChain<$bridge>);
-            assert_type_eq_all!(<$r as MessagesConfig<$i>>::SourceHeaderChain, BridgedChain<$bridge>);
-        }
-    }
-);
-
-/// Macro that combines four other macro calls - `assert_chain_types`, `assert_bridge_types`,
-/// `assert_bridge_grandpa_pallet_types` and `assert_bridge_messages_pallet_types`. It may be used
-/// at the chain that is implementing the complete standard messages bridge (i.e. with bridge GRANDPA
-/// and messages pallets deployed).
-#[macro_export]
-macro_rules! assert_complete_bridge_types(
-    (
-        runtime: $r:path,
-        with_bridged_chain_grandpa_instance: $gi:path,
-        with_bridged_chain_messages_instance: $mi:path,
-        bridge: $bridge:path,
-        this_chain: $this:path,
-        bridged_chain: $bridged:path,
-        this_chain_account_id_converter: $this_converter:path
-    ) => {
-        $crate::assert_chain_types!(runtime: $r, this_chain: $this);
-        $crate::assert_bridge_types!(bridge: $bridge, this_chain: $this, bridged_chain: $bridged);
-        $crate::assert_bridge_grandpa_pallet_types!(
-            runtime: $r,
-            with_bridged_chain_grandpa_instance: $gi,
-            bridged_chain: $bridged
-        );
-        $crate::assert_bridge_messages_pallet_types!(
-            runtime: $r,
-            with_bridged_chain_messages_instance: $mi,
-            bridge: $bridge,
-            this_chain_account_id_converter: $this_converter
-        );
-    }
-);
-
-/// Parameters for asserting chain-related constants.
-#[derive(Debug)]
-pub struct AssertChainConstants {
-    /// Block length limits of the chain.
-    pub block_length: limits::BlockLength,
-    /// Block weight limits of the chain.
-    pub block_weights: limits::BlockWeights,
-}
-
-/// Test that our hardcoded, chain-related constants match the chain runtime configuration.
-///
-/// In particular, this test ensures that:
-///
-/// 1) block weight limits are matching;
-/// 2) block size limits are matching.
-pub fn assert_chain_constants<R, C>(params: AssertChainConstants)
-where
-    R: frame_system::Config,
-    C: Chain,
-{
-    // we don't check the runtime version here, because in our case the relay is built from one
-    // repo while the runtime lives in another repo, along with an outdated relay version. To avoid
-    // unneeded commits, let's not raise an error in case of version mismatch.
-
-    // if one of the following asserts fails, it means that we may need to upgrade the bridged chain
-    // and the relay to use updated constants. If constants are now smaller than before, it may lead
-    // to undeliverable messages.
-
-    // `BlockLength` struct is not implementing `PartialEq`, so we compare encoded values here.
-    assert_eq!(
-        R::BlockLength::get().encode(),
-        params.block_length.encode(),
-        "BlockLength from runtime ({:?}) differs from the hardcoded one: {:?}",
-        R::BlockLength::get(),
-        params.block_length,
-    );
-    // `BlockWeights` struct is not implementing `PartialEq`, so we compare encoded values here.
-    assert_eq!(
-        R::BlockWeights::get().encode(),
-        params.block_weights.encode(),
-        "BlockWeights from runtime ({:?}) differs from the hardcoded one: {:?}",
-        R::BlockWeights::get(),
-        params.block_weights,
-    );
-}
-
-/// Test that the constants used in the GRANDPA pallet configuration are valid.
-pub fn assert_bridge_grandpa_pallet_constants<R, GI>()
-where
-    R: pallet_bridge_grandpa::Config<GI>,
-    GI: 'static,
-{
-    assert!(
-        R::MaxRequests::get() > 0,
-        "MaxRequests ({}) must be larger than zero",
-        R::MaxRequests::get(),
-    );
-}
-
-/// Parameters for asserting messages pallet constants.
-#[derive(Debug)]
-pub struct AssertBridgeMessagesPalletConstants {
-    /// Maximal number of unrewarded relayer entries in a confirmation transaction at the bridged
-    /// chain.
-    pub max_unrewarded_relayers_in_bridged_confirmation_tx: MessageNonce,
-    /// Maximal number of unconfirmed messages in a confirmation transaction at the bridged chain.
-    pub max_unconfirmed_messages_in_bridged_confirmation_tx: MessageNonce,
-    /// Identifier of the bridged chain.
-    pub bridged_chain_id: ChainId,
-}
-
-/// Test that the constants used in the messages pallet configuration are valid.
-pub fn assert_bridge_messages_pallet_constants<R, MI>(params: AssertBridgeMessagesPalletConstants)
-where
-    R: pallet_bridge_messages::Config<MI>,
-    MI: 'static,
-{
-    assert!(
-        R::MaxMessagesToPruneAtOnce::get() > 0,
-        "MaxMessagesToPruneAtOnce ({}) must be larger than zero",
-        R::MaxMessagesToPruneAtOnce::get(),
-    );
-    assert!(
-        R::MaxUnrewardedRelayerEntriesAtInboundLane::get() <= params.max_unrewarded_relayers_in_bridged_confirmation_tx,
-        "MaxUnrewardedRelayerEntriesAtInboundLane ({}) must be <= the hardcoded value for the bridged chain: {}",
-        R::MaxUnrewardedRelayerEntriesAtInboundLane::get(),
-        params.max_unrewarded_relayers_in_bridged_confirmation_tx,
-    );
-    assert!(
-        R::MaxUnconfirmedMessagesAtInboundLane::get() <= params.max_unconfirmed_messages_in_bridged_confirmation_tx,
-        "MaxUnconfirmedMessagesAtInboundLane ({}) must be <= the hardcoded value for the bridged chain: {}",
-        R::MaxUnconfirmedMessagesAtInboundLane::get(),
-        params.max_unconfirmed_messages_in_bridged_confirmation_tx,
-    );
-    assert_eq!(R::BridgedChainId::get(), params.bridged_chain_id);
-}
-
-/// Parameters for asserting bridge pallet names.
-#[derive(Debug)]
-pub struct AssertBridgePalletNames<'a> {
-    /// Name of the messages pallet, deployed at the bridged chain and used to bridge with this
-    /// chain.
-    pub with_this_chain_messages_pallet_name: &'a str,
-    /// Name of the GRANDPA pallet, deployed at this chain and used to bridge with the bridged
-    /// chain.
-    pub with_bridged_chain_grandpa_pallet_name: &'a str,
-    /// Name of the messages pallet, deployed at this chain and used to bridge with the bridged
-    /// chain.
- pub with_bridged_chain_messages_pallet_name: &'a str, -} - -/// Tests that bridge pallet names used in `construct_runtime!()` macro call are matching constants -/// from chain primitives crates. -pub fn assert_bridge_pallet_names(params: AssertBridgePalletNames) -where - B: MessageBridge, - R: pallet_bridge_grandpa::Config + pallet_bridge_messages::Config, - GI: 'static, - MI: 'static, -{ - assert_eq!(B::BRIDGED_MESSAGES_PALLET_NAME, params.with_this_chain_messages_pallet_name); - assert_eq!( - pallet_bridge_grandpa::PalletOwner::::storage_value_final_key().to_vec(), - bp_runtime::storage_value_key(params.with_bridged_chain_grandpa_pallet_name, "PalletOwner",).0, - ); - assert_eq!( - pallet_bridge_messages::PalletOwner::::storage_value_final_key().to_vec(), - bp_runtime::storage_value_key( - params.with_bridged_chain_messages_pallet_name, - "PalletOwner", - ) - .0, - ); -} - -/// Parameters for asserting complete standard messages bridge. -#[derive(Debug)] -pub struct AssertCompleteBridgeConstants<'a> { - /// Parameters to assert this chain constants. - pub this_chain_constants: AssertChainConstants, - /// Parameters to assert messages pallet constants. - pub messages_pallet_constants: AssertBridgeMessagesPalletConstants, - /// Parameters to assert pallet names constants. - pub pallet_names: AssertBridgePalletNames<'a>, -} - -/// All bridge-related constants tests for the complete standard messages bridge (i.e. with bridge -/// GRANDPA and messages pallets deployed). -pub fn assert_complete_bridge_constants(params: AssertCompleteBridgeConstants) -where - R: frame_system::Config - + pallet_bridge_grandpa::Config - + pallet_bridge_messages::Config, - GI: 'static, - MI: 'static, - B: MessageBridge, - This: Chain, -{ - assert_chain_constants::(params.this_chain_constants); - assert_bridge_grandpa_pallet_constants::(); - assert_bridge_messages_pallet_constants::(params.messages_pallet_constants); - assert_bridge_pallet_names::(params.pallet_names); -} diff --git a/polkadot/bridges/bin/runtime-common/src/lib.rs b/polkadot/bridges/bin/runtime-common/src/lib.rs deleted file mode 100644 index c7fb98aba76..00000000000 --- a/polkadot/bridges/bin/runtime-common/src/lib.rs +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Common types/functions that may be used by runtimes of all bridged chains. 
- -#![cfg_attr(not(feature = "std"), no_std)] - -pub mod messages; -pub mod messages_api; -pub mod messages_benchmarking; - -#[cfg(feature = "integrity-test")] -pub mod integrity; diff --git a/polkadot/bridges/bin/runtime-common/src/messages.rs b/polkadot/bridges/bin/runtime-common/src/messages.rs deleted file mode 100644 index a26dce92bce..00000000000 --- a/polkadot/bridges/bin/runtime-common/src/messages.rs +++ /dev/null @@ -1,1661 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Types that allow runtime to act as a source/target endpoint of message lanes. -//! -//! Messages are assumed to be encoded `Call`s of the target chain. Call-dispatch -//! pallet is used to dispatch incoming messages. Message identified by a tuple -//! of to elements - message lane id and message nonce. - -use bp_message_dispatch::MessageDispatch as _; -use bp_messages::{ - source_chain::LaneMessageVerifier, - target_chain::{DispatchMessage, MessageDispatch, ProvedLaneMessages, ProvedMessages}, - InboundLaneData, LaneId, Message, MessageData, MessageKey, MessageNonce, OutboundLaneData, -}; -use bp_runtime::{ - messages::{DispatchFeePayment, MessageDispatchResult}, - ChainId, Size, StorageProofChecker, -}; -use codec::{Decode, DecodeLimit, Encode}; -use frame_support::{ - traits::{Currency, ExistenceRequirement}, - weights::{Weight, WeightToFee}, - RuntimeDebug, -}; -use hash_db::Hasher; -use scale_info::TypeInfo; -use sp_runtime::{ - traits::{AtLeast32BitUnsigned, CheckedAdd, CheckedDiv, CheckedMul, Saturating, Zero}, - FixedPointNumber, FixedPointOperand, FixedU128, -}; -use sp_std::{cmp::PartialOrd, fmt::Debug, marker::PhantomData, ops::RangeInclusive, vec::Vec}; -use sp_trie::StorageProof; - -/// Bidirectional message bridge. -pub trait MessageBridge { - /// Relayer interest (in percents). - const RELAYER_FEE_PERCENT: u32; - - /// Identifier of this chain. - const THIS_CHAIN_ID: ChainId; - /// Identifier of the Bridged chain. - const BRIDGED_CHAIN_ID: ChainId; - /// Name of the paired messages pallet instance at the Bridged chain. - /// - /// Should be the name that is used in the `construct_runtime!()` macro. - const BRIDGED_MESSAGES_PALLET_NAME: &'static str; - - /// This chain in context of message bridge. - type ThisChain: ThisChainWithMessages; - /// Bridged chain in context of message bridge. - type BridgedChain: BridgedChainWithMessages; - - /// Convert Bridged chain balance into This chain balance. - fn bridged_balance_to_this_balance( - bridged_balance: BalanceOf>, - bridged_to_this_conversion_rate_override: Option, - ) -> BalanceOf>; -} - -/// Chain that has `pallet-bridge-messages` and `dispatch` modules. -pub trait ChainWithMessages { - /// Hash used in the chain. - type Hash: Decode; - /// Accound id on the chain. 
- type AccountId: Encode + Decode; - /// Public key of the chain account that may be used to verify signatures. - type Signer: Encode + Decode; - /// Signature type used on the chain. - type Signature: Encode + Decode; - /// Type of weight that is used on the chain. This would almost always be a regular - /// `frame_support::weight::Weight`. But since the meaning of weight on different chains - /// may be different, the `WeightOf<>` construct is used to avoid confusion between - /// different weights. - type Weight: From + PartialOrd; - /// Type of balances that is used on the chain. - type Balance: Encode - + Decode - + CheckedAdd - + CheckedDiv - + CheckedMul - + PartialOrd - + From - + Copy; -} - -/// Message related transaction parameters estimation. -#[derive(RuntimeDebug)] -pub struct MessageTransaction { - /// The estimated dispatch weight of the transaction. - pub dispatch_weight: Weight, - /// The estimated size of the encoded transaction. - pub size: u32, -} - -/// This chain that has `pallet-bridge-messages` and `dispatch` modules. -pub trait ThisChainWithMessages: ChainWithMessages { - /// Call origin on the chain. - type Origin; - /// Call type on the chain. - type Call: Encode + Decode; - - /// Do we accept message sent by given origin to given lane? - fn is_message_accepted(origin: &Self::Origin, lane: &LaneId) -> bool; - - /// Maximal number of pending (not yet delivered) messages at This chain. - /// - /// Any messages over this limit, will be rejected. - fn maximal_pending_messages_at_outbound_lane() -> MessageNonce; - - /// Estimate size and weight of single message delivery confirmation transaction at This chain. - fn estimate_delivery_confirmation_transaction() -> MessageTransaction>; - - /// Returns minimal transaction fee that must be paid for given transaction at This chain. - fn transaction_payment(transaction: MessageTransaction>) -> BalanceOf; -} - -/// Bridged chain that has `pallet-bridge-messages` and `dispatch` modules. -pub trait BridgedChainWithMessages: ChainWithMessages { - /// Maximal extrinsic size at Bridged chain. - fn maximal_extrinsic_size() -> u32; - - /// Returns feasible weights range for given message payload at the Bridged chain. - /// - /// If message is being sent with the weight that is out of this range, then it - /// should be rejected. - /// - /// Weights returned from this function shall not include transaction overhead - /// (like weight of signature and signed extensions verification), because they're - /// already accounted by the `weight_of_delivery_transaction`. So this function should - /// return pure call dispatch weights range. - fn message_weight_limits(message_payload: &[u8]) -> RangeInclusive; - - /// Estimate size and weight of single message delivery transaction at the Bridged chain. - fn estimate_delivery_transaction( - message_payload: &[u8], - include_pay_dispatch_fee_cost: bool, - message_dispatch_weight: WeightOf, - ) -> MessageTransaction>; - - /// Returns minimal transaction fee that must be paid for given transaction at the Bridged - /// chain. - fn transaction_payment(transaction: MessageTransaction>) -> BalanceOf; -} - -/// This chain in context of message bridge. -pub type ThisChain = ::ThisChain; -/// Bridged chain in context of message bridge. -pub type BridgedChain = ::BridgedChain; -/// Hash used on the chain. -pub type HashOf = ::Hash; -/// Account id used on the chain. -pub type AccountIdOf = ::AccountId; -/// Public key of the chain account that may be used to verify signature. 
-pub type SignerOf = ::Signer; -/// Signature type used on the chain. -pub type SignatureOf = ::Signature; -/// Type of weight that used on the chain. -pub type WeightOf = ::Weight; -/// Type of balances that is used on the chain. -pub type BalanceOf = ::Balance; -/// Type of origin that is used on the chain. -pub type OriginOf = ::Origin; -/// Type of call that is used on this chain. -pub type CallOf = ::Call; - -/// Raw storage proof type (just raw trie nodes). -pub type RawStorageProof = Vec>; - -/// Compute fee of transaction at runtime where regular transaction payment pallet is being used. -/// -/// The value of `multiplier` parameter is the expected value of -/// `pallet_transaction_payment::NextFeeMultiplier` at the moment when transaction is submitted. If -/// you're charging this payment in advance (and that's what happens with delivery and confirmation -/// transaction in this crate), then there's a chance that the actual fee will be larger than what -/// is paid in advance. So the value must be chosen carefully. -pub fn transaction_payment( - base_extrinsic_weight: Weight, - per_byte_fee: Balance, - multiplier: FixedU128, - weight_to_fee: impl Fn(Weight) -> Balance, - transaction: MessageTransaction, -) -> Balance { - // base fee is charged for every tx - let base_fee = weight_to_fee(base_extrinsic_weight); - - // non-adjustable per-byte fee - let len_fee = per_byte_fee.saturating_mul(Balance::from(transaction.size)); - - // the adjustable part of the fee - let unadjusted_weight_fee = weight_to_fee(transaction.dispatch_weight); - let adjusted_weight_fee = multiplier.saturating_mul_int(unadjusted_weight_fee); - - base_fee.saturating_add(len_fee).saturating_add(adjusted_weight_fee) -} - -/// Sub-module that is declaring types required for processing This -> Bridged chain messages. -pub mod source { - use super::*; - - /// Encoded Call of the Bridged chain. We never try to decode it on This chain. - pub type BridgedChainOpaqueCall = Vec; - - /// Message payload for This -> Bridged chain messages. - pub type FromThisChainMessagePayload = bp_message_dispatch::MessagePayload< - AccountIdOf>, - SignerOf>, - SignatureOf>, - BridgedChainOpaqueCall, - >; - - /// Messages delivery proof from bridged chain: - /// - /// - hash of finalized header; - /// - storage proof of inbound lane state; - /// - lane id. - #[derive(Clone, Decode, Encode, Eq, PartialEq, RuntimeDebug, TypeInfo)] - pub struct FromBridgedChainMessagesDeliveryProof { - /// Hash of the bridge header the proof is for. - pub bridged_header_hash: BridgedHeaderHash, - /// Storage trie proof generated for [`Self::bridged_header_hash`]. - pub storage_proof: RawStorageProof, - /// Lane id of which messages were delivered and the proof is for. - pub lane: LaneId, - } - - impl Size for FromBridgedChainMessagesDeliveryProof { - fn size_hint(&self) -> u32 { - u32::try_from( - self.storage_proof - .iter() - .fold(0usize, |sum, node| sum.saturating_add(node.len())), - ) - .unwrap_or(u32::MAX) - } - } - - /// 'Parsed' message delivery proof - inbound lane id and its state. - pub type ParsedMessagesDeliveryProofFromBridgedChain = - (LaneId, InboundLaneData>>); - - /// Message verifier that is doing all basic checks. - /// - /// This verifier assumes following: - /// - /// - all message lanes are equivalent, so all checks are the same; - /// - messages are being dispatched using `pallet-bridge-dispatch` pallet on the target chain. 
- /// - /// Following checks are made: - /// - /// - message is rejected if its lane is currently blocked; - /// - message is rejected if there are too many pending (undelivered) messages at the outbound - /// lane; - /// - check that the sender has rights to dispatch the call on target chain using provided - /// dispatch origin; - /// - check that the sender has paid enough funds for both message delivery and dispatch. - #[derive(RuntimeDebug)] - pub struct FromThisChainMessageVerifier(PhantomData); - - /// The error message returned from LaneMessageVerifier when outbound lane is disabled. - pub const MESSAGE_REJECTED_BY_OUTBOUND_LANE: &str = - "The outbound message lane has rejected the message."; - /// The error message returned from LaneMessageVerifier when too many pending messages at the - /// lane. - pub const TOO_MANY_PENDING_MESSAGES: &str = "Too many pending messages at the lane."; - /// The error message returned from LaneMessageVerifier when call origin is mismatch. - pub const BAD_ORIGIN: &str = "Unable to match the source origin to expected target origin."; - /// The error message returned from LaneMessageVerifier when the message fee is too low. - pub const TOO_LOW_FEE: &str = "Provided fee is below minimal threshold required by the lane."; - - impl - LaneMessageVerifier< - OriginOf>, - AccountIdOf>, - FromThisChainMessagePayload, - BalanceOf>, - > for FromThisChainMessageVerifier - where - B: MessageBridge, - // matches requirements from the `frame_system::Config::Origin` - OriginOf>: Clone - + Into>>, OriginOf>>>, - AccountIdOf>: PartialEq + Clone, - { - type Error = &'static str; - - fn verify_message( - submitter: &OriginOf>, - delivery_and_dispatch_fee: &BalanceOf>, - lane: &LaneId, - lane_outbound_data: &OutboundLaneData, - payload: &FromThisChainMessagePayload, - ) -> Result<(), Self::Error> { - // reject message if lane is blocked - if !ThisChain::::is_message_accepted(submitter, lane) { - return Err(MESSAGE_REJECTED_BY_OUTBOUND_LANE) - } - - // reject message if there are too many pending messages at this lane - let max_pending_messages = ThisChain::::maximal_pending_messages_at_outbound_lane(); - let pending_messages = lane_outbound_data - .latest_generated_nonce - .saturating_sub(lane_outbound_data.latest_received_nonce); - if pending_messages > max_pending_messages { - return Err(TOO_MANY_PENDING_MESSAGES) - } - - // Do the dispatch-specific check. We assume that the target chain uses - // `Dispatch`, so we verify the message accordingly. - let raw_origin_or_err: Result< - frame_system::RawOrigin>>, - OriginOf>, - > = submitter.clone().into(); - match raw_origin_or_err { - Ok(raw_origin) => - pallet_bridge_dispatch::verify_message_origin(&raw_origin, payload) - .map(drop) - .map_err(|_| BAD_ORIGIN)?, - Err(_) => { - // so what it means that we've failed to convert origin to the - // `frame_system::RawOrigin`? now it means that the custom pallet origin has - // been used to send the message. Do we need to verify it? The answer is no, - // because pallet may craft any origin (e.g. root) && we can't verify whether it - // is valid, or not. - }, - }; - - let minimal_fee_in_this_tokens = estimate_message_dispatch_and_delivery_fee::( - payload, - B::RELAYER_FEE_PERCENT, - None, - )?; - - // compare with actual fee paid - if *delivery_and_dispatch_fee < minimal_fee_in_this_tokens { - return Err(TOO_LOW_FEE) - } - - Ok(()) - } - } - - /// Return maximal message size of This -> Bridged chain message. 
-    pub fn maximal_message_size<B: MessageBridge>() -> u32 {
-        super::target::maximal_incoming_message_size(BridgedChain::<B>::maximal_extrinsic_size())
-    }
-
-    /// Do basic Bridged-chain specific verification of a This -> Bridged chain message.
-    ///
-    /// An Ok result from this function means that the delivery transaction with this message
-    /// may be 'mined' by the target chain. But the lane may have its own checks (e.g. fee
-    /// check) that would reject the message (see `FromThisChainMessageVerifier`).
-    pub fn verify_chain_message<B: MessageBridge>(
-        payload: &FromThisChainMessagePayload<B>,
-    ) -> Result<(), &'static str> {
-        let weight_limits = BridgedChain::<B>::message_weight_limits(&payload.call);
-        if !weight_limits.contains(&payload.weight.into()) {
-            return Err("Incorrect message weight declared")
-        }
-
-        // The maximal size of an extrinsic at a Substrate-based chain depends on the
-        // `frame_system::Config::MaximumBlockLength` and
-        // `frame_system::Config::AvailableBlockRatio` constants. This check is here to be sure that
-        // the lane won't get stuck because a message is too large to fit into the delivery transaction.
-        //
-        // **IMPORTANT NOTE**: the delivery transaction contains storage proof of the message, not
-        // the message itself. The proof is always larger than the message. But unless chain state
-        // is enormously large, it should be several dozens/hundreds of bytes. The delivery
-        // transaction also contains signatures and signed extensions. Because of this, we reserve
-        // 1/3 of the maximal extrinsic weight for this data.
-        if payload.call.len() > maximal_message_size::<B>() as usize {
-            return Err("The message is too large to be sent over the lane")
-        }
-
-        Ok(())
-    }
-
-    /// Estimate delivery and dispatch fee that must be paid for delivering a message to the Bridged
-    /// chain.
-    ///
-    /// The fee is paid in This chain Balance, but we use Bridged chain balance to avoid additional
-    /// conversions. Returns `None` if overflow has happened.
- pub fn estimate_message_dispatch_and_delivery_fee( - payload: &FromThisChainMessagePayload, - relayer_fee_percent: u32, - bridged_to_this_conversion_rate: Option, - ) -> Result>, &'static str> { - // the fee (in Bridged tokens) of all transactions that are made on the Bridged chain - // - // if we're going to pay dispatch fee at the target chain, then we don't include weight - // of the message dispatch in the delivery transaction cost - let pay_dispatch_fee_at_target_chain = - payload.dispatch_fee_payment == DispatchFeePayment::AtTargetChain; - let delivery_transaction = BridgedChain::::estimate_delivery_transaction( - &payload.encode(), - pay_dispatch_fee_at_target_chain, - if pay_dispatch_fee_at_target_chain { 0.into() } else { payload.weight.into() }, - ); - let delivery_transaction_fee = BridgedChain::::transaction_payment(delivery_transaction); - - // the fee (in This tokens) of all transactions that are made on This chain - let confirmation_transaction = ThisChain::::estimate_delivery_confirmation_transaction(); - let confirmation_transaction_fee = - ThisChain::::transaction_payment(confirmation_transaction); - - // minimal fee (in This tokens) is a sum of all required fees - let minimal_fee = B::bridged_balance_to_this_balance( - delivery_transaction_fee, - bridged_to_this_conversion_rate, - ) - .checked_add(&confirmation_transaction_fee); - - // before returning, add extra fee that is paid to the relayer (relayer interest) - minimal_fee - .and_then(|fee| - // having message with fee that is near the `Balance::MAX_VALUE` of the chain is - // unlikely and should be treated as an error - // => let's do multiplication first - fee - .checked_mul(&relayer_fee_percent.into()) - .and_then(|interest| interest.checked_div(&100u32.into())) - .and_then(|interest| fee.checked_add(&interest))) - .ok_or("Overflow when computing minimal required message delivery and dispatch fee") - } - - /// Verify proof of This -> Bridged chain messages delivery. - pub fn verify_messages_delivery_proof( - proof: FromBridgedChainMessagesDeliveryProof>>, - ) -> Result, &'static str> - where - ThisRuntime: pallet_bridge_grandpa::Config, - HashOf>: Into< - bp_runtime::HashOf< - >::BridgedChain, - >, - >, - { - let FromBridgedChainMessagesDeliveryProof { bridged_header_hash, storage_proof, lane } = - proof; - pallet_bridge_grandpa::Pallet::::parse_finalized_storage_proof( - bridged_header_hash.into(), - StorageProof::new(storage_proof), - |storage| { - // Messages delivery proof is just proof of single storage key read => any error - // is fatal. - let storage_inbound_lane_data_key = - bp_messages::storage_keys::inbound_lane_data_key(B::BRIDGED_MESSAGES_PALLET_NAME, &lane); - let raw_inbound_lane_data = storage - .read_value(storage_inbound_lane_data_key.0.as_ref()) - .map_err(|_| "Failed to read inbound lane state from storage proof")? - .ok_or("Inbound lane state is missing from the messages proof")?; - let inbound_lane_data = InboundLaneData::decode(&mut &raw_inbound_lane_data[..]) - .map_err(|_| "Failed to decode inbound lane state from the proof")?; - - Ok((lane, inbound_lane_data)) - }, - ) - .map_err(<&'static str>::from)? - } -} - -/// Sub-module that is declaring types required for processing Bridged -> This chain messages. -pub mod target { - use super::*; - - /// Call origin for Bridged -> This chain messages. - pub type FromBridgedChainMessageCallOrigin = bp_message_dispatch::CallOrigin< - AccountIdOf>, - SignerOf>, - SignatureOf>, - >; - - /// Decoded Bridged -> This message payload. 
- pub type FromBridgedChainMessagePayload = bp_message_dispatch::MessagePayload< - AccountIdOf>, - SignerOf>, - SignatureOf>, - FromBridgedChainEncodedMessageCall>>, - >; - - /// Messages proof from bridged chain: - /// - /// - hash of finalized header; - /// - storage proof of messages and (optionally) outbound lane state; - /// - lane id; - /// - nonces (inclusive range) of messages which are included in this proof. - #[derive(Clone, Decode, Encode, Eq, PartialEq, RuntimeDebug, TypeInfo)] - pub struct FromBridgedChainMessagesProof { - /// Hash of the finalized bridged header the proof is for. - pub bridged_header_hash: BridgedHeaderHash, - /// A storage trie proof of messages being delivered. - pub storage_proof: RawStorageProof, - pub lane: LaneId, - /// Nonce of the first message being delivered. - pub nonces_start: MessageNonce, - /// Nonce of the last message being delivered. - pub nonces_end: MessageNonce, - } - - impl Size for FromBridgedChainMessagesProof { - fn size_hint(&self) -> u32 { - u32::try_from( - self.storage_proof - .iter() - .fold(0usize, |sum, node| sum.saturating_add(node.len())), - ) - .unwrap_or(u32::MAX) - } - } - - /// Encoded Call of This chain as it is transferred over bridge. - /// - /// Our Call is opaque (`Vec`) for Bridged chain. So it is encoded, prefixed with - /// vector length. Custom decode implementation here is exactly to deal with this. - #[derive(Decode, Encode, RuntimeDebug, PartialEq)] - pub struct FromBridgedChainEncodedMessageCall { - encoded_call: Vec, - _marker: PhantomData, - } - - impl FromBridgedChainEncodedMessageCall { - /// Create encoded call. - pub fn new(encoded_call: Vec) -> Self { - FromBridgedChainEncodedMessageCall { encoded_call, _marker: PhantomData::default() } - } - } - - impl From> - for Result - { - fn from(encoded_call: FromBridgedChainEncodedMessageCall) -> Self { - DecodedCall::decode_with_depth_limit( - sp_api::MAX_EXTRINSIC_DEPTH, - &mut &encoded_call.encoded_call[..], - ) - .map_err(drop) - } - } - - /// Dispatching Bridged -> This chain messages. 
- #[derive(RuntimeDebug, Clone, Copy)] - pub struct FromBridgedChainMessageDispatch { - _marker: PhantomData<(B, ThisRuntime, ThisCurrency, ThisDispatchInstance)>, - } - - impl - MessageDispatch>, BalanceOf>> - for FromBridgedChainMessageDispatch - where - BalanceOf>: Saturating + FixedPointOperand, - ThisDispatchInstance: 'static, - ThisRuntime: pallet_bridge_dispatch::Config< - ThisDispatchInstance, - BridgeMessageId = (LaneId, MessageNonce), - > + pallet_transaction_payment::Config, - ::OnChargeTransaction: - pallet_transaction_payment::OnChargeTransaction< - ThisRuntime, - Balance = BalanceOf>, - >, - ThisCurrency: Currency>, Balance = BalanceOf>>, - pallet_bridge_dispatch::Pallet: - bp_message_dispatch::MessageDispatch< - AccountIdOf>, - (LaneId, MessageNonce), - Message = FromBridgedChainMessagePayload, - >, - { - type DispatchPayload = FromBridgedChainMessagePayload; - - fn dispatch_weight( - message: &DispatchMessage>>, - ) -> frame_support::weights::Weight { - message.data.payload.as_ref().map(|payload| payload.weight).unwrap_or(0) - } - - fn dispatch( - relayer_account: &AccountIdOf>, - message: DispatchMessage>>, - ) -> MessageDispatchResult { - let message_id = (message.key.lane_id, message.key.nonce); - pallet_bridge_dispatch::Pallet::::dispatch( - B::BRIDGED_CHAIN_ID, - B::THIS_CHAIN_ID, - message_id, - message.data.payload.map_err(drop), - |dispatch_origin, dispatch_weight| { - let unadjusted_weight_fee = - ThisRuntime::WeightToFee::weight_to_fee(&dispatch_weight); - let fee_multiplier = - pallet_transaction_payment::Pallet::::next_fee_multiplier(); - let adjusted_weight_fee = - fee_multiplier.saturating_mul_int(unadjusted_weight_fee); - if !adjusted_weight_fee.is_zero() { - ThisCurrency::transfer( - dispatch_origin, - relayer_account, - adjusted_weight_fee, - ExistenceRequirement::AllowDeath, - ) - .map_err(drop) - } else { - Ok(()) - } - }, - ) - } - } - - /// Return maximal dispatch weight of the message we're able to receive. - pub fn maximal_incoming_message_dispatch_weight(maximal_extrinsic_weight: Weight) -> Weight { - maximal_extrinsic_weight / 2 - } - - /// Return maximal message size given maximal extrinsic size. - pub fn maximal_incoming_message_size(maximal_extrinsic_size: u32) -> u32 { - maximal_extrinsic_size / 3 * 2 - } - - /// Verify proof of Bridged -> This chain messages. - /// - /// The `messages_count` argument verification (sane limits) is supposed to be made - /// outside of this function. This function only verifies that the proof declares exactly - /// `messages_count` messages. 
- pub fn verify_messages_proof( - proof: FromBridgedChainMessagesProof>>, - messages_count: u32, - ) -> Result>>>, &'static str> - where - ThisRuntime: pallet_bridge_grandpa::Config, - HashOf>: Into< - bp_runtime::HashOf< - >::BridgedChain, - >, - >, - { - verify_messages_proof_with_parser::( - proof, - messages_count, - |bridged_header_hash, bridged_storage_proof| { - pallet_bridge_grandpa::Pallet::::parse_finalized_storage_proof( - bridged_header_hash.into(), - StorageProof::new(bridged_storage_proof), - |storage_adapter| storage_adapter, - ) - .map(|storage| StorageProofCheckerAdapter::<_, B> { - storage, - _dummy: Default::default(), - }) - .map_err(|err| MessageProofError::Custom(err.into())) - }, - ) - .map_err(Into::into) - } - - #[derive(Debug, PartialEq)] - pub(crate) enum MessageProofError { - Empty, - MessagesCountMismatch, - MissingRequiredMessage, - FailedToDecodeMessage, - FailedToDecodeOutboundLaneState, - Custom(&'static str), - } - - impl From for &'static str { - fn from(err: MessageProofError) -> &'static str { - match err { - MessageProofError::Empty => "Messages proof is empty", - MessageProofError::MessagesCountMismatch => - "Declared messages count doesn't match actual value", - MessageProofError::MissingRequiredMessage => "Message is missing from the proof", - MessageProofError::FailedToDecodeMessage => - "Failed to decode message from the proof", - MessageProofError::FailedToDecodeOutboundLaneState => - "Failed to decode outbound lane data from the proof", - MessageProofError::Custom(err) => err, - } - } - } - - pub(crate) trait MessageProofParser { - fn read_raw_outbound_lane_data(&self, lane_id: &LaneId) -> Option>; - fn read_raw_message(&self, message_key: &MessageKey) -> Option>; - } - - struct StorageProofCheckerAdapter { - storage: StorageProofChecker, - _dummy: sp_std::marker::PhantomData, - } - - impl MessageProofParser for StorageProofCheckerAdapter - where - H: Hasher, - B: MessageBridge, - { - fn read_raw_outbound_lane_data(&self, lane_id: &LaneId) -> Option> { - let storage_outbound_lane_data_key = bp_messages::storage_keys::outbound_lane_data_key( - B::BRIDGED_MESSAGES_PALLET_NAME, - lane_id, - ); - self.storage.read_value(storage_outbound_lane_data_key.0.as_ref()).ok()? - } - - fn read_raw_message(&self, message_key: &MessageKey) -> Option> { - let storage_message_key = bp_messages::storage_keys::message_key( - B::BRIDGED_MESSAGES_PALLET_NAME, - &message_key.lane_id, - message_key.nonce, - ); - self.storage.read_value(storage_message_key.0.as_ref()).ok()? - } - } - - /// Verify proof of Bridged -> This chain messages using given message proof parser. 
- pub(crate) fn verify_messages_proof_with_parser( - proof: FromBridgedChainMessagesProof>>, - messages_count: u32, - build_parser: BuildParser, - ) -> Result>>>, MessageProofError> - where - BuildParser: - FnOnce(HashOf>, RawStorageProof) -> Result, - Parser: MessageProofParser, - { - let FromBridgedChainMessagesProof { - bridged_header_hash, - storage_proof, - lane, - nonces_start, - nonces_end, - } = proof; - - // receiving proofs where end < begin is ok (if proof includes outbound lane state) - let messages_in_the_proof = - if let Some(nonces_difference) = nonces_end.checked_sub(nonces_start) { - // let's check that the user (relayer) has passed correct `messages_count` - // (this bounds maximal capacity of messages vec below) - let messages_in_the_proof = nonces_difference.saturating_add(1); - if messages_in_the_proof != MessageNonce::from(messages_count) { - return Err(MessageProofError::MessagesCountMismatch) - } - - messages_in_the_proof - } else { - 0 - }; - - let parser = build_parser(bridged_header_hash, storage_proof)?; - - // Read messages first. All messages that are claimed to be in the proof must - // be in the proof. So any error in `read_value`, or even missing value is fatal. - // - // Mind that we allow proofs with no messages if outbound lane state is proved. - let mut messages = Vec::with_capacity(messages_in_the_proof as _); - for nonce in nonces_start..=nonces_end { - let message_key = MessageKey { lane_id: lane, nonce }; - let raw_message_data = parser - .read_raw_message(&message_key) - .ok_or(MessageProofError::MissingRequiredMessage)?; - let message_data = - MessageData::>>::decode(&mut &raw_message_data[..]) - .map_err(|_| MessageProofError::FailedToDecodeMessage)?; - messages.push(Message { key: message_key, data: message_data }); - } - - // Now let's check if proof contains outbound lane state proof. It is optional, so we - // simply ignore `read_value` errors and missing value. - let mut proved_lane_messages = ProvedLaneMessages { lane_state: None, messages }; - let raw_outbound_lane_data = parser.read_raw_outbound_lane_data(&lane); - if let Some(raw_outbound_lane_data) = raw_outbound_lane_data { - proved_lane_messages.lane_state = Some( - OutboundLaneData::decode(&mut &raw_outbound_lane_data[..]) - .map_err(|_| MessageProofError::FailedToDecodeOutboundLaneState)?, - ); - } - - // Now we may actually check if the proof is empty or not. 
- if proved_lane_messages.lane_state.is_none() && proved_lane_messages.messages.is_empty() { - return Err(MessageProofError::Empty) - } - - // We only support single lane messages in this schema - let mut proved_messages = ProvedMessages::new(); - proved_messages.insert(lane, proved_lane_messages); - - Ok(proved_messages) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use codec::{Decode, Encode}; - use frame_support::weights::Weight; - use std::ops::RangeInclusive; - - const DELIVERY_TRANSACTION_WEIGHT: Weight = 100; - const DELIVERY_CONFIRMATION_TRANSACTION_WEIGHT: Weight = 100; - const THIS_CHAIN_WEIGHT_TO_BALANCE_RATE: Weight = 2; - const BRIDGED_CHAIN_WEIGHT_TO_BALANCE_RATE: Weight = 4; - const BRIDGED_CHAIN_TO_THIS_CHAIN_BALANCE_RATE: u32 = 6; - const BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT: Weight = 2048; - const BRIDGED_CHAIN_MAX_EXTRINSIC_SIZE: u32 = 1024; - - /// Bridge that is deployed on ThisChain and allows sending/receiving messages to/from - /// BridgedChain; - #[derive(Debug, PartialEq, Eq)] - struct OnThisChainBridge; - - impl MessageBridge for OnThisChainBridge { - const RELAYER_FEE_PERCENT: u32 = 10; - const THIS_CHAIN_ID: ChainId = *b"this"; - const BRIDGED_CHAIN_ID: ChainId = *b"brdg"; - const BRIDGED_MESSAGES_PALLET_NAME: &'static str = ""; - - type ThisChain = ThisChain; - type BridgedChain = BridgedChain; - - fn bridged_balance_to_this_balance( - bridged_balance: BridgedChainBalance, - bridged_to_this_conversion_rate_override: Option, - ) -> ThisChainBalance { - let conversion_rate = bridged_to_this_conversion_rate_override - .map(|r| r.to_float() as u32) - .unwrap_or(BRIDGED_CHAIN_TO_THIS_CHAIN_BALANCE_RATE); - ThisChainBalance(bridged_balance.0 * conversion_rate) - } - } - - /// Bridge that is deployed on BridgedChain and allows sending/receiving messages to/from - /// ThisChain; - #[derive(Debug, PartialEq, Eq)] - struct OnBridgedChainBridge; - - impl MessageBridge for OnBridgedChainBridge { - const RELAYER_FEE_PERCENT: u32 = 20; - const THIS_CHAIN_ID: ChainId = *b"brdg"; - const BRIDGED_CHAIN_ID: ChainId = *b"this"; - const BRIDGED_MESSAGES_PALLET_NAME: &'static str = ""; - - type ThisChain = BridgedChain; - type BridgedChain = ThisChain; - - fn bridged_balance_to_this_balance( - _this_balance: ThisChainBalance, - _bridged_to_this_conversion_rate_override: Option, - ) -> BridgedChainBalance { - unreachable!() - } - } - - #[derive(Debug, PartialEq, Decode, Encode, Clone)] - struct ThisChainAccountId(u32); - #[derive(Debug, PartialEq, Decode, Encode)] - struct ThisChainSigner(u32); - #[derive(Debug, PartialEq, Decode, Encode)] - struct ThisChainSignature(u32); - #[derive(Debug, PartialEq, Decode, Encode)] - enum ThisChainCall { - #[codec(index = 42)] - Transfer, - #[codec(index = 84)] - Mint, - } - #[derive(Clone, Debug)] - struct ThisChainOrigin(Result, ()>); - - impl From - for Result, ThisChainOrigin> - { - fn from( - origin: ThisChainOrigin, - ) -> Result, ThisChainOrigin> { - origin.clone().0.map_err(|_| origin) - } - } - - #[derive(Debug, PartialEq, Decode, Encode)] - struct BridgedChainAccountId(u32); - #[derive(Debug, PartialEq, Decode, Encode)] - struct BridgedChainSigner(u32); - #[derive(Debug, PartialEq, Decode, Encode)] - struct BridgedChainSignature(u32); - #[derive(Debug, PartialEq, Decode, Encode)] - enum BridgedChainCall {} - #[derive(Clone, Debug)] - struct BridgedChainOrigin; - - impl From - for Result, BridgedChainOrigin> - { - fn from( - _origin: BridgedChainOrigin, - ) -> Result, BridgedChainOrigin> { - unreachable!() - } - } - - macro_rules! 
impl_wrapped_balance { - ($name:ident) => { - #[derive(Debug, PartialEq, Decode, Encode, Clone, Copy)] - struct $name(u32); - - impl From for $name { - fn from(balance: u32) -> Self { - Self(balance) - } - } - - impl sp_std::ops::Add for $name { - type Output = $name; - - fn add(self, other: Self) -> Self { - Self(self.0 + other.0) - } - } - - impl sp_std::ops::Div for $name { - type Output = $name; - - fn div(self, other: Self) -> Self { - Self(self.0 / other.0) - } - } - - impl sp_std::ops::Mul for $name { - type Output = $name; - - fn mul(self, other: Self) -> Self { - Self(self.0 * other.0) - } - } - - impl sp_std::cmp::PartialOrd for $name { - fn partial_cmp(&self, other: &Self) -> Option { - self.0.partial_cmp(&other.0) - } - } - - impl CheckedAdd for $name { - fn checked_add(&self, other: &Self) -> Option { - self.0.checked_add(other.0).map(Self) - } - } - - impl CheckedDiv for $name { - fn checked_div(&self, other: &Self) -> Option { - self.0.checked_div(other.0).map(Self) - } - } - - impl CheckedMul for $name { - fn checked_mul(&self, other: &Self) -> Option { - self.0.checked_mul(other.0).map(Self) - } - } - }; - } - - impl_wrapped_balance!(ThisChainBalance); - impl_wrapped_balance!(BridgedChainBalance); - - struct ThisChain; - - impl ChainWithMessages for ThisChain { - type Hash = (); - type AccountId = ThisChainAccountId; - type Signer = ThisChainSigner; - type Signature = ThisChainSignature; - type Weight = frame_support::weights::Weight; - type Balance = ThisChainBalance; - } - - impl ThisChainWithMessages for ThisChain { - type Origin = ThisChainOrigin; - type Call = ThisChainCall; - - fn is_message_accepted(_send_origin: &Self::Origin, lane: &LaneId) -> bool { - lane == TEST_LANE_ID - } - - fn maximal_pending_messages_at_outbound_lane() -> MessageNonce { - MAXIMAL_PENDING_MESSAGES_AT_TEST_LANE - } - - fn estimate_delivery_confirmation_transaction() -> MessageTransaction> { - MessageTransaction { - dispatch_weight: DELIVERY_CONFIRMATION_TRANSACTION_WEIGHT, - size: 0, - } - } - - fn transaction_payment(transaction: MessageTransaction>) -> BalanceOf { - ThisChainBalance( - transaction.dispatch_weight as u32 * THIS_CHAIN_WEIGHT_TO_BALANCE_RATE as u32, - ) - } - } - - impl BridgedChainWithMessages for ThisChain { - fn maximal_extrinsic_size() -> u32 { - unreachable!() - } - - fn message_weight_limits(_message_payload: &[u8]) -> RangeInclusive { - unreachable!() - } - - fn estimate_delivery_transaction( - _message_payload: &[u8], - _include_pay_dispatch_fee_cost: bool, - _message_dispatch_weight: WeightOf, - ) -> MessageTransaction> { - unreachable!() - } - - fn transaction_payment( - _transaction: MessageTransaction>, - ) -> BalanceOf { - unreachable!() - } - } - - struct BridgedChain; - - impl ChainWithMessages for BridgedChain { - type Hash = (); - type AccountId = BridgedChainAccountId; - type Signer = BridgedChainSigner; - type Signature = BridgedChainSignature; - type Weight = frame_support::weights::Weight; - type Balance = BridgedChainBalance; - } - - impl ThisChainWithMessages for BridgedChain { - type Origin = BridgedChainOrigin; - type Call = BridgedChainCall; - - fn is_message_accepted(_send_origin: &Self::Origin, _lane: &LaneId) -> bool { - unreachable!() - } - - fn maximal_pending_messages_at_outbound_lane() -> MessageNonce { - unreachable!() - } - - fn estimate_delivery_confirmation_transaction() -> MessageTransaction> { - unreachable!() - } - - fn transaction_payment( - _transaction: MessageTransaction>, - ) -> BalanceOf { - unreachable!() - } - } - - impl 
BridgedChainWithMessages for BridgedChain { - fn maximal_extrinsic_size() -> u32 { - BRIDGED_CHAIN_MAX_EXTRINSIC_SIZE - } - - fn message_weight_limits(message_payload: &[u8]) -> RangeInclusive { - let begin = - std::cmp::min(BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT, message_payload.len() as Weight); - begin..=BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT - } - - fn estimate_delivery_transaction( - _message_payload: &[u8], - _include_pay_dispatch_fee_cost: bool, - message_dispatch_weight: WeightOf, - ) -> MessageTransaction> { - MessageTransaction { - dispatch_weight: DELIVERY_TRANSACTION_WEIGHT + message_dispatch_weight, - size: 0, - } - } - - fn transaction_payment(transaction: MessageTransaction>) -> BalanceOf { - BridgedChainBalance( - transaction.dispatch_weight as u32 * BRIDGED_CHAIN_WEIGHT_TO_BALANCE_RATE as u32, - ) - } - } - - fn test_lane_outbound_data() -> OutboundLaneData { - OutboundLaneData::default() - } - - #[test] - fn message_from_bridged_chain_is_decoded() { - // the message is encoded on the bridged chain - let message_on_bridged_chain = - source::FromThisChainMessagePayload:: { - spec_version: 1, - weight: 100, - origin: bp_message_dispatch::CallOrigin::SourceRoot, - dispatch_fee_payment: DispatchFeePayment::AtTargetChain, - call: ThisChainCall::Transfer.encode(), - } - .encode(); - - // and sent to this chain where it is decoded - let message_on_this_chain = - target::FromBridgedChainMessagePayload::::decode( - &mut &message_on_bridged_chain[..], - ) - .unwrap(); - assert_eq!( - message_on_this_chain, - target::FromBridgedChainMessagePayload:: { - spec_version: 1, - weight: 100, - origin: bp_message_dispatch::CallOrigin::SourceRoot, - dispatch_fee_payment: DispatchFeePayment::AtTargetChain, - call: target::FromBridgedChainEncodedMessageCall::::new( - ThisChainCall::Transfer.encode(), - ), - } - ); - assert_eq!(Ok(ThisChainCall::Transfer), message_on_this_chain.call.into()); - } - - const TEST_LANE_ID: &LaneId = b"test"; - const MAXIMAL_PENDING_MESSAGES_AT_TEST_LANE: MessageNonce = 32; - - fn regular_outbound_message_payload() -> source::FromThisChainMessagePayload - { - source::FromThisChainMessagePayload:: { - spec_version: 1, - weight: 100, - origin: bp_message_dispatch::CallOrigin::SourceRoot, - dispatch_fee_payment: DispatchFeePayment::AtSourceChain, - call: vec![42], - } - } - - #[test] - fn message_fee_is_checked_by_verifier() { - const EXPECTED_MINIMAL_FEE: u32 = 5500; - - // payload of the This -> Bridged chain message - let payload = regular_outbound_message_payload(); - - // let's check if estimation matching hardcoded value - assert_eq!( - source::estimate_message_dispatch_and_delivery_fee::( - &payload, - OnThisChainBridge::RELAYER_FEE_PERCENT, - None, - ), - Ok(ThisChainBalance(EXPECTED_MINIMAL_FEE)), - ); - - // let's check if estimation is less than hardcoded, if dispatch is paid at target chain - let mut payload_with_pay_on_target = regular_outbound_message_payload(); - payload_with_pay_on_target.dispatch_fee_payment = DispatchFeePayment::AtTargetChain; - let fee_at_source = - source::estimate_message_dispatch_and_delivery_fee::( - &payload_with_pay_on_target, - OnThisChainBridge::RELAYER_FEE_PERCENT, - None, - ) - .expect( - "estimate_message_dispatch_and_delivery_fee failed for pay-at-target-chain message", - ); - assert!( - fee_at_source < EXPECTED_MINIMAL_FEE.into(), - "Computed fee {:?} without prepaid dispatch must be less than the fee with prepaid dispatch {}", - fee_at_source, - EXPECTED_MINIMAL_FEE, - ); - - // and now check that the verifier checks the fee - 
assert_eq!( - source::FromThisChainMessageVerifier::::verify_message( - &ThisChainOrigin(Ok(frame_system::RawOrigin::Root)), - &ThisChainBalance(1), - TEST_LANE_ID, - &test_lane_outbound_data(), - &payload, - ), - Err(source::TOO_LOW_FEE) - ); - assert!(source::FromThisChainMessageVerifier::::verify_message( - &ThisChainOrigin(Ok(frame_system::RawOrigin::Root)), - &ThisChainBalance(1_000_000), - TEST_LANE_ID, - &test_lane_outbound_data(), - &payload, - ) - .is_ok(),); - } - - #[test] - fn should_disallow_root_calls_from_regular_accounts() { - // payload of the This -> Bridged chain message - let payload = source::FromThisChainMessagePayload:: { - spec_version: 1, - weight: 100, - origin: bp_message_dispatch::CallOrigin::SourceRoot, - dispatch_fee_payment: DispatchFeePayment::AtSourceChain, - call: vec![42], - }; - - // and now check that the verifier checks the fee - assert_eq!( - source::FromThisChainMessageVerifier::::verify_message( - &ThisChainOrigin(Ok(frame_system::RawOrigin::Signed(ThisChainAccountId(0)))), - &ThisChainBalance(1_000_000), - TEST_LANE_ID, - &test_lane_outbound_data(), - &payload, - ), - Err(source::BAD_ORIGIN) - ); - assert_eq!( - source::FromThisChainMessageVerifier::::verify_message( - &ThisChainOrigin(Ok(frame_system::RawOrigin::None)), - &ThisChainBalance(1_000_000), - TEST_LANE_ID, - &test_lane_outbound_data(), - &payload, - ), - Err(source::BAD_ORIGIN) - ); - assert!(source::FromThisChainMessageVerifier::::verify_message( - &ThisChainOrigin(Ok(frame_system::RawOrigin::Root)), - &ThisChainBalance(1_000_000), - TEST_LANE_ID, - &test_lane_outbound_data(), - &payload, - ) - .is_ok(),); - } - - #[test] - fn should_verify_source_and_target_origin_matching() { - // payload of the This -> Bridged chain message - let payload = source::FromThisChainMessagePayload:: { - spec_version: 1, - weight: 100, - origin: bp_message_dispatch::CallOrigin::SourceAccount(ThisChainAccountId(1)), - dispatch_fee_payment: DispatchFeePayment::AtSourceChain, - call: vec![42], - }; - - // and now check that the verifier checks the fee - assert_eq!( - source::FromThisChainMessageVerifier::::verify_message( - &ThisChainOrigin(Ok(frame_system::RawOrigin::Signed(ThisChainAccountId(0)))), - &ThisChainBalance(1_000_000), - TEST_LANE_ID, - &test_lane_outbound_data(), - &payload, - ), - Err(source::BAD_ORIGIN) - ); - assert!(source::FromThisChainMessageVerifier::::verify_message( - &ThisChainOrigin(Ok(frame_system::RawOrigin::Signed(ThisChainAccountId(1)))), - &ThisChainBalance(1_000_000), - TEST_LANE_ID, - &test_lane_outbound_data(), - &payload, - ) - .is_ok(),); - } - - #[test] - fn message_is_rejected_when_sent_using_disabled_lane() { - assert_eq!( - source::FromThisChainMessageVerifier::::verify_message( - &ThisChainOrigin(Ok(frame_system::RawOrigin::Root)), - &ThisChainBalance(1_000_000), - b"dsbl", - &test_lane_outbound_data(), - ®ular_outbound_message_payload(), - ), - Err(source::MESSAGE_REJECTED_BY_OUTBOUND_LANE) - ); - } - - #[test] - fn message_is_rejected_when_there_are_too_many_pending_messages_at_outbound_lane() { - assert_eq!( - source::FromThisChainMessageVerifier::::verify_message( - &ThisChainOrigin(Ok(frame_system::RawOrigin::Root)), - &ThisChainBalance(1_000_000), - TEST_LANE_ID, - &OutboundLaneData { - latest_received_nonce: 100, - latest_generated_nonce: 100 + MAXIMAL_PENDING_MESSAGES_AT_TEST_LANE + 1, - ..Default::default() - }, - ®ular_outbound_message_payload(), - ), - Err(source::TOO_MANY_PENDING_MESSAGES) - ); - } - - #[test] - fn 
verify_chain_message_rejects_message_with_too_small_declared_weight() { - assert!(source::verify_chain_message::( - &source::FromThisChainMessagePayload:: { - spec_version: 1, - weight: 5, - origin: bp_message_dispatch::CallOrigin::SourceRoot, - dispatch_fee_payment: DispatchFeePayment::AtSourceChain, - call: vec![1, 2, 3, 4, 5, 6], - }, - ) - .is_err()); - } - - #[test] - fn verify_chain_message_rejects_message_with_too_large_declared_weight() { - assert!(source::verify_chain_message::( - &source::FromThisChainMessagePayload:: { - spec_version: 1, - weight: BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT + 1, - origin: bp_message_dispatch::CallOrigin::SourceRoot, - dispatch_fee_payment: DispatchFeePayment::AtSourceChain, - call: vec![1, 2, 3, 4, 5, 6], - }, - ) - .is_err()); - } - - #[test] - fn verify_chain_message_rejects_message_too_large_message() { - assert!(source::verify_chain_message::( - &source::FromThisChainMessagePayload:: { - spec_version: 1, - weight: BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT, - origin: bp_message_dispatch::CallOrigin::SourceRoot, - dispatch_fee_payment: DispatchFeePayment::AtSourceChain, - call: vec![0; source::maximal_message_size::() as usize + 1], - }, - ) - .is_err()); - } - - #[test] - fn verify_chain_message_accepts_maximal_message() { - assert_eq!( - source::verify_chain_message::( - &source::FromThisChainMessagePayload:: { - spec_version: 1, - weight: BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT, - origin: bp_message_dispatch::CallOrigin::SourceRoot, - dispatch_fee_payment: DispatchFeePayment::AtSourceChain, - call: vec![0; source::maximal_message_size::() as _], - }, - ), - Ok(()), - ); - } - - #[derive(Debug)] - struct TestMessageProofParser { - failing: bool, - messages: RangeInclusive, - outbound_lane_data: Option, - } - - impl target::MessageProofParser for TestMessageProofParser { - fn read_raw_outbound_lane_data(&self, _lane_id: &LaneId) -> Option> { - if self.failing { - Some(vec![]) - } else { - self.outbound_lane_data.clone().map(|data| data.encode()) - } - } - - fn read_raw_message(&self, message_key: &MessageKey) -> Option> { - if self.failing { - Some(vec![]) - } else if self.messages.contains(&message_key.nonce) { - Some( - MessageData:: { - payload: message_key.nonce.encode(), - fee: BridgedChainBalance(0), - } - .encode(), - ) - } else { - None - } - } - } - - #[allow(clippy::reversed_empty_ranges)] - fn no_messages_range() -> RangeInclusive { - 1..=0 - } - - fn messages_proof(nonces_end: MessageNonce) -> target::FromBridgedChainMessagesProof<()> { - target::FromBridgedChainMessagesProof { - bridged_header_hash: (), - storage_proof: vec![], - lane: Default::default(), - nonces_start: 1, - nonces_end, - } - } - - #[test] - fn messages_proof_is_rejected_if_declared_less_than_actual_number_of_messages() { - assert_eq!( - target::verify_messages_proof_with_parser::( - messages_proof(10), - 5, - |_, _| unreachable!(), - ), - Err(target::MessageProofError::MessagesCountMismatch), - ); - } - - #[test] - fn messages_proof_is_rejected_if_declared_more_than_actual_number_of_messages() { - assert_eq!( - target::verify_messages_proof_with_parser::( - messages_proof(10), - 15, - |_, _| unreachable!(), - ), - Err(target::MessageProofError::MessagesCountMismatch), - ); - } - - #[test] - fn message_proof_is_rejected_if_build_parser_fails() { - assert_eq!( - target::verify_messages_proof_with_parser::( - messages_proof(10), - 10, - |_, _| Err(target::MessageProofError::Custom("test")), - ), - Err(target::MessageProofError::Custom("test")), - ); - } - - #[test] - fn 
message_proof_is_rejected_if_required_message_is_missing() { - assert_eq!( - target::verify_messages_proof_with_parser::( - messages_proof(10), - 10, - |_, _| Ok(TestMessageProofParser { - failing: false, - messages: 1..=5, - outbound_lane_data: None, - }), - ), - Err(target::MessageProofError::MissingRequiredMessage), - ); - } - - #[test] - fn message_proof_is_rejected_if_message_decode_fails() { - assert_eq!( - target::verify_messages_proof_with_parser::( - messages_proof(10), - 10, - |_, _| Ok(TestMessageProofParser { - failing: true, - messages: 1..=10, - outbound_lane_data: None, - }), - ), - Err(target::MessageProofError::FailedToDecodeMessage), - ); - } - - #[test] - fn message_proof_is_rejected_if_outbound_lane_state_decode_fails() { - assert_eq!( - target::verify_messages_proof_with_parser::( - messages_proof(0), - 0, - |_, _| Ok(TestMessageProofParser { - failing: true, - messages: no_messages_range(), - outbound_lane_data: Some(OutboundLaneData { - oldest_unpruned_nonce: 1, - latest_received_nonce: 1, - latest_generated_nonce: 1, - }), - }), - ), - Err(target::MessageProofError::FailedToDecodeOutboundLaneState), - ); - } - - #[test] - fn message_proof_is_rejected_if_it_is_empty() { - assert_eq!( - target::verify_messages_proof_with_parser::( - messages_proof(0), - 0, - |_, _| Ok(TestMessageProofParser { - failing: false, - messages: no_messages_range(), - outbound_lane_data: None, - }), - ), - Err(target::MessageProofError::Empty), - ); - } - - #[test] - fn non_empty_message_proof_without_messages_is_accepted() { - assert_eq!( - target::verify_messages_proof_with_parser::( - messages_proof(0), - 0, - |_, _| Ok(TestMessageProofParser { - failing: false, - messages: no_messages_range(), - outbound_lane_data: Some(OutboundLaneData { - oldest_unpruned_nonce: 1, - latest_received_nonce: 1, - latest_generated_nonce: 1, - }), - }), - ), - Ok(vec![( - Default::default(), - ProvedLaneMessages { - lane_state: Some(OutboundLaneData { - oldest_unpruned_nonce: 1, - latest_received_nonce: 1, - latest_generated_nonce: 1, - }), - messages: Vec::new(), - }, - )] - .into_iter() - .collect()), - ); - } - - #[test] - fn non_empty_message_proof_is_accepted() { - assert_eq!( - target::verify_messages_proof_with_parser::( - messages_proof(1), - 1, - |_, _| Ok(TestMessageProofParser { - failing: false, - messages: 1..=1, - outbound_lane_data: Some(OutboundLaneData { - oldest_unpruned_nonce: 1, - latest_received_nonce: 1, - latest_generated_nonce: 1, - }), - }), - ), - Ok(vec![( - Default::default(), - ProvedLaneMessages { - lane_state: Some(OutboundLaneData { - oldest_unpruned_nonce: 1, - latest_received_nonce: 1, - latest_generated_nonce: 1, - }), - messages: vec![Message { - key: MessageKey { lane_id: Default::default(), nonce: 1 }, - data: MessageData { payload: 1u64.encode(), fee: BridgedChainBalance(0) }, - }], - }, - )] - .into_iter() - .collect()), - ); - } - - #[test] - fn verify_messages_proof_with_parser_does_not_panic_if_messages_count_mismatches() { - assert_eq!( - target::verify_messages_proof_with_parser::( - messages_proof(u64::MAX), - 0, - |_, _| Ok(TestMessageProofParser { - failing: false, - messages: 0..=u64::MAX, - outbound_lane_data: Some(OutboundLaneData { - oldest_unpruned_nonce: 1, - latest_received_nonce: 1, - latest_generated_nonce: 1, - }), - }), - ), - Err(target::MessageProofError::MessagesCountMismatch), - ); - } - - #[test] - fn transaction_payment_works_with_zero_multiplier() { - use sp_runtime::traits::Zero; - - assert_eq!( - transaction_payment( - 100, - 10, - 
FixedU128::zero(), - |weight| weight, - MessageTransaction { size: 50, dispatch_weight: 777 }, - ), - 100 + 50 * 10, - ); - } - - #[test] - fn transaction_payment_works_with_non_zero_multiplier() { - use sp_runtime::traits::One; - - assert_eq!( - transaction_payment( - 100, - 10, - FixedU128::one(), - |weight| weight, - MessageTransaction { size: 50, dispatch_weight: 777 }, - ), - 100 + 50 * 10 + 777, - ); - } - - #[test] - fn conversion_rate_override_works() { - let payload = regular_outbound_message_payload(); - let regular_fee = source::estimate_message_dispatch_and_delivery_fee::( - &payload, - OnThisChainBridge::RELAYER_FEE_PERCENT, - None, - ); - let overrided_fee = source::estimate_message_dispatch_and_delivery_fee::( - &payload, - OnThisChainBridge::RELAYER_FEE_PERCENT, - Some(FixedU128::from_float((BRIDGED_CHAIN_TO_THIS_CHAIN_BALANCE_RATE * 2) as f64)), - ); - - assert!(regular_fee < overrided_fee); - } -} diff --git a/polkadot/bridges/bin/runtime-common/src/messages_api.rs b/polkadot/bridges/bin/runtime-common/src/messages_api.rs deleted file mode 100644 index b09a88e6279..00000000000 --- a/polkadot/bridges/bin/runtime-common/src/messages_api.rs +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Helpers for implementing various message-related runtime API mthods. - -use crate::messages::{source::FromThisChainMessagePayload, MessageBridge}; - -use bp_messages::{LaneId, MessageDetails, MessageNonce}; -use codec::Decode; -use sp_std::vec::Vec; - -/// Implementation of the `To*OutboundLaneApi::message_details`. -pub fn outbound_message_details( - lane: LaneId, - begin: MessageNonce, - end: MessageNonce, -) -> Vec> -where - Runtime: pallet_bridge_messages::Config, - MessagesPalletInstance: 'static, - BridgeConfig: MessageBridge, -{ - (begin..=end) - .filter_map(|nonce| { - let message_data = - pallet_bridge_messages::Pallet::::outbound_message_data(lane, nonce)?; - let decoded_payload = - FromThisChainMessagePayload::::decode(&mut &message_data.payload[..]).ok()?; - Some(MessageDetails { - nonce, - dispatch_weight: decoded_payload.weight, - size: message_data.payload.len() as _, - delivery_and_dispatch_fee: message_data.fee, - dispatch_fee_payment: decoded_payload.dispatch_fee_payment, - }) - }) - .collect() -} diff --git a/polkadot/bridges/bin/runtime-common/src/messages_benchmarking.rs b/polkadot/bridges/bin/runtime-common/src/messages_benchmarking.rs deleted file mode 100644 index 5e20078a256..00000000000 --- a/polkadot/bridges/bin/runtime-common/src/messages_benchmarking.rs +++ /dev/null @@ -1,380 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Everything required to run benchmarks of messages module, based on -//! `bridge_runtime_common::messages` implementation. - -#![cfg(feature = "runtime-benchmarks")] - -use crate::messages::{ - source::{FromBridgedChainMessagesDeliveryProof, FromThisChainMessagePayload}, - target::FromBridgedChainMessagesProof, - AccountIdOf, BalanceOf, BridgedChain, CallOf, HashOf, MessageBridge, RawStorageProof, - SignatureOf, SignerOf, ThisChain, -}; - -use bp_messages::{storage_keys, MessageData, MessageKey, MessagePayload}; -use bp_runtime::{messages::DispatchFeePayment, ChainId}; -use codec::Encode; -use ed25519_dalek::{PublicKey, SecretKey, Signer, KEYPAIR_LENGTH, SECRET_KEY_LENGTH}; -use frame_support::{ - traits::Currency, - weights::{GetDispatchInfo, Weight}, -}; -use pallet_bridge_messages::benchmarking::{ - MessageDeliveryProofParams, MessageParams, MessageProofParams, ProofSize, -}; -use sp_core::Hasher; -use sp_runtime::traits::{Header, IdentifyAccount, MaybeSerializeDeserialize, Zero}; -use sp_std::{fmt::Debug, prelude::*}; -use sp_trie::{record_all_keys, trie_types::TrieDBMutV1, LayoutV1, MemoryDB, Recorder, TrieMut}; -use sp_version::RuntimeVersion; - -/// Return this chain account, used to dispatch message. -pub fn dispatch_account() -> AccountIdOf> -where - B: MessageBridge, - SignerOf>: - From + IdentifyAccount>>, -{ - let this_raw_public = PublicKey::from(&dispatch_account_secret()); - let this_public: SignerOf> = - sp_core::ed25519::Public::from_raw(this_raw_public.to_bytes()).into(); - this_public.into_account() -} - -/// Return public key of this chain account, used to dispatch message. -pub fn dispatch_account_secret() -> SecretKey { - // key from the repo example (https://docs.rs/ed25519-dalek/1.0.1/ed25519_dalek/struct.SecretKey.html) - SecretKey::from_bytes(&[ - 157, 097, 177, 157, 239, 253, 090, 096, 186, 132, 074, 244, 146, 236, 044, 196, 068, 073, - 197, 105, 123, 050, 105, 025, 112, 059, 172, 003, 028, 174, 127, 096, - ]) - .expect("harcoded key is valid") -} - -/// Prepare outbound message for the `send_message` call. -pub fn prepare_outbound_message( - params: MessageParams>>, -) -> FromThisChainMessagePayload -where - B: MessageBridge, - BalanceOf>: From, -{ - let message_payload = vec![0; params.size as usize]; - let dispatch_origin = bp_message_dispatch::CallOrigin::SourceAccount(params.sender_account); - - FromThisChainMessagePayload:: { - spec_version: 0, - weight: params.size as _, - origin: dispatch_origin, - call: message_payload, - dispatch_fee_payment: DispatchFeePayment::AtSourceChain, - } -} - -/// Prepare proof of messages for the `receive_messages_proof` call. -/// -/// In addition to returning valid messages proof, environment is prepared to verify this message -/// proof. 
-pub fn prepare_message_proof( - params: MessageProofParams, - version: &RuntimeVersion, - endow_amount: BalanceOf>, -) -> (FromBridgedChainMessagesProof>>, Weight) -where - R: frame_system::Config>> - + pallet_balances::Config>> - + pallet_bridge_grandpa::Config, - R::BridgedChain: bp_runtime::Chain
, - B: MessageBridge, - BI: 'static, - FI: 'static, - BH: Header>>, - BHH: Hasher>>, - AccountIdOf>: PartialEq + sp_std::fmt::Debug, - AccountIdOf>: From<[u8; 32]>, - BalanceOf>: Debug + MaybeSerializeDeserialize, - CallOf>: From> + GetDispatchInfo, - HashOf>: Copy + Default, - SignatureOf>: From, - SignerOf>: Clone - + From - + IdentifyAccount>>, -{ - // we'll be dispatching the same call at This chain - let remark = match params.size { - ProofSize::Minimal(ref size) => vec![0u8; *size as _], - _ => vec![], - }; - let call: CallOf> = frame_system::Call::remark { remark }.into(); - let call_weight = call.get_dispatch_info().weight; - - // message payload needs to be signed, because we use `TargetAccount` call origin - // (which is 'heaviest' to verify) - let bridged_account_id: AccountIdOf> = [0u8; 32].into(); - let (this_raw_public, this_raw_signature) = ed25519_sign( - &call, - &bridged_account_id, - version.spec_version, - B::BRIDGED_CHAIN_ID, - B::THIS_CHAIN_ID, - ); - let this_public: SignerOf> = - sp_core::ed25519::Public::from_raw(this_raw_public).into(); - let this_signature: SignatureOf> = - sp_core::ed25519::Signature::from_raw(this_raw_signature).into(); - - // if dispatch fee is paid at this chain, endow relayer account - if params.dispatch_fee_payment == DispatchFeePayment::AtTargetChain { - assert_eq!(this_public.clone().into_account(), dispatch_account::()); - pallet_balances::Pallet::::make_free_balance_be( - &this_public.clone().into_account(), - endow_amount, - ); - } - - // prepare message payload that is stored in the Bridged chain storage - let message_payload = bp_message_dispatch::MessagePayload { - spec_version: version.spec_version, - weight: call_weight, - origin: bp_message_dispatch::CallOrigin::< - AccountIdOf>, - SignerOf>, - SignatureOf>, - >::TargetAccount(bridged_account_id, this_public, this_signature), - dispatch_fee_payment: params.dispatch_fee_payment.clone(), - call: call.encode(), - } - .encode(); - - // finally - prepare storage proof and update environment - let (state_root, storage_proof) = - prepare_messages_storage_proof::(¶ms, message_payload); - let bridged_header_hash = insert_bridged_chain_header::(state_root); - - ( - FromBridgedChainMessagesProof { - bridged_header_hash, - storage_proof, - lane: params.lane, - nonces_start: *params.message_nonces.start(), - nonces_end: *params.message_nonces.end(), - }, - call_weight - .checked_mul( - params.message_nonces.end().saturating_sub(*params.message_nonces.start()) + 1, - ) - .expect("too many messages requested by benchmark"), - ) -} - -/// Prepare proof of messages delivery for the `receive_messages_delivery_proof` call. -pub fn prepare_message_delivery_proof( - params: MessageDeliveryProofParams>>, -) -> FromBridgedChainMessagesDeliveryProof>> -where - R: pallet_bridge_grandpa::Config, - R::BridgedChain: bp_runtime::Chain
, - FI: 'static, - B: MessageBridge, - BH: Header>>, - BHH: Hasher>>, - HashOf>: Copy + Default, -{ - // prepare Bridged chain storage with inbound lane state - let storage_key = - storage_keys::inbound_lane_data_key(B::BRIDGED_MESSAGES_PALLET_NAME, ¶ms.lane).0; - let mut root = Default::default(); - let mut mdb = MemoryDB::default(); - { - let mut trie = TrieDBMutV1::::new(&mut mdb, &mut root); - trie.insert(&storage_key, ¶ms.inbound_lane_data.encode()) - .map_err(|_| "TrieMut::insert has failed") - .expect("TrieMut::insert should not fail in benchmarks"); - } - root = grow_trie(root, &mut mdb, params.size); - - // generate storage proof to be delivered to This chain - let mut proof_recorder = Recorder::::new(); - record_all_keys::, _>(&mdb, &root, &mut proof_recorder) - .map_err(|_| "record_all_keys has failed") - .expect("record_all_keys should not fail in benchmarks"); - let storage_proof = proof_recorder.drain().into_iter().map(|n| n.data.to_vec()).collect(); - - // finally insert header with given state root to our storage - let bridged_header_hash = insert_bridged_chain_header::(root); - - FromBridgedChainMessagesDeliveryProof { - bridged_header_hash: bridged_header_hash.into(), - storage_proof, - lane: params.lane, - } -} - -/// Prepare storage proof of given messages. -/// -/// Returns state trie root and nodes with prepared messages. -fn prepare_messages_storage_proof( - params: &MessageProofParams, - message_payload: MessagePayload, -) -> (HashOf>, RawStorageProof) -where - B: MessageBridge, - BHH: Hasher>>, - HashOf>: Copy + Default, -{ - // prepare Bridged chain storage with messages and (optionally) outbound lane state - let message_count = - params.message_nonces.end().saturating_sub(*params.message_nonces.start()) + 1; - let mut storage_keys = Vec::with_capacity(message_count as usize + 1); - let mut root = Default::default(); - let mut mdb = MemoryDB::default(); - { - let mut trie = TrieDBMutV1::::new(&mut mdb, &mut root); - - // insert messages - for nonce in params.message_nonces.clone() { - let message_key = MessageKey { lane_id: params.lane, nonce }; - let message_data = MessageData { - fee: BalanceOf::>::from(0), - payload: message_payload.clone(), - }; - let storage_key = storage_keys::message_key( - B::BRIDGED_MESSAGES_PALLET_NAME, - &message_key.lane_id, - message_key.nonce, - ) - .0; - trie.insert(&storage_key, &message_data.encode()) - .map_err(|_| "TrieMut::insert has failed") - .expect("TrieMut::insert should not fail in benchmarks"); - storage_keys.push(storage_key); - } - - // insert outbound lane state - if let Some(ref outbound_lane_data) = params.outbound_lane_data { - let storage_key = - storage_keys::outbound_lane_data_key(B::BRIDGED_MESSAGES_PALLET_NAME, ¶ms.lane) - .0; - trie.insert(&storage_key, &outbound_lane_data.encode()) - .map_err(|_| "TrieMut::insert has failed") - .expect("TrieMut::insert should not fail in benchmarks"); - storage_keys.push(storage_key); - } - } - root = grow_trie(root, &mut mdb, params.size); - - // generate storage proof to be delivered to This chain - let mut proof_recorder = Recorder::::new(); - record_all_keys::, _>(&mdb, &root, &mut proof_recorder) - .map_err(|_| "record_all_keys has failed") - .expect("record_all_keys should not fail in benchmarks"); - let storage_proof = proof_recorder.drain().into_iter().map(|n| n.data.to_vec()).collect(); - - (root, storage_proof) -} - -/// Insert Bridged chain header with given state root into storage of GRANDPA pallet at This chain. 
-fn insert_bridged_chain_header( - state_root: HashOf>, -) -> HashOf> -where - R: pallet_bridge_grandpa::Config, - R::BridgedChain: bp_runtime::Chain
, - FI: 'static, - B: MessageBridge, - BH: Header>>, - HashOf>: Default, -{ - let bridged_header = BH::new( - Zero::zero(), - Default::default(), - state_root, - Default::default(), - Default::default(), - ); - let bridged_header_hash = bridged_header.hash(); - pallet_bridge_grandpa::initialize_for_benchmarks::(bridged_header); - bridged_header_hash -} - -/// Generate ed25519 signature to be used in -/// `pallet_brdige_call_dispatch::CallOrigin::TargetAccount`. -/// -/// Returns public key of the signer and the signature itself. -fn ed25519_sign( - target_call: &impl Encode, - source_account_id: &impl Encode, - target_spec_version: u32, - source_chain_id: ChainId, - target_chain_id: ChainId, -) -> ([u8; 32], [u8; 64]) { - let target_secret = dispatch_account_secret(); - let target_public: PublicKey = (&target_secret).into(); - - let mut target_pair_bytes = [0u8; KEYPAIR_LENGTH]; - target_pair_bytes[..SECRET_KEY_LENGTH].copy_from_slice(&target_secret.to_bytes()); - target_pair_bytes[SECRET_KEY_LENGTH..].copy_from_slice(&target_public.to_bytes()); - let target_pair = - ed25519_dalek::Keypair::from_bytes(&target_pair_bytes).expect("hardcoded pair is valid"); - - let signature_message = pallet_bridge_dispatch::account_ownership_digest( - target_call, - source_account_id, - target_spec_version, - source_chain_id, - target_chain_id, - ); - let target_origin_signature = target_pair - .try_sign(&signature_message) - .expect("Ed25519 try_sign should not fail in benchmarks"); - - (target_public.to_bytes(), target_origin_signature.to_bytes()) -} - -/// Populate trie with dummy keys+values until trie has at least given size. -fn grow_trie(mut root: H::Out, mdb: &mut MemoryDB, trie_size: ProofSize) -> H::Out { - let (iterations, leaf_size, minimal_trie_size) = match trie_size { - ProofSize::Minimal(_) => return root, - ProofSize::HasLargeLeaf(size) => (1, size, size), - ProofSize::HasExtraNodes(size) => (8, 1, size), - }; - - let mut key_index = 0u32; - loop { - // generate storage proof to be delivered to This chain - let mut proof_recorder = Recorder::::new(); - record_all_keys::, _>(mdb, &root, &mut proof_recorder) - .map_err(|_| "record_all_keys has failed") - .expect("record_all_keys should not fail in benchmarks"); - let size: usize = proof_recorder.drain().into_iter().map(|n| n.data.len()).sum(); - if size > minimal_trie_size as _ { - return root - } - - let mut trie = TrieDBMutV1::::from_existing(mdb, &mut root) - .map_err(|_| "TrieDBMutV1::from_existing has failed") - .expect("TrieDBMutV1::from_existing should not fail in benchmarks"); - for _ in 0..iterations { - trie.insert(&key_index.encode(), &vec![42u8; leaf_size as _]) - .map_err(|_| "TrieMut::insert has failed") - .expect("TrieMut::insert should not fail in benchmarks"); - key_index += 1; - } - trie.commit(); - } -} diff --git a/polkadot/bridges/ci.Dockerfile b/polkadot/bridges/ci.Dockerfile deleted file mode 100644 index b419f6be54d..00000000000 --- a/polkadot/bridges/ci.Dockerfile +++ /dev/null @@ -1,53 +0,0 @@ -# This file is a "runtime" part from a builder-pattern in Dockerfile, it's used in CI. -# The only different part is that the compilation happens externally, -# so COPY has a different source. 
-FROM docker.io/library/ubuntu:20.04 - -# show backtraces -ENV RUST_BACKTRACE 1 -ENV DEBIAN_FRONTEND=noninteractive - -RUN set -eux; \ - apt-get update && \ - apt-get install -y --no-install-recommends \ - curl ca-certificates libssl-dev && \ - update-ca-certificates && \ - groupadd -g 1000 user && \ - useradd -u 1000 -g user -s /bin/sh -m user && \ - # apt clean up - apt-get autoremove -y && \ - apt-get clean && \ - rm -rf /var/lib/apt/lists/* - -# switch to non-root user -USER user - -WORKDIR /home/user - -ARG PROJECT=substrate-relay - -COPY --chown=user:user ./${PROJECT} ./ -COPY --chown=user:user ./bridge-entrypoint.sh ./ - -# check if executable works in this container -RUN ./${PROJECT} --version - -ENV PROJECT=$PROJECT -ENTRYPOINT ["/home/user/bridge-entrypoint.sh"] - -# metadata -ARG VCS_REF=master -ARG BUILD_DATE="" -ARG VERSION="" - -LABEL org.opencontainers.image.title="${PROJECT}" \ - org.opencontainers.image.description="${PROJECT} - component of Parity Bridges Common" \ - org.opencontainers.image.source="https://github.com/paritytech/parity-bridges-common/blob/${VCS_REF}/ci.Dockerfile" \ - org.opencontainers.image.url="https://github.com/paritytech/parity-bridges-common/blob/${VCS_REF}/ci.Dockerfile" \ - org.opencontainers.image.documentation="https://github.com/paritytech/parity-bridges-common/blob/${VCS_REF}/README.md" \ - org.opencontainers.image.created="${BUILD_DATE}" \ - org.opencontainers.image.version="${VERSION}" \ - org.opencontainers.image.revision="${VCS_REF}" \ - org.opencontainers.image.authors="devops-team@parity.io" \ - org.opencontainers.image.vendor="Parity Technologies" \ - org.opencontainers.image.licenses="GPL-3.0 License" diff --git a/polkadot/bridges/deny.toml b/polkadot/bridges/deny.toml deleted file mode 100644 index e5281e0e849..00000000000 --- a/polkadot/bridges/deny.toml +++ /dev/null @@ -1,202 +0,0 @@ -# This template contains all of the possible sections and their default values - -# Note that all fields that take a lint level have these possible values: -# * deny - An error will be produced and the check will fail -# * warn - A warning will be produced, but the check will not fail -# * allow - No warning or error will be produced, though in some cases a note -# will be - -# The values provided in this template are the default values that will be used -# when any section or field is not specified in your own configuration - -# If 1 or more target triples (and optionally, target_features) are specified, -# only the specified targets will be checked when running `cargo deny check`. -# This means, if a particular package is only ever used as a target specific -# dependency, such as, for example, the `nix` crate only being used via the -# `target_family = "unix"` configuration, that only having windows targets in -# this list would mean the nix crate, as well as any of its exclusive -# dependencies not shared by any other crates, would be ignored, as the target -# list here is effectively saying which targets you are building for. -targets = [ - # The triple can be any string, but only the target triples built in to - # rustc (as of 1.40) can be checked against actual config expressions - #{ triple = "x86_64-unknown-linux-musl" }, - # You can also specify which target_features you promise are enabled for a - # particular target. target_features are currently not validated against - # the actual valid features supported by the target architecture. 
- #{ triple = "wasm32-unknown-unknown", features = ["atomics"] }, -] - -# This section is considered when running `cargo deny check advisories` -# More documentation for the advisories section can be found here: -# https://embarkstudios.github.io/cargo-deny/checks/advisories/cfg.html -[advisories] -# The path where the advisory database is cloned/fetched into -db-path = "~/.cargo/advisory-db" -# The url of the advisory database to use -db-urls = ["https://github.com/rustsec/advisory-db"] -# The lint level for security vulnerabilities -vulnerability = "deny" -# The lint level for unmaintained crates -unmaintained = "warn" -# The lint level for crates that have been yanked from their source registry -yanked = "warn" -# The lint level for crates with security notices. Note that as of -# 2019-12-17 there are no security notice advisories in -# https://github.com/rustsec/advisory-db -notice = "warn" -# A list of advisory IDs to ignore. Note that ignored advisories will still -# output a note when they are encountered. -ignore = [ - "RUSTSEC-2020-0070", - # Comes from honggfuzz via storage-proof-fuzzer: 'memmap' - "RUSTSEC-2020-0077", - # net2 (origin: Substrate RPC crates) - "RUSTSEC-2020-0016", - # time (origin: Substrate RPC + benchmarking crates) - "RUSTSEC-2020-0071", - # chrono (origin: Substrate benchmarking + cli + ...) - "RUSTSEC-2020-0159", - # lru 0.6.6 (origin: libp2p) - "RUSTSEC-2021-0130", -] -# Threshold for security vulnerabilities, any vulnerability with a CVSS score -# lower than the range specified will be ignored. Note that ignored advisories -# will still output a note when they are encountered. -# * None - CVSS Score 0.0 -# * Low - CVSS Score 0.1 - 3.9 -# * Medium - CVSS Score 4.0 - 6.9 -# * High - CVSS Score 7.0 - 8.9 -# * Critical - CVSS Score 9.0 - 10.0 -#severity-threshold = - -# This section is considered when running `cargo deny check licenses` -# More documentation for the licenses section can be found here: -# https://embarkstudios.github.io/cargo-deny/checks/licenses/cfg.html -[licenses] -# The lint level for crates which do not have a detectable license -unlicensed = "allow" -# List of explictly allowed licenses -# See https://spdx.org/licenses/ for list of possible licenses -# [possible values: any SPDX 3.7 short identifier (+ optional exception)]. -allow = [ - "BlueOak-1.0.0" -] -# List of explictly disallowed licenses -# See https://spdx.org/licenses/ for list of possible licenses -# [possible values: any SPDX 3.7 short identifier (+ optional exception)]. -deny = [ - #"Nokia", -] -# Lint level for licenses considered copyleft -copyleft = "allow" -# Blanket approval or denial for OSI-approved or FSF Free/Libre licenses -# * both - The license will be approved if it is both OSI-approved *AND* FSF -# * either - The license will be approved if it is either OSI-approved *OR* FSF -# * osi-only - The license will be approved if is OSI-approved *AND NOT* FSF -# * fsf-only - The license will be approved if is FSF *AND NOT* OSI-approved -# * neither - This predicate is ignored and the default lint level is used -allow-osi-fsf-free = "either" -# Lint level used when no other predicates are matched -# 1. License isn't in the allow or deny lists -# 2. License isn't copyleft -# 3. License isn't OSI/FSF, or allow-osi-fsf-free = "neither" -default = "deny" -# The confidence threshold for detecting a license from license text. -# The higher the value, the more closely the license text must be to the -# canonical license text of a valid SPDX license file. 
-# [possible values: any between 0.0 and 1.0]. -confidence-threshold = 0.9 -# Allow 1 or more licenses on a per-crate basis, so that particular licenses -# aren't accepted for every possible crate as with the normal allow list -exceptions = [ - # Each entry is the crate and version constraint, and its specific allow - # list - #{ allow = ["Zlib"], name = "adler32", version = "*" }, -] - -# Some crates don't have (easily) machine readable licensing information, -# adding a clarification entry for it allows you to manually specify the -# licensing information -[[licenses.clarify]] -# The name of the crate the clarification applies to -name = "ring" -# THe optional version constraint for the crate -#version = "*" -# The SPDX expression for the license requirements of the crate -expression = "OpenSSL" -# One or more files in the crate's source used as the "source of truth" for -# the license expression. If the contents match, the clarification will be used -# when running the license check, otherwise the clarification will be ignored -# and the crate will be checked normally, which may produce warnings or errors -# depending on the rest of your configuration -license-files = [ - # Each entry is a crate relative path, and the (opaque) hash of its contents - { path = "LICENSE", hash = 0xbd0eed23 } -] - -[[licenses.clarify]] -name = "webpki" -expression = "ISC" -license-files = [{ path = "LICENSE", hash = 0x001c7e6c }] - -[licenses.private] -# If true, ignores workspace crates that aren't published, or are only -# published to private registries -ignore = false -# One or more private registries that you might publish crates to, if a crate -# is only published to private registries, and ignore is true, the crate will -# not have its license(s) checked -registries = [ - #"https://sekretz.com/registry -] - -# This section is considered when running `cargo deny check bans`. -# More documentation about the 'bans' section can be found here: -# https://embarkstudios.github.io/cargo-deny/checks/bans/cfg.html -[bans] -# Lint level for when multiple versions of the same crate are detected -multiple-versions = "warn" -# The graph highlighting used when creating dotgraphs for crates -# with multiple versions -# * lowest-version - The path to the lowest versioned duplicate is highlighted -# * simplest-path - The path to the version with the fewest edges is highlighted -# * all - Both lowest-version and simplest-path are used -highlight = "lowest-version" -# List of crates that are allowed. Use with care! -allow = [ - #{ name = "ansi_term", version = "=0.11.0" }, -] -# List of crates to deny -deny = [ - { name = "parity-util-mem", version = "<0.6" } - # Each entry the name of a crate and a version range. If version is - # not specified, all versions will be matched. -] -# Certain crates/versions that will be skipped when doing duplicate detection. -skip = [ - #{ name = "ansi_term", version = "=0.11.0" }, -] -# Similarly to `skip` allows you to skip certain crates during duplicate -# detection. Unlike skip, it also includes the entire tree of transitive -# dependencies starting at the specified crate, up to a certain depth, which is -# by default infinite -skip-tree = [ - #{ name = "ansi_term", version = "=0.11.0", depth = 20 }, -] - -# This section is considered when running `cargo deny check sources`. 
-# More documentation about the 'sources' section can be found here: -# https://embarkstudios.github.io/cargo-deny/checks/sources/cfg.html -[sources] -# Lint level for what to happen when a crate from a crate registry that is not -# in the allow list is encountered -unknown-registry = "deny" -# Lint level for what to happen when a crate from a git repository that is not -# in the allow list is encountered -unknown-git = "allow" -# List of URLs for allowed crate registries. Defaults to the crates.io index -# if not specified. If it is specified but empty, no registries are allowed. -allow-registry = ["https://github.com/rust-lang/crates.io-index"] -# List of URLs for allowed Git repositories -allow-git = [] diff --git a/polkadot/bridges/deployments/BridgeDeps.Dockerfile b/polkadot/bridges/deployments/BridgeDeps.Dockerfile deleted file mode 100644 index 6d3b3fa1704..00000000000 --- a/polkadot/bridges/deployments/BridgeDeps.Dockerfile +++ /dev/null @@ -1,32 +0,0 @@ -# Image with dependencies required to build projects from the bridge repo. -# -# This image is meant to be used as a building block when building images for -# the various components in the bridge repo, such as nodes and relayers. -FROM docker.io/library/ubuntu:20.04 - -ENV LAST_DEPS_UPDATE 2021-04-01 -ENV DEBIAN_FRONTEND=noninteractive - -RUN set -eux; \ - apt-get update && \ - apt-get install -y curl ca-certificates && \ - apt-get install -y cmake pkg-config libssl-dev git clang libclang-dev - -ENV LAST_CERTS_UPDATE 2021-04-01 - -RUN update-ca-certificates && \ - curl https://sh.rustup.rs -sSf | sh -s -- -y - -ENV PATH="/root/.cargo/bin:${PATH}" -ENV LAST_RUST_UPDATE 2021-04-01 - -RUN rustup update stable && \ - rustup install nightly && \ - rustup target add wasm32-unknown-unknown --toolchain nightly - -RUN rustc -vV && \ - cargo -V && \ - gcc -v && \ - cmake --version - -ENV RUST_BACKTRACE 1 diff --git a/polkadot/bridges/deployments/README.md b/polkadot/bridges/deployments/README.md deleted file mode 100644 index 920935d5fdb..00000000000 --- a/polkadot/bridges/deployments/README.md +++ /dev/null @@ -1,247 +0,0 @@ -# Bridge Deployments - -## Requirements -Make sure to install `docker` and `docker-compose` to be able to run and test bridge deployments. If -for whatever reason you can't or don't want to use Docker, you can find some scripts for running the -bridge [here](https://github.com/svyatonik/parity-bridges-common.test). - -## Networks -One of the building blocks we use for our deployments are _networks_. A network is a collection of -homogenous blockchain nodes. We have Docker Compose files for each network that we want to bridge. -Each of the compose files found in the `./networks` folder is able to independently spin up a -network like so: - -```bash -docker-compose -f ./networks/rialto.yml up -``` - -After running this command we would have a network of several nodes producing blocks. - -## Bridges -A _bridge_ is a way for several _networks_ to connect to one another. Bridge deployments have their -own Docker Compose files which can be found in the `./bridges` folder. These Compose files typically -contain bridge relayers, which are services external to blockchain nodes, and other components such -as testing infrastructure, or user interfaces. - -Unlike the network Compose files, these *cannot* be deployed on their own. They must be combined -with different networks. 
- -In general, we can deploy the bridge using `docker-compose up` in the following way: - -```bash -docker-compose -f .yml \ - -f .yml \ - -f .yml \ - -f .yml up -``` - -If you want to see how the Compose commands are actually run, check out the source code of the -[`./run.sh`](./run.sh). - -One thing worth noting is that we have a _monitoring_ Compose file. This adds support for Prometheus -and Grafana. We cover these in more details in the [Monitoring](#monitoring) section. At the moment -the monitoring Compose file is _not_ optional, and must be included for bridge deployments. - -### Running and Updating Deployments -We currently support two bridge deployments -1. Rialto Substrate to Millau Substrate -2. Westend Substrate to Millau Substrate - -These bridges can be deployed using our [`./run.sh`](./run.sh) script. - -The first argument it takes is the name of the bridge you want to run. Right now we only support two -bridges: `rialto-millau` and `westend-millau`. - -```bash -./run.sh rialto-millau -``` - -If you add a second `update` argument to the script it will pull the latest images from Docker Hub -and restart the deployment. - -```bash -./run.sh rialto-millau update -``` - -You can also bring down a deployment using the script with the `stop` argument. - -```bash -./run.sh rialto-millau stop -``` - -### Adding Deployments -We need two main things when adding a new deployment. First, the new network which we want to -bridge. A compose file for the network should be added in the `/networks/` folder. Secondly we'll -need a new bridge Compose file in `./bridges/`. This should configure the bridge relayer nodes -correctly for the two networks, and add any additional components needed for the deployment. If you -want you can also add support in the `./run` script for the new deployment. While recommended it's -not strictly required. - -## General Notes - -Rialto authorities are named: `Alice`, `Bob`, `Charlie`, `Dave`, `Eve`. -Millau authorities are named: `Alice`, `Bob`, `Charlie`, `Dave`, `Eve`. - -Both authorities and following accounts have enough funds (for test purposes) on corresponding Substrate chains: - -- on Rialto: `Ferdie`, `George`, `Harry`. -- on Millau: `Ferdie`, `George`, `Harry`. - -Names of accounts on Substrate (Rialto and Millau) chains may be prefixed with `//` and used as -seeds for the `sr25519` keys. This seed may also be used in the signer argument in Substrate relays. -Example: - -```bash -./substrate-relay relay-headers rialto-to-millau \ - --source-host rialto-node-alice \ - --source-port 9944 \ - --target-host millau-node-alice \ - --target-port 9944 \ - --source-signer //Harry \ - --prometheus-host=0.0.0.0 -``` - -Some accounts are used by bridge components. Using these accounts to sign other transactions -is not recommended, because this may lead to nonces conflict. 
- -Following accounts are used when `rialto-millau` bridge is running: - -- Millau's `Charlie` signs complex headers+messages relay transactions on Millau chain; - Rialto's `Charlie` signs complex headers+messages relay transactions on Rialto chain; - Millau's `Dave` signs Millau transactions which contain messages for Rialto; - Rialto's `Dave` signs Rialto transactions which contain messages for Millau; - Millau's `Eve` signs relay transactions with message delivery confirmations (lane 00000001) from Rialto to Millau; - Rialto's `Eve` signs relay transactions with messages (lane 00000001) from Millau to Rialto; - Millau's `Ferdie` signs relay transactions with messages (lane 00000001) from Rialto to Millau; - Rialto's `Ferdie` signs relay transactions with message delivery confirmations (lane 00000001) from Millau to Rialto; - Millau's `RialtoMessagesOwner` signs relay transactions with updated Rialto -> Millau conversion rate; - Rialto's `MillauMessagesOwner` signs relay transactions with updated Millau -> Rialto conversion rate. - -Following accounts are used when `westend-millau` bridge is running: - -- Millau's `George` signs relay transactions with new Westend headers. - -### Docker Usage -When the network is running you can query logs from individual nodes using: - -```bash -docker logs rialto_millau-node-charlie_1 -f -``` - -To kill all leftover containers and start the network from scratch next time: -```bash -docker ps -a --format "{{.ID}}" | xargs docker rm # This removes all containers! -``` - -### Docker Compose Usage -If you're not familiar with how to use `docker-compose` here are some useful commands you'll need -when interacting with the bridge deployments: - -```bash -docker-compose pull # Get the latest images from the Docker Hub -docker-compose build # This is going to build images -docker-compose up # Start all the nodes -docker-compose up -d # Start the nodes in detached mode. -docker-compose down # Stop the network. -``` - -Note that you'll need to add the appropriate `-f` arguments that were mentioned in the -[Bridges](#bridges) section. You can read more about using multiple Compose files -[here](https://docs.docker.com/compose/extends/#multiple-compose-files). One thing worth noting is -that the _order_ the compose files are specified in matters. A different order will result in a -different configuration. - -You can sanity check the final config like so: - -```bash -docker-compose -f docker-compose.yml -f docker-compose.override.yml config > docker-compose.merged.yml -``` - -## Docker and Git Deployment -It is also possible to avoid using images from the Docker Hub and instead build -containers from Git. There are two ways to build the images this way. - -### Git Repo -If you have cloned the bridges repo you can build local Docker images by running the following -command at the top level of the repo: - -```bash -docker build . -t local/ --build-arg=PROJECT= -``` - -This will build a local image of a particular component with a tag of -`local/`. This tag can be used in Docker Compose files. - -You can configure the build using Docker -[build arguments](https://docs.docker.com/engine/reference/commandline/build/#set-build-time-variables---build-arg).
-Here are the arguments currently supported: - - `BRIDGE_REPO`: Git repository of the bridge node and relay code - - `BRIDGE_HASH`: Commit hash within that repo (can also be a branch or tag) - - `ETHEREUM_REPO`: Git repository of the OpenEthereum client - - `ETHEREUM_HASH`: Commit hash within that repo (can also be a branch or tag) - - `PROJECT`: Project to build within the bridges repo. Can be one of: - - `rialto-bridge-node` - - `millau-bridge-node` - - `substrate-relay` - -### GitHub Actions -We have a nightly job which runs and publishes Docker images for the different nodes and relayers to -the [ParityTech Docker Hub](https://hub.docker.com/u/paritytech) organization. These images are used -for our ephemeral (temporary) test networks. Additionally, any time a tag in the form of `v*` is -pushed to GitHub the publishing job is run. This will build all the components (nodes, relayers) and -publish them. - -With images built using either method, all you have to do to use them in a deployment is change the -`image` field in the existing Docker Compose files to point to the tag of the image you want to use. - -### Monitoring -[Prometheus](https://prometheus.io/) is used by the bridge relay to monitor information such as system -resource use, and block data (e.g. the best blocks it knows about). In order to visualize this data -a [Grafana](https://grafana.com/) dashboard can be used. - -As part of the Rialto `docker-compose` setup we spin up a Prometheus server and Grafana dashboard. The -Prometheus server connects to the Prometheus data endpoint exposed by the bridge relay. The Grafana -dashboard uses the Prometheus server as its data source. - -The default port for the bridge relay's Prometheus data is `9616`. The host and port can be -configured through the `--prometheus-host` and `--prometheus-port` flags. The Prometheus server's -dashboard can be accessed at `http://localhost:9090`. The Grafana dashboard can be accessed at -`http://localhost:3000`. Note that the default log-in credentials for Grafana are `admin:admin`. - -### Environment Variables -Here is an example `.env` file which is used for production deployments and network updates. For -security reasons it is not kept as part of version control. When deploying a network this -file should be correctly populated and kept in the appropriate [`bridges`](`./bridges`) deployment -folder. - -The `UI_SUBSTRATE_PROVIDER` variable lets you define the url of the Substrate node that the user -interface will connect to. `UI_ETHEREUM_PROVIDER` is used only as a guidance for users to connect -Metamask to the right Ethereum network. `UI_EXPECTED_ETHEREUM_NETWORK_ID` is used by -the user interface as a fail safe to prevent users from connecting their Metamask extension to an -unexpected network. - -```bash -GRAFANA_ADMIN_PASS=admin_pass -GRAFANA_SERVER_ROOT_URL=%(protocol)s://%(domain)s:%(http_port)s/ -GRAFANA_SERVER_DOMAIN=server.domain.io -MATRIX_ACCESS_TOKEN="access-token" -WITH_PROXY=1 # Optional -UI_SUBSTRATE_PROVIDER=ws://localhost:9944 -UI_ETHEREUM_PROVIDER=http://localhost:8545 -UI_EXPECTED_ETHEREUM_NETWORK_ID=105 -``` - -### UI - -Use [wss://rialto.bridges.test-installations.parity.io/](https://polkadot.js.org/apps/) -as a custom endpoint for [https://polkadot.js.org/apps/](https://polkadot.js.org/apps/).
- -### Polkadot.js UI - -To teach the UI decode our custom types used in the pallet, go to: `Settings -> Developer` -and import the [`./types.json`](./types.json) - -## Scripts - -The are some bash scripts in `scripts` folder that allow testing `Relay` -without running the entire network within docker. Use if needed for development. diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/relay-millau-to-rialto-messages-dashboard.json b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/relay-millau-to-rialto-messages-dashboard.json deleted file mode 100644 index abce8bbc29a..00000000000 --- a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/relay-millau-to-rialto-messages-dashboard.json +++ /dev/null @@ -1,1684 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "id": 3, - "links": [], - "panels": [ - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 0 - }, - "hiddenSeries": false, - "id": 2, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "Millau_to_Rialto_MessageLane_00000000_best_target_block_number", - "instant": false, - "interval": "", - "legendFormat": "At Rialto", - "refId": "A" - }, - { - "expr": "Millau_to_Rialto_MessageLane_00000000_best_target_at_source_block_number", - "instant": false, - "interval": "", - "legendFormat": "At Millau", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Best finalized Rialto headers", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 9, - "w": 12, - "x": 12, - "y": 0 - }, - "hiddenSeries": false, - "id": 4, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": 
false, - "steppedLine": false, - "targets": [ - { - "expr": "Millau_to_Rialto_MessageLane_00000000_best_source_block_number", - "interval": "", - "legendFormat": "At Millau", - "refId": "A" - }, - { - "expr": "Millau_to_Rialto_MessageLane_00000000_best_source_at_target_block_number", - "interval": "", - "legendFormat": "At Rialto", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Best finalized Millau headers", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 1 - ], - "type": "lt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "B", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "max" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "1m", - "handler": 1, - "name": "Messages generated at Millau are not detected by relay", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 11, - "w": 12, - "x": 0, - "y": 9 - }, - "hiddenSeries": false, - "id": 6, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "label_replace(label_replace(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=~\"source_latest_generated|target_latest_received\"}, \"type\", \"Latest message sent from Rialto\", \"type\", \"source_latest_generated\"), \"type\", \"Latest message received by Millau\", \"type\", \"target_latest_received\")", - "interval": "", - "legendFormat": "{{type}}", - "refId": "A" - }, - { - "expr": "max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"source_latest_generated\"}[5m]) - min_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"source_latest_generated\"}[5m])", - "hide": true, - "interval": "", - "legendFormat": "Messages generated in last 5 minutes", - "refId": "B" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "lt", - "value": 1, - "yaxis": "left" - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Delivery race (00000000)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - 
"format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 11, - "w": 12, - "x": 12, - "y": 9 - }, - "hiddenSeries": false, - "id": 8, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "label_replace(label_replace(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=~\"source_latest_confirmed|target_latest_received\"}, \"type\", \"Latest message confirmed by Rialto to Millau\", \"type\", \"source_latest_confirmed\"), \"type\", \"Latest message received by Millau\", \"type\", \"target_latest_received\")", - "interval": "", - "legendFormat": "{{type}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Confirmations race (00000000)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 1 - ], - "type": "lt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "B", - "1m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "sum" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "7m", - "frequency": "1m", - "handler": 1, - "name": "Messages from Millau to Rialto are not being delivered", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 7, - "w": 8, - "x": 0, - "y": 20 - }, - "hiddenSeries": false, - "id": 10, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"source_latest_generated\"}[2m])) - scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m]))", - "format": "time_series", - "instant": false, - "interval": "", - "legendFormat": 
"Undelivered messages at Rialto", - "refId": "A" - }, - { - "expr": "increase(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[1m]) OR on() vector(0)", - "interval": "", - "legendFormat": "Messages delivered to Rialto in last 1m", - "refId": "B" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "lt", - "value": 1, - "yaxis": "left" - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Delivery race lags (00000000)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 10 - ], - "type": "gt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "A", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "min" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "1m", - "handler": 1, - "name": "Too many unconfirmed messages", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 7, - "w": 8, - "x": 8, - "y": 20 - }, - "hiddenSeries": false, - "id": 12, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m])) - scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"source_latest_confirmed\"}[2m]))", - "interval": "", - "legendFormat": "Unconfirmed messages at Millau", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": 10, - "yaxis": "left" - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Confirmations race lags (00000000)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 10 - ], - "type": "gt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "B", - "5m", 
- "now" - ] - }, - "reducer": { - "params": [], - "type": "min" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "1m", - "handler": 1, - "name": "Rewards are not being confirmed", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 7, - "w": 8, - "x": 16, - "y": 20 - }, - "hiddenSeries": false, - "id": 14, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"source_latest_confirmed\"}[2m])) - scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"target_latest_confirmed\"}[2m]))", - "interval": "", - "legendFormat": "Unconfirmed rewards at Rialto", - "refId": "A" - }, - { - "expr": "(scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"source_latest_confirmed\"}[2m])) - scalar(max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"target_latest_confirmed\"}[2m]))) * (max_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m]) > bool min_over_time(Millau_to_Rialto_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m]))", - "interval": "", - "legendFormat": "Unconfirmed rewards at Rialto (zero if messages are not being delivered to Rialto)", - "refId": "B" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": 10, - "yaxis": "left" - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Reward lags (00000000)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 1 - ], - "type": "lt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "B", - "1m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "max" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "7m", - "frequency": "1m", - "handler": 1, - "name": "Messages (00000001) from Millau to Rialto are not being delivered", - "noDataState": "alerting", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 11, - "w": 12, - "x": 0, - "y": 27 - }, - 
"hiddenSeries": false, - "id": 21, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "label_replace(label_replace(Millau_to_Rialto_MessageLane_00000001_lane_state_nonces{type=~\"source_latest_generated|target_latest_received\"}, \"type\", \"Latest message sent from Rialto\", \"type\", \"source_latest_generated\"), \"type\", \"Latest message received by Millau\", \"type\", \"target_latest_received\")", - "interval": "", - "legendFormat": "{{type}}", - "refId": "A" - }, - { - "expr": "increase(Millau_to_Rialto_MessageLane_00000001_lane_state_nonces{type=\"target_latest_received\"}[10m]) OR on() vector(0)", - "hide": true, - "interval": "", - "legendFormat": "Messages generated in last 5 minutes", - "refId": "B" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "lt", - "value": 1 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Delivery race (00000001)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 1 - ], - "type": "lt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "B", - "1m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "max" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "1m", - "handler": 1, - "name": "Messages (00000001) from Millau to Rialto are not being confirmed", - "noDataState": "alerting", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 11, - "w": 12, - "x": 12, - "y": 27 - }, - "hiddenSeries": false, - "id": 22, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "label_replace(label_replace(Millau_to_Rialto_MessageLane_00000001_lane_state_nonces{type=~\"source_latest_confirmed|target_latest_received\"}, \"type\", \"Latest message confirmed by Rialto to Millau\", \"type\", \"source_latest_confirmed\"), \"type\", \"Latest message received by Millau\", \"type\", \"target_latest_received\")", - "interval": "", - "legendFormat": "{{type}}", - "refId": "A" - }, - { - "expr": 
"increase(Millau_to_Rialto_MessageLane_00000001_lane_state_nonces{type=\"source_latest_confirmed\"}[10m]) OR on() vector(0)", - "hide": true, - "interval": "", - "legendFormat": "", - "refId": "B" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "lt", - "value": 1, - "yaxis": "left" - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Confirmations race (00000001)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 1 - ], - "type": "lt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "B", - "1m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "max" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "1m", - "handler": 1, - "name": "Token swap messages from Millau to Rialto are not being delivered", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 11, - "w": 12, - "x": 0, - "y": 38 - }, - "hiddenSeries": false, - "id": 23, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "label_replace(label_replace(Millau_to_Rialto_MessageLane_73776170_lane_state_nonces{type=~\"source_latest_generated|target_latest_received\"}, \"type\", \"Latest message sent from Rialto\", \"type\", \"source_latest_generated\"), \"type\", \"Latest message received by Millau\", \"type\", \"target_latest_received\")", - "interval": "", - "legendFormat": "{{type}}", - "refId": "A" - }, - { - "expr": "increase(Millau_to_Rialto_MessageLane_73776170_lane_state_nonces{type=\"target_latest_received\"}[20m]) OR on() vector(0)", - "hide": true, - "interval": "", - "legendFormat": "Messages generated in last 5 minutes", - "refId": "B" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "lt", - "value": 1 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Delivery race (73776170)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - 
"alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 11, - "w": 12, - "x": 12, - "y": 38 - }, - "hiddenSeries": false, - "id": 24, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "label_replace(label_replace(Millau_to_Rialto_MessageLane_73776170_lane_state_nonces{type=~\"source_latest_confirmed|target_latest_received\"}, \"type\", \"Latest message confirmed by Rialto to Millau\", \"type\", \"source_latest_confirmed\"), \"type\", \"Latest message received by Millau\", \"type\", \"target_latest_received\")", - "interval": "", - "legendFormat": "{{type}}", - "refId": "A" - }, - { - "expr": "increase(Millau_to_Rialto_MessageLane_73776170_lane_state_nonces{type=\"source_latest_confirmed\"}[10m]) OR on() vector(0)", - "hide": true, - "interval": "", - "legendFormat": "", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Confirmations race (73776170)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 8, - "x": 0, - "y": 49 - }, - "id": 16, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "avg_over_time(process_cpu_usage_percentage{instance='relay-millau-rialto:9616'}[1m])", - "instant": true, - "interval": "", - "legendFormat": "1 CPU = 100", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Relay process CPU usage (1 CPU = 100)", - "type": "gauge" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 8, - "x": 8, - "y": 49 - }, - "hiddenSeries": false, - "id": 18, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - 
"points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "system_average_load{instance='relay-millau-rialto:9616'}", - "interval": "", - "legendFormat": "Average system load in last {{over}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "System load average", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 8, - "x": 16, - "y": 49 - }, - "hiddenSeries": false, - "id": 20, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "process_memory_usage_bytes{instance='relay-millau-rialto:9616'} / 1024 / 1024", - "interval": "", - "legendFormat": "Process memory, MB", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Memory used by relay process", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "refresh": "5s", - "schemaVersion": 26, - "style": "dark", - "tags": [], - "templating": { - "list": [] - }, - "time": { - "from": "now-5m", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ] - }, - "timezone": "", - "title": "Millau to Rialto Message Sync Dashboard", - "uid": "relay-millau-to-rialto-messages", - "version": 2 -} diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/relay-rialto-to-millau-messages-dashboard.json b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/relay-rialto-to-millau-messages-dashboard.json deleted file mode 100644 index 4e3d314a3f4..00000000000 --- a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/relay-rialto-to-millau-messages-dashboard.json +++ /dev/null @@ -1,1433 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", 
- "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "id": 4, - "links": [], - "panels": [ - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 0 - }, - "hiddenSeries": false, - "id": 2, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "Rialto_to_Millau_MessageLane_00000000_best_target_block_number", - "instant": false, - "interval": "", - "legendFormat": "At Millau", - "refId": "A" - }, - { - "expr": "Rialto_to_Millau_MessageLane_00000000_best_target_at_source_block_number", - "instant": false, - "interval": "", - "legendFormat": "At Rialto", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Best finalized Millau headers", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 9, - "w": 12, - "x": 12, - "y": 0 - }, - "hiddenSeries": false, - "id": 4, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "Rialto_to_Millau_MessageLane_00000000_best_source_block_number", - "interval": "", - "legendFormat": "At Rialto", - "refId": "A" - }, - { - "expr": "Rialto_to_Millau_MessageLane_00000000_best_source_at_target_block_number", - "interval": "", - "legendFormat": "At Millau", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Best finalized Rialto headers", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - 
"alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 1 - ], - "type": "lt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "B", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "max" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "1m", - "handler": 1, - "name": "Messages generated at Rialto are not detected by relay", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 11, - "w": 12, - "x": 0, - "y": 9 - }, - "hiddenSeries": false, - "id": 6, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "label_replace(label_replace(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=~\"source_latest_generated|target_latest_received\"}, \"type\", \"Latest message sent from Millau\", \"type\", \"source_latest_generated\"), \"type\", \"Latest message received by Rialto\", \"type\", \"target_latest_received\")", - "interval": "", - "legendFormat": "{{type}}", - "refId": "A" - }, - { - "expr": "max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"source_latest_generated\"}[5m]) - min_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"source_latest_generated\"}[5m])", - "hide": true, - "interval": "", - "legendFormat": "Messages generated in last 5 minutes", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Delivery race (00000000)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 11, - "w": 12, - "x": 12, - "y": 9 - }, - "hiddenSeries": false, - "id": 8, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "label_replace(label_replace(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=~\"source_latest_confirmed|target_latest_received\"}, \"type\", \"Latest 
message confirmed by Millau to Rialto\", \"type\", \"source_latest_confirmed\"), \"type\", \"Latest message received by Rialto\", \"type\", \"target_latest_received\")", - "interval": "", - "legendFormat": "{{type}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Confirmations race (00000000)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 1 - ], - "type": "lt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "B", - "1m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "sum" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "7m", - "frequency": "1m", - "handler": 1, - "name": "Messages from Rialto to Millau are not being delivered", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 7, - "w": 8, - "x": 0, - "y": 20 - }, - "hiddenSeries": false, - "id": 10, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"source_latest_generated\"}[2m])) - scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m]))", - "format": "time_series", - "instant": false, - "interval": "", - "legendFormat": "Undelivered messages at Millau", - "refId": "A" - }, - { - "expr": "increase(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[1m]) OR on() vector(0)", - "interval": "", - "legendFormat": "Messages delivered to Millau in last 1m", - "refId": "B" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "lt", - "value": 1, - "yaxis": "left" - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Delivery race lags (00000000)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - 
"evaluator": { - "params": [ - 10 - ], - "type": "gt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "A", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "min" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "1m", - "handler": 1, - "name": "Too many unconfirmed messages", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 7, - "w": 8, - "x": 8, - "y": 20 - }, - "hiddenSeries": false, - "id": 12, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m])) - scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"source_latest_confirmed\"}[2m]))", - "interval": "", - "legendFormat": "Unconfirmed messages at Rialto", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": 10, - "yaxis": "left" - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Confirmations race lags (00000000)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 10 - ], - "type": "gt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "B", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "min" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "1m", - "handler": 1, - "name": "Rewards are not being confirmed", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 7, - "w": 8, - "x": 16, - "y": 20 - }, - "hiddenSeries": false, - "id": 14, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": 
"scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"source_latest_confirmed\"}[2m])) - scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"target_latest_confirmed\"}[2m]))", - "interval": "", - "legendFormat": "Unconfirmed rewards at Millau", - "refId": "A" - }, - { - "expr": "(scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"source_latest_confirmed\"}[2m])) - scalar(max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"target_latest_confirmed\"}[2m]))) * (max_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m]) > bool min_over_time(Rialto_to_Millau_MessageLane_00000000_lane_state_nonces{type=\"target_latest_received\"}[2m]))", - "interval": "", - "legendFormat": "Unconfirmed rewards at Millau (zero if messages are not being delivered to Millau)", - "refId": "B" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": 10, - "yaxis": "left" - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Reward lags (00000000)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 1 - ], - "type": "lt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "B", - "1m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "max" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "7m", - "frequency": "1m", - "handler": 1, - "name": "Messages (00000001) from Rialto to Millau are not being delivered", - "noDataState": "alerting", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 11, - "w": 12, - "x": 0, - "y": 27 - }, - "hiddenSeries": false, - "id": 21, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "label_replace(label_replace(Rialto_to_Millau_MessageLane_00000001_lane_state_nonces{type=~\"source_latest_generated|target_latest_received\"}, \"type\", \"Latest message sent from Millau\", \"type\", \"source_latest_generated\"), \"type\", \"Latest message received by Rialto\", \"type\", \"target_latest_received\")", - "interval": "", - "legendFormat": "{{type}}", - "refId": "A" - }, - { - "expr": "increase(Rialto_to_Millau_MessageLane_00000001_lane_state_nonces{type=\"target_latest_received\"}[10m]) OR on() vector(0)", - "hide": true, - "interval": "", - "legendFormat": 
"Messages generated in last 5 minutes", - "refId": "B" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "lt", - "value": 1, - "yaxis": "left" - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Delivery race (00000001)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 1 - ], - "type": "lt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "B", - "1m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "max" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "1m", - "handler": 1, - "name": "Messages (00000001) from Rialto to Millau are not being confirmed", - "noDataState": "alerting", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 11, - "w": 12, - "x": 12, - "y": 27 - }, - "hiddenSeries": false, - "id": 22, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "label_replace(label_replace(Rialto_to_Millau_MessageLane_00000001_lane_state_nonces{type=~\"source_latest_confirmed|target_latest_received\"}, \"type\", \"Latest message confirmed by Millau to Rialto\", \"type\", \"source_latest_confirmed\"), \"type\", \"Latest message received by Rialto\", \"type\", \"target_latest_received\")", - "interval": "", - "legendFormat": "{{type}}", - "refId": "A" - }, - { - "expr": "increase(Rialto_to_Millau_MessageLane_00000001_lane_state_nonces{type=\"source_latest_confirmed\"}[10m]) OR on() vector(0)", - "hide": true, - "interval": "", - "legendFormat": "", - "refId": "B" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "lt", - "value": 1, - "yaxis": "left" - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Confirmations race (00000001)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - 
"thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 8, - "x": 0, - "y": 38 - }, - "id": 16, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "avg_over_time(process_cpu_usage_percentage{instance='relay-millau-rialto:9616'}[1m])", - "instant": true, - "interval": "", - "legendFormat": "1 CPU = 100", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Relay process CPU usage (1 CPU = 100)", - "type": "gauge" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 8, - "x": 8, - "y": 38 - }, - "hiddenSeries": false, - "id": 18, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "system_average_load{instance='relay-millau-rialto:9616'}", - "interval": "", - "legendFormat": "Average system load in last {{over}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "System load average", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "links": [] - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 8, - "x": 16, - "y": 38 - }, - "hiddenSeries": false, - "id": 20, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "process_memory_usage_bytes{instance='relay-millau-rialto:9616'} / 1024 / 1024", - "interval": "", - "legendFormat": "Process memory, MB", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Memory used by relay process", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - 
"show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "refresh": "5s", - "schemaVersion": 26, - "style": "dark", - "tags": [], - "templating": { - "list": [] - }, - "time": { - "from": "now-5m", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ] - }, - "timezone": "", - "title": "Rialto to Millau Message Sync Dashboard", - "uid": "relay-rialto-to-millau-messages", - "version": 2 -} diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/rialto-millau-maintenance-dashboard.json b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/rialto-millau-maintenance-dashboard.json deleted file mode 100644 index 225e46fae3a..00000000000 --- a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/grafana/rialto-millau-maintenance-dashboard.json +++ /dev/null @@ -1,1059 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "links": [], - "panels": [ - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 0 - }, - "hiddenSeries": false, - "id": 4, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "exemplar": true, - "expr": "rialto_storage_proof_overhead{instance='relay-millau-rialto:9616'}", - "interval": "", - "legendFormat": "Actual overhead", - "refId": "A" - }, - { - "exemplar": true, - "expr": "1024", - "hide": false, - "interval": "", - "legendFormat": "At runtime (hardcoded)", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Rialto: storage proof overhead", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:111", - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "$$hashKey": "object:112", - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, 
- "x": 12, - "y": 0 - }, - "hiddenSeries": false, - "id": 2, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "exemplar": true, - "expr": "polkadot_to_base_conversion_rate{instance='relay-millau-rialto:9616'} / kusama_to_base_conversion_rate{instance='relay-millau-rialto:9616'}", - "interval": "", - "legendFormat": "Outside of runtime (actually Polkadot -> Kusama)", - "refId": "A" - }, - { - "exemplar": true, - "expr": "Millau_Rialto_to_Millau_conversion_rate{instance='relay-millau-rialto:9616'}", - "hide": false, - "interval": "", - "legendFormat": "At runtime", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Millau: Rialto -> Millau conversion rate", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:49", - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "$$hashKey": "object:50", - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 8 - }, - "hiddenSeries": false, - "id": 5, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "exemplar": true, - "expr": "millau_storage_proof_overhead{instance='relay-millau-rialto:9616'}", - "interval": "", - "legendFormat": "Actual overhead", - "refId": "A" - }, - { - "exemplar": true, - "expr": "1024", - "hide": false, - "interval": "", - "legendFormat": "At runtime (hardcoded)", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Millau: storage proof overhead", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:111", - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "$$hashKey": "object:112", - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - 
"dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 8 - }, - "hiddenSeries": false, - "id": 6, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "exemplar": true, - "expr": "kusama_to_base_conversion_rate{instance='relay-millau-rialto:9616'} / polkadot_to_base_conversion_rate{instance='relay-millau-rialto:9616'}", - "interval": "", - "legendFormat": "Outside of runtime (actually Kusama -> Polkadot)", - "refId": "A" - }, - { - "exemplar": true, - "expr": "Rialto_Millau_to_Rialto_conversion_rate{instance='relay-millau-rialto:9616'}", - "hide": false, - "interval": "", - "legendFormat": "At runtime", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Rialto: Millau -> Rialto conversion rate", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:49", - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "$$hashKey": "object:50", - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 1000 - ], - "type": "lt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "A", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "last" - }, - "type": "query" - }, - { - "evaluator": { - "params": [ - 1000 - ], - "type": "lt" - }, - "operator": { - "type": "or" - }, - "query": { - "params": [ - "B", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "last" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "1m", - "handler": 1, - "name": "At-Rialto relay balances are too low", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 16 - }, - "hiddenSeries": false, - "id": 8, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "at_Rialto_relay_balance", - "interval": "", - "legendFormat": "Relay account balance", - "refId": "A" - }, - { - "expr": "at_Rialto_messages_pallet_owner_balance", - "interval": 
"", - "legendFormat": "Messages pallet owner balance", - "refId": "B" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "lt", - "value": 1000 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Rialto relay balances", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 1000 - ], - "type": "lt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "A", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "last" - }, - "type": "query" - }, - { - "evaluator": { - "params": [ - 1000 - ], - "type": "lt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "B", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "last" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "1m", - "handler": 1, - "name": "At-Millau relay balances are too low", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 16 - }, - "hiddenSeries": false, - "id": 9, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "at_Millau_relay_balance", - "interval": "", - "legendFormat": "Relay account balance", - "refId": "A" - }, - { - "expr": "at_Millau_messages_pallet_owner_balance", - "interval": "", - "legendFormat": "Messages pallet owner balance", - "refId": "B" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "lt", - "value": 1000 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Millau relay balances", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 0 - ], - "type": "gt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "A", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "max" - }, - "type": "query" - } - ], - 
"executionErrorState": "alerting", - "for": "5m", - "frequency": "1m", - "handler": 1, - "name": "Whether with-Rialto-grandpa-pallet and Rialto itself are on different forks alert", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 24 - }, - "hiddenSeries": false, - "id": 11, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "Rialto_to_Millau_MessageLane_00000000_is_source_and_source_at_target_using_different_forks", - "interval": "", - "legendFormat": "On different forks?", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": 0 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Whether with-Rialto-grandpa-pallet and Rialto itself are on different forks", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 0 - ], - "type": "gt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "A", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "max" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "1m", - "handler": 1, - "name": "Whether with-Rialto-grandpa-pallet and Rialto itself are on different forks alert", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 24 - }, - "hiddenSeries": false, - "id": 12, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "Millau_to_Rialto_MessageLane_00000000_is_source_and_source_at_target_using_different_forks", - "interval": "", - "legendFormat": "On different forks?", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": 0 - } - ], - "timeFrom": null, - "timeRegions": [], - 
"timeShift": null, - "title": "Whether with-Millau-grandpa-pallet and Millau itself are on different forks", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "refresh": "10s", - "schemaVersion": 26, - "style": "dark", - "tags": [], - "templating": { - "list": [] - }, - "time": { - "from": "now-15m", - "to": "now" - }, - "timepicker": {}, - "timezone": "", - "title": "Rialto+Millau maintenance dashboard", - "uid": "7AuyrjlMz", - "version": 1 -} diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/prometheus/targets.yml b/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/prometheus/targets.yml deleted file mode 100644 index 16b798b5a25..00000000000 --- a/polkadot/bridges/deployments/bridges/rialto-millau/dashboard/prometheus/targets.yml +++ /dev/null @@ -1,4 +0,0 @@ -- targets: - - relay-millau-rialto:9616 - - relay-messages-millau-to-rialto-lane-00000001:9616 - - relay-messages-rialto-to-millau-lane-00000001:9616 diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/docker-compose.yml b/polkadot/bridges/deployments/bridges/rialto-millau/docker-compose.yml deleted file mode 100644 index 5d774a57802..00000000000 --- a/polkadot/bridges/deployments/bridges/rialto-millau/docker-compose.yml +++ /dev/null @@ -1,116 +0,0 @@ -# Exposed ports: 10016, 10116, 10216, 10316, 10416, 10516, 10716 - -version: '3.5' -services: - # We provide overrides for these particular nodes since they are public facing - # nodes which we use to connect from things like Polkadot JS Apps. 
- rialto-node-charlie: - environment: - VIRTUAL_HOST: wss.rialto.brucke.link - VIRTUAL_PORT: 9944 - LETSENCRYPT_HOST: wss.rialto.brucke.link - LETSENCRYPT_EMAIL: admin@parity.io - - millau-node-charlie: - environment: - VIRTUAL_HOST: wss.millau.brucke.link - VIRTUAL_PORT: 9944 - LETSENCRYPT_HOST: wss.millau.brucke.link - LETSENCRYPT_EMAIL: admin@parity.io - - relay-millau-rialto: &sub-bridge-relay - image: paritytech/substrate-relay - entrypoint: /entrypoints/relay-millau-rialto-entrypoint.sh - volumes: - - ./bridges/rialto-millau/entrypoints:/entrypoints - environment: - RUST_LOG: rpc=trace,bridge=trace - ports: - - "10016:9616" - depends_on: &all-nodes - - millau-node-alice - - millau-node-bob - - millau-node-charlie - - millau-node-dave - - millau-node-eve - - rialto-node-alice - - rialto-node-bob - - rialto-node-charlie - - rialto-node-dave - - rialto-node-eve - - relay-messages-millau-to-rialto-lane-00000001: - <<: *sub-bridge-relay - environment: - MSG_EXCHANGE_GEN_LANE: "00000001" - entrypoint: /entrypoints/relay-messages-millau-to-rialto-entrypoint.sh - ports: - - "10116:9616" - depends_on: - - relay-millau-rialto - - relay-messages-millau-to-rialto-generator: - <<: *sub-bridge-relay - environment: - RUST_LOG: bridge=trace - MSG_EXCHANGE_GEN_SECONDARY_LANE: "00000001" - entrypoint: /entrypoints/relay-messages-to-rialto-generator-entrypoint.sh - ports: - - "10216:9616" - depends_on: - - relay-millau-rialto - - relay-messages-millau-to-rialto-resubmitter: - <<: *sub-bridge-relay - environment: - RUST_LOG: bridge=trace - entrypoint: /entrypoints/relay-messages-to-rialto-resubmitter-entrypoint.sh - ports: - - "10316:9616" - depends_on: - - relay-messages-millau-to-rialto-generator - - relay-messages-rialto-to-millau-lane-00000001: - <<: *sub-bridge-relay - environment: - RUST_LOG: bridge=trace - MSG_EXCHANGE_GEN_LANE: "00000001" - entrypoint: /entrypoints/relay-messages-rialto-to-millau-entrypoint.sh - ports: - - "10416:9616" - depends_on: - - relay-millau-rialto - - relay-messages-rialto-to-millau-generator: - <<: *sub-bridge-relay - environment: - MSG_EXCHANGE_GEN_SECONDARY_LANE: "00000001" - entrypoint: /entrypoints/relay-messages-to-millau-generator-entrypoint.sh - ports: - - "10516:9616" - depends_on: - - relay-millau-rialto - - relay-token-swap-generator: - <<: *sub-bridge-relay - entrypoint: /entrypoints/relay-token-swap-generator-entrypoint.sh - ports: - - "10716:9616" - depends_on: - - relay-millau-rialto - - # Note: These are being overridden from the top level `monitoring` compose file. 
- grafana-dashboard: - environment: - VIRTUAL_HOST: grafana.millau.brucke.link,grafana.rialto.brucke.link - VIRTUAL_PORT: 3000 - LETSENCRYPT_HOST: grafana.millau.brucke.link,grafana.rialto.brucke.link - LETSENCRYPT_EMAIL: admin@parity.io - volumes: - - ./bridges/rialto-millau/dashboard/grafana:/etc/grafana/dashboards/rialto-millau:ro - - ./networks/dashboard/grafana/beefy-dashboard.json:/etc/grafana/dashboards/beefy.json - - prometheus-metrics: - volumes: - - ./bridges/rialto-millau/dashboard/prometheus/targets.yml:/etc/prometheus/targets-rialto-millau.yml - depends_on: *all-nodes diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-millau-to-rialto-entrypoint.sh b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-millau-to-rialto-entrypoint.sh deleted file mode 100755 index 743cc47f07e..00000000000 --- a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-millau-to-rialto-entrypoint.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -set -xeu - -sleep 15 - -MESSAGE_LANE=${MSG_EXCHANGE_GEN_LANE:-00000000} - -/home/user/substrate-relay relay-messages millau-to-rialto \ - --lane $MESSAGE_LANE \ - --source-host millau-node-bob \ - --source-port 9944 \ - --source-signer //Eve \ - --target-host rialto-node-bob \ - --target-port 9944 \ - --target-signer //Eve \ - --prometheus-host=0.0.0.0 diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-rialto-to-millau-entrypoint.sh b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-rialto-to-millau-entrypoint.sh deleted file mode 100755 index 2b536dbd817..00000000000 --- a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-rialto-to-millau-entrypoint.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash -set -xeu - -sleep 15 - -MESSAGE_LANE=${MSG_EXCHANGE_GEN_LANE:-00000000} - -/home/user/substrate-relay relay-messages rialto-to-millau \ - --lane $MESSAGE_LANE \ - --source-host rialto-node-bob \ - --source-port 9944 \ - --source-signer //Ferdie \ - --target-host millau-node-bob \ - --target-port 9944 \ - --target-signer //Ferdie \ - --prometheus-host=0.0.0.0 diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-millau-generator-entrypoint.sh b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-millau-generator-entrypoint.sh deleted file mode 100755 index e20b3da7df8..00000000000 --- a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-millau-generator-entrypoint.sh +++ /dev/null @@ -1,143 +0,0 @@ -#!/bin/bash - -# THIS SCRIPT IS NOT INTENDED FOR USE IN PRODUCTION ENVIRONMENT -# -# This scripts periodically calls the Substrate relay binary to generate messages. These messages -# are sent from the Rialto network to the Millau network. 
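The generator below is configured entirely through environment variables; note that, because of the `set -eu` at the top of the script, MSG_EXCHANGE_GEN_SECONDARY_LANE has to be set (an empty value is fine) or the script aborts immediately. A minimal manual run, assuming it is executed inside the substrate-relay container, where /home/user/substrate-relay exists and the rialto-node-bob hostname resolves:

    # exercise the generator on a non-default lane with a shorter maximum delay between messages
    MSG_EXCHANGE_GEN_LANE=00000001 \
    MSG_EXCHANGE_GEN_SECONDARY_LANE= \
    MSG_EXCHANGE_GEN_MAX_SUBMIT_DELAY_S=10 \
    /entrypoints/relay-messages-to-millau-generator-entrypoint.sh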
- -set -eu - -# Max delay before submitting transactions (s) -MAX_SUBMIT_DELAY_S=${MSG_EXCHANGE_GEN_MAX_SUBMIT_DELAY_S:-30} -MESSAGE_LANE=${MSG_EXCHANGE_GEN_LANE:-00000000} -SECONDARY_MESSAGE_LANE=${MSG_EXCHANGE_GEN_SECONDARY_LANE} -MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE=1024 -FERDIE_ADDR=5oSLwptwgySxh5vz1HdvznQJjbQVgwYSvHEpYYeTXu1Ei8j7 - -SHARED_CMD="/home/user/substrate-relay send-message rialto-to-millau" -SHARED_HOST="--source-host rialto-node-bob --source-port 9944" -DAVE_SIGNER="--source-signer //Dave --target-signer //Dave" - -SEND_MESSAGE="$SHARED_CMD $SHARED_HOST $DAVE_SIGNER" - -# Sleep a bit between messages -rand_sleep() { - SUBMIT_DELAY_S=`shuf -i 0-$MAX_SUBMIT_DELAY_S -n 1` - echo "Sleeping $SUBMIT_DELAY_S seconds..." - sleep $SUBMIT_DELAY_S - NOW=`date "+%Y-%m-%d %H:%M:%S"` - echo "Woke up at $NOW" -} - -# start sending large messages immediately -LARGE_MESSAGES_TIME=0 -# start sending message packs in a hour -BUNCH_OF_MESSAGES_TIME=3600 - -while true -do - rand_sleep - echo "Sending Remark from Rialto to Millau using Target Origin" - $SEND_MESSAGE \ - --lane $MESSAGE_LANE \ - --conversion-rate-override metric \ - --origin Target \ - remark - - if [ ! -z $SECONDARY_MESSAGE_LANE ]; then - echo "Sending Remark from Rialto to Millau using Target Origin using secondary lane: $SECONDARY_MESSAGE_LANE" - $SEND_MESSAGE \ - --lane $SECONDARY_MESSAGE_LANE \ - --conversion-rate-override metric \ - --origin Target \ - --dispatch-fee-payment at-target-chain \ - remark - fi - - rand_sleep - echo "Sending Transfer from Rialto to Millau using Target Origin" - $SEND_MESSAGE \ - --lane $MESSAGE_LANE \ - --conversion-rate-override metric \ - --origin Target \ - transfer \ - --amount 1000000000 \ - --recipient $FERDIE_ADDR - - rand_sleep - echo "Sending Remark from Rialto to Millau using Source Origin" - $SEND_MESSAGE \ - --lane $MESSAGE_LANE \ - --conversion-rate-override metric \ - --origin Source \ - remark - - rand_sleep - echo "Sending Transfer from Rialto to Millau using Source Origin" - $SEND_MESSAGE \ - --lane $MESSAGE_LANE \ - --conversion-rate-override metric \ - --origin Source \ - transfer \ - --amount 1000000000 \ - --recipient $FERDIE_ADDR - - # every other hour we're sending 3 large (size, weight, size+weight) messages - if [ $SECONDS -ge $LARGE_MESSAGES_TIME ]; then - LARGE_MESSAGES_TIME=$((SECONDS + 7200)) - - rand_sleep - echo "Sending Maximal Size Remark from Rialto to Millau using Target Origin" - $SEND_MESSAGE \ - --lane $MESSAGE_LANE \ - --conversion-rate-override metric \ - --origin Target \ - remark \ - --remark-size=max - - rand_sleep - echo "Sending Maximal Dispatch Weight Remark from Rialto to Millau using Target Origin" - $SEND_MESSAGE \ - --lane $MESSAGE_LANE \ - --conversion-rate-override metric \ - --origin Target \ - --dispatch-weight=max \ - remark - - rand_sleep - echo "Sending Maximal Size and Dispatch Weight Remark from Rialto to Millau using Target Origin" - $SEND_MESSAGE \ - --lane $MESSAGE_LANE \ - --conversion-rate-override metric \ - --origin Target \ - --dispatch-weight=max \ - remark \ - --remark-size=max - - fi - - # every other hour we're sending a bunch of small messages - if [ $SECONDS -ge $BUNCH_OF_MESSAGES_TIME ]; then - BUNCH_OF_MESSAGES_TIME=$((SECONDS + 7200)) - - SEND_MESSAGE_OUTPUT=`$SEND_MESSAGE --lane $MESSAGE_LANE --conversion-rate-override metric --origin Target remark 2>&1` - echo $SEND_MESSAGE_OUTPUT - ACTUAL_CONVERSION_RATE_REGEX="conversion rate override: ([0-9\.]+)" - if [[ $SEND_MESSAGE_OUTPUT =~ $ACTUAL_CONVERSION_RATE_REGEX ]]; 
then - ACTUAL_CONVERSION_RATE=${BASH_REMATCH[1]} - else - echo "Unable to find conversion rate in send-message output" - exit 1 - fi - - for i in $(seq 1 $MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE); - do - $SEND_MESSAGE \ - --lane $MESSAGE_LANE \ - --conversion-rate-override $ACTUAL_CONVERSION_RATE \ - --origin Target \ - remark - done - - fi -done diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-rialto-generator-entrypoint.sh b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-rialto-generator-entrypoint.sh deleted file mode 100755 index a8e032bbdfd..00000000000 --- a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-rialto-generator-entrypoint.sh +++ /dev/null @@ -1,143 +0,0 @@ -#!/bin/bash - -# THIS SCRIPT IS NOT INTENDED FOR USE IN PRODUCTION ENVIRONMENT -# -# This scripts periodically calls the Substrate relay binary to generate messages. These messages -# are sent from the Millau network to the Rialto network. - -set -eu - -# Max delay before submitting transactions (s) -MAX_SUBMIT_DELAY_S=${MSG_EXCHANGE_GEN_MAX_SUBMIT_DELAY_S:-30} -MESSAGE_LANE=${MSG_EXCHANGE_GEN_LANE:-00000000} -SECONDARY_MESSAGE_LANE=${MSG_EXCHANGE_GEN_SECONDARY_LANE} -MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE=128 -FERDIE_ADDR=6ztG3jPnJTwgZnnYsgCDXbbQVR82M96hBZtPvkN56A9668ZC - -SHARED_CMD=" /home/user/substrate-relay send-message millau-to-rialto" -SHARED_HOST="--source-host millau-node-bob --source-port 9944" -DAVE_SIGNER="--target-signer //Dave --source-signer //Dave" - -SEND_MESSAGE="$SHARED_CMD $SHARED_HOST $DAVE_SIGNER" - -# Sleep a bit between messages -rand_sleep() { - SUBMIT_DELAY_S=`shuf -i 0-$MAX_SUBMIT_DELAY_S -n 1` - echo "Sleeping $SUBMIT_DELAY_S seconds..." - sleep $SUBMIT_DELAY_S - NOW=`date "+%Y-%m-%d %H:%M:%S"` - echo "Woke up at $NOW" -} - -# start sending large messages immediately -LARGE_MESSAGES_TIME=0 -# start sending message packs in a hour -BUNCH_OF_MESSAGES_TIME=3600 - -while true -do - rand_sleep - echo "Sending Remark from Millau to Rialto using Target Origin" - $SEND_MESSAGE \ - --lane $MESSAGE_LANE \ - --conversion-rate-override metric \ - --origin Target \ - remark - - if [ ! 
-z $SECONDARY_MESSAGE_LANE ]; then - echo "Sending Remark from Millau to Rialto using Target Origin using secondary lane: $SECONDARY_MESSAGE_LANE" - $SEND_MESSAGE \ - --lane $SECONDARY_MESSAGE_LANE \ - --conversion-rate-override metric \ - --origin Target \ - --dispatch-fee-payment at-target-chain \ - remark - fi - - rand_sleep - echo "Sending Transfer from Millau to Rialto using Target Origin" - $SEND_MESSAGE \ - --lane $MESSAGE_LANE \ - --conversion-rate-override metric \ - --origin Target \ - transfer \ - --amount 1000000000 \ - --recipient $FERDIE_ADDR - - rand_sleep - echo "Sending Remark from Millau to Rialto using Source Origin" - $SEND_MESSAGE \ - --lane $MESSAGE_LANE \ - --conversion-rate-override metric \ - --origin Source \ - remark - - rand_sleep - echo "Sending Transfer from Millau to Rialto using Source Origin" - $SEND_MESSAGE \ - --lane $MESSAGE_LANE \ - --conversion-rate-override metric \ - --origin Source \ - transfer \ - --amount 1000000000 \ - --recipient $FERDIE_ADDR - - # every other hour we're sending 3 large (size, weight, size+weight) messages - if [ $SECONDS -ge $LARGE_MESSAGES_TIME ]; then - LARGE_MESSAGES_TIME=$((SECONDS + 7200)) - - rand_sleep - echo "Sending Maximal Size Remark from Millau to Rialto using Target Origin" - $SEND_MESSAGE \ - --lane $MESSAGE_LANE \ - --conversion-rate-override metric \ - --origin Target \ - remark \ - --remark-size=max - - rand_sleep - echo "Sending Maximal Dispatch Weight Remark from Millau to Rialto using Target Origin" - $SEND_MESSAGE \ - --lane $MESSAGE_LANE \ - --conversion-rate-override metric \ - --origin Target \ - --dispatch-weight=max \ - remark - - rand_sleep - echo "Sending Maximal Size and Dispatch Weight Remark from Millau to Rialto using Target Origin" - $SEND_MESSAGE \ - --lane $MESSAGE_LANE \ - --conversion-rate-override metric \ - --origin Target \ - --dispatch-weight=max \ - remark \ - --remark-size=max - - fi - - # every other hour we're sending a bunch of small messages - if [ $SECONDS -ge $BUNCH_OF_MESSAGES_TIME ]; then - BUNCH_OF_MESSAGES_TIME=$((SECONDS + 7200)) - - SEND_MESSAGE_OUTPUT=`$SEND_MESSAGE --lane $MESSAGE_LANE --conversion-rate-override metric --origin Target remark 2>&1` - echo $SEND_MESSAGE_OUTPUT - ACTUAL_CONVERSION_RATE_REGEX="conversion rate override: ([0-9\.]+)" - if [[ $SEND_MESSAGE_OUTPUT =~ $ACTUAL_CONVERSION_RATE_REGEX ]]; then - ACTUAL_CONVERSION_RATE=${BASH_REMATCH[1]} - else - echo "Unable to find conversion rate in send-message output" - exit 1 - fi - - for i in $(seq 2 $MAX_UNCONFIRMED_MESSAGES_AT_INBOUND_LANE); - do - $SEND_MESSAGE \ - --lane $MESSAGE_LANE \ - --conversion-rate-override $ACTUAL_CONVERSION_RATE \ - --origin Target \ - remark - done - - fi -done diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-rialto-resubmitter-entrypoint.sh b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-rialto-resubmitter-entrypoint.sh deleted file mode 100755 index 068560e1502..00000000000 --- a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-messages-to-rialto-resubmitter-entrypoint.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash -set -xeu - -sleep 15 - -# //Dave is signing Millau -> Rialto message-send transactions, which are causing problems. -# -# When large message is being sent from Millau to Rialto AND other transactions are -# blocking it from being mined, we'll see something like this in logs: -# -# Millau transaction priority with tip=0: 17800827994. 
Target priority: -# 526186677695 -# -# So since fee multiplier in Millau is `1` and `WeightToFee` is `IdentityFee`, then -# we need tip around `526186677695 - 17800827994 = 508_385_849_701`. Let's round it -# up to `1_000_000_000_000`. - -/home/user/substrate-relay resubmit-transactions millau \ - --target-host millau-node-alice \ - --target-port 9944 \ - --target-signer //Dave \ - --stalled-blocks 5 \ - --tip-limit 1000000000000 \ - --tip-step 1000000000 \ - make-it-best-transaction diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-millau-rialto-entrypoint.sh b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-millau-rialto-entrypoint.sh deleted file mode 100755 index bab0e1c4af3..00000000000 --- a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-millau-rialto-entrypoint.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash -set -xeu - -sleep 15 - -/home/user/substrate-relay init-bridge millau-to-rialto \ - --source-host millau-node-alice \ - --source-port 9944 \ - --target-host rialto-node-alice \ - --target-port 9944 \ - --target-signer //Alice - -/home/user/substrate-relay init-bridge rialto-to-millau \ - --source-host rialto-node-alice \ - --source-port 9944 \ - --target-host millau-node-alice \ - --target-port 9944 \ - --target-signer //Alice - -# Give chain a little bit of time to process initialization transaction -sleep 6 - -/home/user/substrate-relay relay-headers-and-messages millau-rialto \ - --millau-host millau-node-alice \ - --millau-port 9944 \ - --millau-signer //Charlie \ - --millau-messages-pallet-owner=//RialtoMessagesOwner \ - --millau-transactions-mortality=64 \ - --rialto-host rialto-node-alice \ - --rialto-port 9944 \ - --rialto-signer //Charlie \ - --rialto-messages-pallet-owner=//MillauMessagesOwner \ - --rialto-transactions-mortality=64 \ - --lane=00000000 \ - --lane=73776170 \ - --prometheus-host=0.0.0.0 diff --git a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-token-swap-generator-entrypoint.sh b/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-token-swap-generator-entrypoint.sh deleted file mode 100755 index 010c0572d50..00000000000 --- a/polkadot/bridges/deployments/bridges/rialto-millau/entrypoints/relay-token-swap-generator-entrypoint.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash - -# THIS SCRIPT IS NOT INTENDED FOR USE IN PRODUCTION ENVIRONMENT -# -# This scripts periodically calls the Substrate relay binary to generate messages. These messages -# are sent from the Millau network to the Rialto network. - -set -eu - -# Max delay before submitting transactions (s) -MAX_SUBMIT_DELAY_S=60 -SOURCE_HOST=millau-node-charlie -SOURCE_PORT=9944 -TARGET_HOST=rialto-node-charlie -TARGET_PORT=9944 - -# Sleep a bit between messages -rand_sleep() { - SUBMIT_DELAY_S=`shuf -i 0-$MAX_SUBMIT_DELAY_S -n 1` - echo "Sleeping $SUBMIT_DELAY_S seconds..." 
- sleep $SUBMIT_DELAY_S - NOW=`date "+%Y-%m-%d %H:%M:%S"` - echo "Woke up at $NOW" -} - -while true -do - rand_sleep - echo "Initiating token-swap between Rialto and Millau" - /home/user/substrate-relay \ - swap-tokens \ - millau-to-rialto \ - --source-host $SOURCE_HOST \ - --source-port $SOURCE_PORT \ - --source-signer //WithRialtoTokenSwap \ - --source-balance 100000 \ - --target-host $TARGET_HOST \ - --target-port $TARGET_PORT \ - --target-signer //WithMillauTokenSwap \ - --target-balance 200000 \ - --target-to-source-conversion-rate-override metric \ - --source-to-target-conversion-rate-override metric \ - lock-until-block \ - --blocks-before-expire 32 -done diff --git a/polkadot/bridges/deployments/bridges/westend-millau/dashboard/grafana/relay-westend-to-millau-headers-dashboard.json b/polkadot/bridges/deployments/bridges/westend-millau/dashboard/grafana/relay-westend-to-millau-headers-dashboard.json deleted file mode 100644 index 682ac2c7786..00000000000 --- a/polkadot/bridges/deployments/bridges/westend-millau/dashboard/grafana/relay-westend-to-millau-headers-dashboard.json +++ /dev/null @@ -1,781 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "links": [], - "panels": [ - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 32 - ], - "type": "gt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "A", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "min" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "60m", - "frequency": "5m", - "handler": 1, - "message": "", - "name": "Synced Header Difference is Over 32 (Westend to Millau)", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "Shows how many headers behind the target chain is from the source chain.", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 0 - }, - "hiddenSeries": false, - "id": 14, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "max(Westend_to_Millau_Sync_best_source_block_number) - max(Westend_to_Millau_Sync_best_source_at_target_block_number)", - "format": "table", - "instant": false, - "interval": "", - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": 5 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Difference Between Source and Target Headers", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - 
"max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 32 - ], - "type": "lt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "A", - "2m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "min" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "60m", - "frequency": "5m", - "handler": 1, - "name": "No New Headers (Westend to Millau)", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "How many headers has the relay synced from the source node in the last 2 mins?", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 0 - }, - "hiddenSeries": false, - "id": 16, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "max_over_time(Westend_to_Millau_Sync_best_source_block_number[10m])-min_over_time(Westend_to_Millau_Sync_best_source_block_number[10m])", - "interval": "", - "legendFormat": "Number of new Headers on Westend (Last 10 Mins)", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "lt", - "value": 5 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Headers Synced on Millau (Last 2 Mins)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": { - "align": null - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 12, - "x": 0, - "y": 8 - }, - "id": 2, - "interval": "5s", - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "Westend_to_Millau_Sync_best_source_block_number", - "format": "time_series", - "instant": true, - "interval": "", - "intervalFactor": 1, - "legendFormat": "Best Known Westend Header at Westend", - "refId": "A" - }, - { - "expr": "Westend_to_Millau_Sync_best_source_at_target_block_number", - "format": "time_series", - "instant": true, - "interval": 
"", - "intervalFactor": 1, - "legendFormat": "Best Known Westend Header at Millau", - "refId": "B" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Best Blocks according to Relay", - "type": "stat" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 6, - "x": 12, - "y": 8 - }, - "hiddenSeries": false, - "id": 6, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "system_average_load{instance='relay-headers-westend-to-millau:9616'}", - "interval": "", - "legendFormat": "Average system load in last {{over}}", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": null - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Average System Load", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 6, - "x": 18, - "y": 8 - }, - "id": 12, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "avg_over_time(process_cpu_usage_percentage{instance='relay-headers-westend-to-millau:9616'}[1m])", - "instant": true, - "interval": "", - "legendFormat": "1 CPU = 100", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Relay Process CPU Usage ", - "type": "gauge" - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 0 - ], - "type": "gt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "A", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "max" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "1m", - "handler": 1, - "name": "Whether with-Westend-grandpa-pallet and Westend itself are on different forks alert", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 10, - 
"w": 12, - "x": 0, - "y": 14 - }, - "hiddenSeries": false, - "id": 18, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "Westend_to_Millau_Sync_is_source_and_source_at_target_using_different_forks", - "interval": "", - "legendFormat": "On different forks?", - "refId": "A" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": 0 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Whether with-Westend-grandpa-pallet and Westend itself are on different forks", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "description": "", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 16 - }, - "hiddenSeries": false, - "id": 10, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "process_memory_usage_bytes{instance='relay-headers-westend-to-millau:9616'} / 1024 / 1024", - "interval": "", - "legendFormat": "Process memory, MB", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Memory Usage for Relay Process", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "refresh": "5s", - "schemaVersion": 26, - "style": "dark", - "tags": [], - "templating": { - "list": [] - }, - "time": { - "from": "now-5m", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ] - }, - "timezone": "", - "title": "Westend to Millau Header Sync Dashboard", - "uid": "relay-westend-to-millau-headers", - "version": 1 -} diff --git a/polkadot/bridges/deployments/bridges/westend-millau/dashboard/prometheus/targets.yml 
b/polkadot/bridges/deployments/bridges/westend-millau/dashboard/prometheus/targets.yml deleted file mode 100644 index 5d49e112744..00000000000 --- a/polkadot/bridges/deployments/bridges/westend-millau/dashboard/prometheus/targets.yml +++ /dev/null @@ -1,2 +0,0 @@ -- targets: - - relay-headers-westend-to-millau:9616 diff --git a/polkadot/bridges/deployments/bridges/westend-millau/docker-compose.yml b/polkadot/bridges/deployments/bridges/westend-millau/docker-compose.yml deleted file mode 100644 index 8caa17ffb82..00000000000 --- a/polkadot/bridges/deployments/bridges/westend-millau/docker-compose.yml +++ /dev/null @@ -1,31 +0,0 @@ -# Exposed ports: 10616 - -version: '3.5' -services: - relay-headers-westend-to-millau: - image: paritytech/substrate-relay - entrypoint: /entrypoints/relay-headers-westend-to-millau-entrypoint.sh - volumes: - - ./bridges/westend-millau/entrypoints:/entrypoints - environment: - RUST_LOG: rpc=trace,bridge=trace - ports: - - "10616:9616" - depends_on: - - millau-node-alice - - # Note: These are being overridden from the top level `monitoring` compose file. - grafana-dashboard: - environment: - VIRTUAL_HOST: grafana.millau.brucke.link,grafana.rialto.brucke.link - VIRTUAL_PORT: 3000 - LETSENCRYPT_HOST: grafana.millau.brucke.link,grafana.rialto.brucke.link - LETSENCRYPT_EMAIL: admin@parity.io - volumes: - - ./bridges/westend-millau/dashboard/grafana:/etc/grafana/dashboards/westend-millau:ro - - prometheus-metrics: - volumes: - - ./bridges/westend-millau/dashboard/prometheus/targets.yml:/etc/prometheus/targets-westend-millau.yml - depends_on: - - relay-headers-westend-to-millau diff --git a/polkadot/bridges/deployments/bridges/westend-millau/entrypoints/relay-headers-westend-to-millau-entrypoint.sh b/polkadot/bridges/deployments/bridges/westend-millau/entrypoints/relay-headers-westend-to-millau-entrypoint.sh deleted file mode 100755 index f37ee69915c..00000000000 --- a/polkadot/bridges/deployments/bridges/westend-millau/entrypoints/relay-headers-westend-to-millau-entrypoint.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash -set -xeu - -sleep 15 - -/home/user/substrate-relay init-bridge westend-to-millau \ - --source-host westend-rpc.polkadot.io \ - --source-port 443 \ - --source-secure \ - --target-host millau-node-alice \ - --target-port 9944 \ - --target-signer //George - -# Give chain a little bit of time to process initialization transaction -sleep 6 -/home/user/substrate-relay relay-headers westend-to-millau \ - --source-host westend-rpc.polkadot.io \ - --source-port 443 \ - --source-secure \ - --target-host millau-node-alice \ - --target-port 9944 \ - --target-signer //George \ - --target-transactions-mortality=4\ - --prometheus-host=0.0.0.0 diff --git a/polkadot/bridges/deployments/local-scripts/bridge-entrypoint.sh b/polkadot/bridges/deployments/local-scripts/bridge-entrypoint.sh deleted file mode 100755 index 5c1b6e90ec2..00000000000 --- a/polkadot/bridges/deployments/local-scripts/bridge-entrypoint.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -set -xeu - -# This will allow us to run whichever binary the user wanted -# with arguments passed through `docker run` -# e.g `docker run -it rialto-bridge-node-dev --dev --tmp` -/home/user/$PROJECT $@ diff --git a/polkadot/bridges/deployments/local-scripts/relay-headers-rococo-to-wococo.sh b/polkadot/bridges/deployments/local-scripts/relay-headers-rococo-to-wococo.sh deleted file mode 100755 index 61028e1756b..00000000000 --- a/polkadot/bridges/deployments/local-scripts/relay-headers-rococo-to-wococo.sh +++ /dev/null @@ 
-1,24 +0,0 @@ -#!/bin/bash -# -# Run an instance of the Rococo -> Wococo header sync. -# -# Right now this relies on local Wococo and Rococo networks -# running (which include `pallet-bridge-grandpa` in their -# runtimes), but in the future it could use use public RPC nodes. - -set -xeu - -RUST_LOG=rpc=trace,bridge=trace ./target/debug/substrate-relay init-bridge rococo-to-wococo \ - --source-host 127.0.0.1 \ - --source-port 9955 \ - --target-host 127.0.0.1 \ - --target-port 9944 \ - --target-signer //Alice - -RUST_LOG=rpc=trace,bridge=trace ./target/debug/substrate-relay relay-headers rococo-to-wococo \ - --source-host 127.0.0.1 \ - --source-port 9955 \ - --target-host 127.0.0.1 \ - --target-port 9944 \ - --target-signer //Bob \ - --prometheus-host=0.0.0.0 \ diff --git a/polkadot/bridges/deployments/local-scripts/relay-headers-wococo-to-rococo.sh b/polkadot/bridges/deployments/local-scripts/relay-headers-wococo-to-rococo.sh deleted file mode 100755 index c57db2086fb..00000000000 --- a/polkadot/bridges/deployments/local-scripts/relay-headers-wococo-to-rococo.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash -# -# Run an instance of the Wococo -> Rococo header sync. -# -# Right now this relies on local Wococo and Rococo networks -# running (which include `pallet-bridge-grandpa` in their -# runtimes), but in the future it could use use public RPC nodes. - -set -xeu - -RUST_LOG=rpc=trace,bridge=trace ./target/debug/substrate-relay init-bridge wococo-to-rococo \ - --source-host 127.0.0.1 \ - --source-port 9944 \ - --target-host 127.0.0.1 \ - --target-port 9955 \ - --target-signer //Alice - -RUST_LOG=rpc=trace,bridge=trace ./target/debug/substrate-relay relay-headers wococo-to-rococo \ - --source-host 127.0.0.1 \ - --source-port 9944 \ - --target-host 127.0.0.1 \ - --target-port 9955 \ - --target-signer //Charlie \ - --prometheus-host=0.0.0.0 \ diff --git a/polkadot/bridges/deployments/local-scripts/relay-messages-millau-to-rialto.sh b/polkadot/bridges/deployments/local-scripts/relay-messages-millau-to-rialto.sh deleted file mode 100755 index 36673d31be8..00000000000 --- a/polkadot/bridges/deployments/local-scripts/relay-messages-millau-to-rialto.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -# A script for relaying Millau messages to the Rialto chain. -# -# Will not work unless both the Rialto and Millau are running (see `run-rialto-node.sh` -# and `run-millau-node.sh). -set -xeu - -MILLAU_PORT="${MILLAU_PORT:-9945}" -RIALTO_PORT="${RIALTO_PORT:-9944}" - -RUST_LOG=bridge=debug \ -./target/debug/substrate-relay relay-messages millau-to-rialto \ - --relayer-mode=altruistic \ - --lane 00000000 \ - --source-host localhost \ - --source-port $MILLAU_PORT \ - --source-signer //Bob \ - --target-host localhost \ - --target-port $RIALTO_PORT \ - --target-signer //Bob \ - --prometheus-host=0.0.0.0 diff --git a/polkadot/bridges/deployments/local-scripts/relay-messages-rialto-to-millau.sh b/polkadot/bridges/deployments/local-scripts/relay-messages-rialto-to-millau.sh deleted file mode 100755 index 89e2b818245..00000000000 --- a/polkadot/bridges/deployments/local-scripts/relay-messages-rialto-to-millau.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -# A script for relaying Rialto messages to the Millau chain. -# -# Will not work unless both the Rialto and Millau are running (see `run-rialto-node.sh` -# and `run-millau-node.sh). 
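The local scripts in this directory assume debug builds of the bridge nodes and of substrate-relay under ./target/debug and are meant to be run from the bridges repository root; the message relays additionally expect the bridge pallets to have been initialised (for example via relay-rialto-to-millau.sh below). A minimal sequence under those assumptions:

    # start both development chains, then relay messages Rialto -> Millau
    ./deployments/local-scripts/run-rialto-node.sh &
    ./deployments/local-scripts/run-millau-node.sh &
    sleep 30   # give both chains time to start producing blocks
    ./deployments/local-scripts/relay-messages-rialto-to-millau.sh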
-set -xeu - -MILLAU_PORT="${MILLAU_PORT:-9945}" -RIALTO_PORT="${RIALTO_PORT:-9944}" - -RUST_LOG=bridge=debug \ -./target/debug/substrate-relay relay-messages rialto-to-millau \ - --relayer-mode=altruistic \ - --lane 00000000 \ - --source-host localhost \ - --source-port $RIALTO_PORT \ - --source-signer //Bob \ - --target-host localhost \ - --target-port $MILLAU_PORT \ - --target-signer //Bob \ - --prometheus-host=0.0.0.0 diff --git a/polkadot/bridges/deployments/local-scripts/relay-millau-to-rialto.sh b/polkadot/bridges/deployments/local-scripts/relay-millau-to-rialto.sh deleted file mode 100755 index 35d88d1a643..00000000000 --- a/polkadot/bridges/deployments/local-scripts/relay-millau-to-rialto.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -# A script for relaying Millau headers to the Rialto chain. -# -# Will not work unless both the Rialto and Millau are running (see `run-rialto-node.sh` -# and `run-millau-node.sh). - -MILLAU_PORT="${MILLAU_PORT:-9945}" -RIALTO_PORT="${RIALTO_PORT:-9944}" - -RUST_LOG=bridge=debug \ -./target/debug/substrate-relay init-bridge millau-to-rialto \ - --source-host localhost \ - --source-port $MILLAU_PORT \ - --target-host localhost \ - --target-port $RIALTO_PORT \ - --target-signer //Alice \ - --source-version-mode Bundle \ - --target-version-mode Bundle - -sleep 5 -RUST_LOG=bridge=debug \ -./target/debug/substrate-relay relay-headers millau-to-rialto \ - --source-host localhost \ - --source-port $MILLAU_PORT \ - --target-host localhost \ - --target-port $RIALTO_PORT \ - --target-signer //Alice \ - --prometheus-host=0.0.0.0 diff --git a/polkadot/bridges/deployments/local-scripts/relay-rialto-to-millau.sh b/polkadot/bridges/deployments/local-scripts/relay-rialto-to-millau.sh deleted file mode 100755 index c66c994f06a..00000000000 --- a/polkadot/bridges/deployments/local-scripts/relay-rialto-to-millau.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -# A script for relaying Rialto headers to the Millau chain. -# -# Will not work unless both the Rialto and Millau are running (see `run-rialto-node.sh` -# and `run-millau-node.sh). - -MILLAU_PORT="${MILLAU_PORT:-9945}" -RIALTO_PORT="${RIALTO_PORT:-9944}" - -RUST_LOG=bridge=debug \ -./target/debug/substrate-relay init-bridge rialto-to-millau \ - --target-host localhost \ - --target-port $MILLAU_PORT \ - --source-host localhost \ - --source-port $RIALTO_PORT \ - --target-signer //Alice \ - -sleep 5 -RUST_LOG=bridge=debug \ -./target/debug/substrate-relay relay-headers rialto-to-millau \ - --target-host localhost \ - --target-port $MILLAU_PORT \ - --source-host localhost \ - --source-port $RIALTO_PORT \ - --target-signer //Alice \ - --prometheus-host=0.0.0.0 diff --git a/polkadot/bridges/deployments/local-scripts/run-millau-node.sh b/polkadot/bridges/deployments/local-scripts/run-millau-node.sh deleted file mode 100755 index 916f876c536..00000000000 --- a/polkadot/bridges/deployments/local-scripts/run-millau-node.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash - -# Run a development instance of the Millau Substrate bridge node. 
-# To override the default port just export MILLAU_PORT=9945 - -MILLAU_PORT="${MILLAU_PORT:-9945}" - -RUST_LOG=runtime=trace \ -./target/debug/millau-bridge-node --dev --tmp \ - --rpc-cors=all --unsafe-rpc-external --unsafe-ws-external \ - --port 33044 --rpc-port 9934 --ws-port $MILLAU_PORT \ diff --git a/polkadot/bridges/deployments/local-scripts/run-rialto-node.sh b/polkadot/bridges/deployments/local-scripts/run-rialto-node.sh deleted file mode 100755 index e7987e2af36..00000000000 --- a/polkadot/bridges/deployments/local-scripts/run-rialto-node.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash - -# Run a development instance of the Rialto Substrate bridge node. -# To override the default port just export RIALTO_PORT=9944 - -RIALTO_PORT="${RIALTO_PORT:-9944}" - -RUST_LOG=runtime=trace \ - ./target/debug/rialto-bridge-node --dev --tmp \ - --rpc-cors=all --unsafe-rpc-external --unsafe-ws-external \ - --port 33033 --rpc-port 9933 --ws-port $RIALTO_PORT \ diff --git a/polkadot/bridges/deployments/local-scripts/run-rococo-node.sh b/polkadot/bridges/deployments/local-scripts/run-rococo-node.sh deleted file mode 100755 index 4d43321eba0..00000000000 --- a/polkadot/bridges/deployments/local-scripts/run-rococo-node.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -# Run a development instance of the Rococo Substrate bridge node. -# To override the default port just export ROCOCO_PORT=9955 -# -# Note: This script will not work out of the box with the bridges -# repo since it relies on a Polkadot binary. - -ROCOCO_PORT="${ROCOCO_PORT:-9955}" - -RUST_LOG=runtime=trace,runtime::bridge=trace \ -./target/debug/polkadot --chain=rococo-dev --alice --tmp \ - --rpc-cors=all --unsafe-rpc-external --unsafe-ws-external \ - --port 33044 --rpc-port 9934 --ws-port $ROCOCO_PORT \ diff --git a/polkadot/bridges/deployments/local-scripts/run-westend-node.sh b/polkadot/bridges/deployments/local-scripts/run-westend-node.sh deleted file mode 100755 index 1bb490fc1a8..00000000000 --- a/polkadot/bridges/deployments/local-scripts/run-westend-node.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -# Run a development instance of the Westend Substrate bridge node. -# To override the default port just export WESTEND_PORT=9945 -# -# Note: This script will not work out of the box with the bridges -# repo since it relies on a Polkadot binary. - -WESTEND_PORT="${WESTEND_PORT:-9944}" - -RUST_LOG=runtime=trace,runtime::bridge=trace \ -./target/debug/polkadot --chain=westend-dev --alice --tmp \ - --rpc-cors=all --unsafe-rpc-external --unsafe-ws-external \ - --port 33033 --rpc-port 9933 --ws-port $WESTEND_PORT \ diff --git a/polkadot/bridges/deployments/local-scripts/run-wococo-node.sh b/polkadot/bridges/deployments/local-scripts/run-wococo-node.sh deleted file mode 100755 index f314c0c7fa0..00000000000 --- a/polkadot/bridges/deployments/local-scripts/run-wococo-node.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -# Run a development instance of the Wococo Substrate bridge node. -# To override the default port just export WOCOCO_PORT=9955 -# -# Note: This script will not work out of the box with the bridges -# repo since it relies on a Polkadot binary. 
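Like the Rococo and Westend helpers above, this script drives a plain Polkadot binary rather than one of the bridge nodes, so a Polkadot build has to exist first. A minimal sketch, assuming it is invoked from a directory that contains ./target/debug/polkadot (for example a Polkadot checkout after a debug build):

    # build the polkadot node once, then start a local Wococo dev chain on a non-default WS port
    cargo build
    WOCOCO_PORT=9966 ./run-wococo-node.sh   # adjust the script path to wherever it was copied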
- -WOCOCO_PORT="${WOCOCO_PORT:-9944}" - -RUST_LOG=runtime=trace,runtime::bridge=trace \ -./target/debug/polkadot --chain=wococo-dev --alice --tmp \ - --rpc-cors=all --unsafe-rpc-external --unsafe-ws-external \ - --port 33033 --rpc-port 9933 --ws-port $WOCOCO_PORT \ diff --git a/polkadot/bridges/deployments/monitoring/GrafanaMatrix.Dockerfile b/polkadot/bridges/deployments/monitoring/GrafanaMatrix.Dockerfile deleted file mode 100644 index df80f700215..00000000000 --- a/polkadot/bridges/deployments/monitoring/GrafanaMatrix.Dockerfile +++ /dev/null @@ -1,18 +0,0 @@ -FROM docker.io/library/ruby:alpine - -RUN apk add --no-cache git - -ENV APP_HOME /app -ENV RACK_ENV production -RUN mkdir $APP_HOME -WORKDIR $APP_HOME - -# The latest master has some changes in how the application is run. We don't -# want to update just yet so we're pinning to an old commit. -RUN git clone https://github.com/ananace/ruby-grafana-matrix.git $APP_HOME -RUN git checkout 0d662b29633d16176291d11a2d85ba5107cf7de3 -RUN bundle install --without development - -RUN mkdir /config && touch /config/config.yml && ln -s /config/config.yml ./config.yml - -CMD ["bundle", "exec", "bin/server"] diff --git a/polkadot/bridges/deployments/monitoring/disabled.yml b/polkadot/bridges/deployments/monitoring/disabled.yml deleted file mode 100644 index a0b4ed3aad0..00000000000 --- a/polkadot/bridges/deployments/monitoring/disabled.yml +++ /dev/null @@ -1,15 +0,0 @@ -# A disabled version of monitoring. -# -# We replace each service with a no-op container. We can't simply not include this file, -# cause the bridge-specific compose files might have overrides. -version: '3.5' -services: - prometheus-metrics: - image: alpine - - grafana-dashboard: - image: alpine - - grafana-matrix-notifier: - image: alpine - diff --git a/polkadot/bridges/deployments/monitoring/docker-compose.yml b/polkadot/bridges/deployments/monitoring/docker-compose.yml deleted file mode 100644 index 5456cb76dc7..00000000000 --- a/polkadot/bridges/deployments/monitoring/docker-compose.yml +++ /dev/null @@ -1,32 +0,0 @@ -version: '3.5' -services: - prometheus-metrics: - image: prom/prometheus:v2.20.1 - volumes: - - ./monitoring/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml - ports: - - "9090:9090" - - grafana-dashboard: - image: grafana/grafana:7.1.3 - environment: - GF_SECURITY_ADMIN_PASSWORD: ${GRAFANA_ADMIN_PASS:-admin} - GF_SERVER_ROOT_URL: ${GRAFANA_SERVER_ROOT_URL} - GF_SERVER_DOMAIN: ${GRAFANA_SERVER_DOMAIN} - volumes: - - ./monitoring/grafana/provisioning/:/etc/grafana/provisioning/:ro - ports: - - "3000:3000" - depends_on: - - prometheus-metrics - - grafana-matrix-notifier: - build: - context: . 
- dockerfile: ./monitoring/GrafanaMatrix.Dockerfile - volumes: - - ./monitoring/grafana-matrix:/config - ports: - - "4567:4567" - depends_on: - - grafana-dashboard diff --git a/polkadot/bridges/deployments/monitoring/grafana-matrix/config.yml b/polkadot/bridges/deployments/monitoring/grafana-matrix/config.yml deleted file mode 100644 index 645ee708fef..00000000000 --- a/polkadot/bridges/deployments/monitoring/grafana-matrix/config.yml +++ /dev/null @@ -1,49 +0,0 @@ ---- -# Webhook server configuration -# Or use the launch options `-o '::' -p 4567` -#bind: '::' -#port: 4567 - -# Set up your HS connections -matrix: -- name: matrix-parity-io - url: https://matrix.parity.io - # Create a user - log that user in using a post request - # curl -XPOST -d '{"type": "m.login.password", - # "user":"grafana", - # "password":"dummy-password"}' - # "https://my-matrix-server/_matrix/client/r0/login" - # Fill that access token in here - access_token: "" - #device_id: # Optional - -# The default message type for messages, should be either m.text or m.notice, -# defaults to m.text -msgtype: m.text - -# Set up notification ingress rules -rules: -- name: bridge # Name of the rule - room: "#bridges-workers:matrix.parity.io" # Room or ID - matrix: matrix-parity-io # The Matrix HS to use - defaults to first one - msgtype: m.notice - # The following values are optional: - image: true # Attach image to the notification? - embed_image: true # Upload and embed the image into the message? - #templates: - # Templates to use when rendering the notification, available placeholders: - # %TEMPLATES% - lib/grafana_matrix/templates - # $ - Environment variables - #html: "%TEMPLATES%/html.erb" # Path to HTML template - #plain: "%TEMPLATES%/plain.erb" # Path to plaintext template - #auth: - #user: example - #pass: any HTTP encodable string -#- name: other-hq -# room: "#hq:private.matrix.org -# matrix: matrix-priv - -# To use the webhook, you need to configure it into Grafana as: -# -# Url: http://:/hook?rule= -# Http Method: POST diff --git a/polkadot/bridges/deployments/monitoring/grafana/provisioning/dashboards/grafana-dashboard.yaml b/polkadot/bridges/deployments/monitoring/grafana/provisioning/dashboards/grafana-dashboard.yaml deleted file mode 100644 index d14ed2637d5..00000000000 --- a/polkadot/bridges/deployments/monitoring/grafana/provisioning/dashboards/grafana-dashboard.yaml +++ /dev/null @@ -1,6 +0,0 @@ -- name: 'default' - orgId: 1 - folder: '' - type: file - options: - path: '/etc/grafana/dashboards' \ No newline at end of file diff --git a/polkadot/bridges/deployments/monitoring/grafana/provisioning/datasources/grafana-datasource.yaml b/polkadot/bridges/deployments/monitoring/grafana/provisioning/datasources/grafana-datasource.yaml deleted file mode 100644 index b85cf06e2bd..00000000000 --- a/polkadot/bridges/deployments/monitoring/grafana/provisioning/datasources/grafana-datasource.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# list of datasources to insert/update depending -# whats available in the database -datasources: - # name of the datasource. Required -- name: Prometheus - # datasource type. Required - type: prometheus - # access mode. direct or proxy. Required - access: proxy - # org id. will default to orgId 1 if not specified - orgId: 1 - # url - url: http://prometheus-metrics:9090 - # mark as default datasource. 
Max one per org - isDefault: true - version: 1 diff --git a/polkadot/bridges/deployments/monitoring/grafana/provisioning/notifiers/grafana-notifier.yaml b/polkadot/bridges/deployments/monitoring/grafana/provisioning/notifiers/grafana-notifier.yaml deleted file mode 100644 index 4eb6ea3863e..00000000000 --- a/polkadot/bridges/deployments/monitoring/grafana/provisioning/notifiers/grafana-notifier.yaml +++ /dev/null @@ -1,15 +0,0 @@ -notifiers: - - name: Matrix - type: webhook - uid: notifier1 - is_default: true - send_reminder: true - frequency: 1h - disable_resolve_message: false - settings: - url: http://grafana-matrix-notifier:4567/hook?rule=bridge - http_method: POST - -delete_notifiers: - - name: Matrix - uid: notifier1 diff --git a/polkadot/bridges/deployments/monitoring/prometheus/prometheus.yml b/polkadot/bridges/deployments/monitoring/prometheus/prometheus.yml deleted file mode 100644 index 7092bd27314..00000000000 --- a/polkadot/bridges/deployments/monitoring/prometheus/prometheus.yml +++ /dev/null @@ -1,7 +0,0 @@ -global: - scrape_interval: 15s -scrape_configs: - - job_name: dummy - file_sd_configs: - - files: - - /etc/prometheus/targets-*.yml diff --git a/polkadot/bridges/deployments/networks/dashboard/grafana/beefy-dashboard.json b/polkadot/bridges/deployments/networks/dashboard/grafana/beefy-dashboard.json deleted file mode 100644 index 0216e145548..00000000000 --- a/polkadot/bridges/deployments/networks/dashboard/grafana/beefy-dashboard.json +++ /dev/null @@ -1,539 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "links": [], - "panels": [ - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 1 - ], - "type": "lt" - }, - "operator": { - "type": "and" - }, - "query": { - "params": [ - "C", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "max" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "1m", - "handler": 1, - "name": "Beefy best blocks not advancing", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": { - "align": null - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "green", - "value": null - } - ] - } - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 14, - "w": 12, - "x": 0, - "y": 0 - }, - "hiddenSeries": false, - "id": 2, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "polkadot_beefy_best_block", - "legendFormat": "Rialto(Charlie)", - "refId": "A" - }, - { - "expr": "substrate_beefy_best_block", - "legendFormat": "Millau(Charlie)", - "refId": "B" - }, - { - "expr": "max_over_time(substrate_beefy_best_block[5m]) - min_over_time(substrate_beefy_best_block[5m])", - 
"hide": true, - "legendFormat": "Millau Best Beefy blocks count in last 5 minutes", - "refId": "C" - }, - { - "expr": "max_over_time(polkadot_beefy_best_block[5m]) - min_over_time(polkadot_beefy_best_block[5m])", - "hide": true, - "legendFormat": "Rialto Best Beefy blocks count in last 5 minutes", - "refId": "D" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "lt", - "value": 1 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Beefy Best block", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {}, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "yellow", - "value": null - }, - { - "color": "yellow", - "value": null - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 14, - "w": 11, - "x": 12, - "y": 0 - }, - "id": 4, - "options": { - "colorMode": "value", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "7.1.3", - "targets": [ - { - "expr": "polkadot_beefy_should_vote_on", - "legendFormat": "Rialto(Charlie) Should-Vote-On", - "refId": "C" - }, - { - "expr": "polkadot_beefy_round_concluded", - "legendFormat": "Rialto(Charlie) Round-Concluded", - "refId": "A" - }, - { - "expr": "substrate_beefy_should_vote_on", - "legendFormat": "Millau(Charlie) Should-Vote-On", - "refId": "D" - }, - { - "expr": "substrate_beefy_round_concluded", - "legendFormat": "Millau(Charlie) Round-Concluded", - "refId": "B" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Beefy Voting Rounds", - "type": "stat" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": {} - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 18, - "x": 0, - "y": 14 - }, - "hiddenSeries": false, - "id": 6, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "polkadot_beefy_votes_sent", - "legendFormat": "Rialto (node Charlie)", - "refId": "A" - }, - { - "expr": "substrate_beefy_votes_sent", - "legendFormat": "Millau (node Charlie)", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Beefy Votes Sent", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": 
null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "alert": { - "alertRuleTags": {}, - "conditions": [ - { - "evaluator": { - "params": [ - 0 - ], - "type": "gt" - }, - "operator": { - "type": "or" - }, - "query": { - "params": [ - "B", - "5m", - "now" - ] - }, - "reducer": { - "params": [], - "type": "max" - }, - "type": "query" - } - ], - "executionErrorState": "alerting", - "for": "5m", - "frequency": "1m", - "handler": 1, - "name": "Beefy Skipped Sessions alert", - "noDataState": "no_data", - "notifications": [] - }, - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "Prometheus", - "fieldConfig": { - "defaults": { - "custom": { - "align": null - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 1 - } - ] - } - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 5, - "x": 18, - "y": 14 - }, - "hiddenSeries": false, - "id": 8, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "percentage": false, - "pluginVersion": "7.1.3", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "polkadot_beefy_skipped_sessions", - "legendFormat": "Rialto(Charlie)", - "refId": "A" - }, - { - "expr": "substrate_beefy_skipped_sessions", - "legendFormat": "Millau(Charlie)", - "refId": "B" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": 0 - } - ], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Beefy Skipped Sessions", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "refresh": "5s", - "schemaVersion": 26, - "style": "dark", - "tags": [], - "templating": { - "list": [] - }, - "time": { - "from": "now-5m", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ] - }, - "timezone": "", - "title": "Beefy", - "uid": "j6cRDRh7z", - "version": 1 -} diff --git a/polkadot/bridges/deployments/networks/dashboard/prometheus/millau-targets.yml b/polkadot/bridges/deployments/networks/dashboard/prometheus/millau-targets.yml deleted file mode 100644 index c7a06509276..00000000000 --- a/polkadot/bridges/deployments/networks/dashboard/prometheus/millau-targets.yml +++ /dev/null @@ -1,2 +0,0 @@ -- targets: - - millau-node-charlie:9615 diff --git a/polkadot/bridges/deployments/networks/dashboard/prometheus/rialto-targets.yml b/polkadot/bridges/deployments/networks/dashboard/prometheus/rialto-targets.yml deleted file mode 100644 index 
9de26b9a2d7..00000000000 --- a/polkadot/bridges/deployments/networks/dashboard/prometheus/rialto-targets.yml +++ /dev/null @@ -1,2 +0,0 @@ -- targets: - - rialto-node-charlie:9615 diff --git a/polkadot/bridges/deployments/networks/entrypoints/rialto-chainspec-exporter-entrypoint.sh b/polkadot/bridges/deployments/networks/entrypoints/rialto-chainspec-exporter-entrypoint.sh deleted file mode 100755 index 0898978096d..00000000000 --- a/polkadot/bridges/deployments/networks/entrypoints/rialto-chainspec-exporter-entrypoint.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash -set -xeu - -/home/user/rialto-bridge-node build-spec \ - --chain local \ - --raw \ - --disable-default-bootnode \ - > /rialto-share/rialto-relaychain-spec-raw.json - -# we're using local driver + tmpfs for shared `/rialto-share` volume, which is populated -# by the container running this script. If this script ends, the volume will be detached -# and our chain spec will be lost when it'll go online again. Hence the never-ending -# script which keeps volume online until container is stopped. -tail -f /dev/null diff --git a/polkadot/bridges/deployments/networks/entrypoints/rialto-parachain-registrar-entrypoint.sh b/polkadot/bridges/deployments/networks/entrypoints/rialto-parachain-registrar-entrypoint.sh deleted file mode 100755 index 519ab228e93..00000000000 --- a/polkadot/bridges/deployments/networks/entrypoints/rialto-parachain-registrar-entrypoint.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -set -xeu - -sleep 15 - -/home/user/substrate-relay register-parachain rialto-parachain \ - --parachain-host rialto-parachain-collator-alice \ - --parachain-port 9944 \ - --relaychain-host rialto-node-alice \ - --relaychain-port 9944 \ - --relaychain-signer //Alice diff --git a/polkadot/bridges/deployments/networks/millau.yml b/polkadot/bridges/deployments/networks/millau.yml deleted file mode 100644 index 13ac8d48772..00000000000 --- a/polkadot/bridges/deployments/networks/millau.yml +++ /dev/null @@ -1,101 +0,0 @@ -# Compose file for quickly spinning up a local instance of the Millau Substrate network. -# -# Note that the Millau network is only used for testing, so the configuration settings you see here -# are *not* recommended for a production environment. -# -# For example, do *not* keep your `node-key` in version control, and unless you're _really_ sure you -# want to provide public access to your nodes do *not* publicly expose RPC methods. 
-version: '3.5' -services: - millau-node-alice: &millau-bridge-node - image: paritytech/millau-bridge-node - entrypoint: - - /home/user/millau-bridge-node - - --execution=Native - - --chain=local - - --bootnodes=/dns4/millau-node-bob/tcp/30333/p2p/12D3KooWM5LFR5ne4yTQ4sBSXJ75M4bDo2MAhAW2GhL3i8fe5aRb - - --alice - - --node-key=0f900c89f4e626f4a217302ab8c7d213737d00627115f318ad6fb169717ac8e0 - - --rpc-cors=all - - --enable-offchain-indexing=true - - --unsafe-rpc-external - - --unsafe-ws-external - environment: - RUST_LOG: runtime=trace,rpc=debug,txpool=trace,runtime::bridge=trace,sc_basic_authorship=trace,beefy=debug - ports: - - "19933:9933" - - "19944:9944" - - millau-node-bob: - <<: *millau-bridge-node - entrypoint: - - /home/user/millau-bridge-node - - --execution=Native - - --chain=local - - --bootnodes=/dns4/millau-node-alice/tcp/30333/p2p/12D3KooWFqiV73ipQ1jpfVmCfLqBCp8G9PLH3zPkY9EhmdrSGA4H - - --bob - - --node-key=db383639ff2905d79f8e936fd5dc4416ef46b514b2f83823ec3c42753d7557bb - - --rpc-cors=all - - --enable-offchain-indexing=true - - --unsafe-rpc-external - - --unsafe-ws-external - ports: - - "20033:9933" - - "20044:9944" - - millau-node-charlie: - <<: *millau-bridge-node - entrypoint: - - /home/user/millau-bridge-node - - --execution=Native - - --chain=local - - --bootnodes=/dns4/millau-node-alice/tcp/30333/p2p/12D3KooWFqiV73ipQ1jpfVmCfLqBCp8G9PLH3zPkY9EhmdrSGA4H - - --charlie - - --rpc-cors=all - - --enable-offchain-indexing=true - - --unsafe-rpc-external - - --unsafe-ws-external - - --prometheus-external - ports: - - "20133:9933" - - "20144:9944" - - "20615:9615" - - millau-node-dave: - <<: *millau-bridge-node - entrypoint: - - /home/user/millau-bridge-node - - --execution=Native - - --chain=local - - --bootnodes=/dns4/millau-node-alice/tcp/30333/p2p/12D3KooWFqiV73ipQ1jpfVmCfLqBCp8G9PLH3zPkY9EhmdrSGA4H - - --dave - - --rpc-cors=all - - --enable-offchain-indexing=true - - --unsafe-rpc-external - - --unsafe-ws-external - ports: - - "20233:9933" - - "20244:9944" - - millau-node-eve: - <<: *millau-bridge-node - entrypoint: - - /home/user/millau-bridge-node - - --execution=Native - - --chain=local - - --bootnodes=/dns4/millau-node-alice/tcp/30333/p2p/12D3KooWFqiV73ipQ1jpfVmCfLqBCp8G9PLH3zPkY9EhmdrSGA4H - - --eve - - --rpc-cors=all - - --enable-offchain-indexing=true - - --unsafe-rpc-external - - --unsafe-ws-external - ports: - - "20333:9933" - - "20344:9944" - - # Note: These are being overridden from the top level `monitoring` compose file. - prometheus-metrics: - volumes: - - ./networks/dashboard/prometheus/millau-targets.yml:/etc/prometheus/targets-millau-nodes.yml - depends_on: - - millau-node-charlie diff --git a/polkadot/bridges/deployments/networks/rialto-parachain.yml b/polkadot/bridges/deployments/networks/rialto-parachain.yml deleted file mode 100644 index b2d2188f1ba..00000000000 --- a/polkadot/bridges/deployments/networks/rialto-parachain.yml +++ /dev/null @@ -1,90 +0,0 @@ -# Compose file for quickly spinning up a local instance of the Rialto Parachain network. -# -# Since Rialto Parachain is unusable without Rialto, this file depends on some Rialto -# network nodes. 
-version: '3.5' -services: - rialto-parachain-collator-alice: &rialto-parachain-collator - image: paritytech/rialto-parachain-collator - entrypoint: > - /home/user/rialto-parachain-collator - --alice - --collator - --force-authoring - --parachain-id 2000 - --rpc-port 9933 - --ws-port 9944 - --rpc-cors=all - --unsafe-rpc-external - --unsafe-ws-external - -- - --execution wasm - --chain /rialto-share/rialto-relaychain-spec-raw.json - --rpc-port 9934 - --ws-port 9945 - volumes: - - rialto-share:/rialto-share:z - environment: - RUST_LOG: runtime=trace,rpc=trace,txpool=trace,parachain=trace,parity_ws=trace - depends_on: - - rialto-chainspec-exporter - ports: - - "20433:9933" - - "20444:9944" - - rialto-parachain-collator-bob: - <<: *rialto-parachain-collator - entrypoint: > - /home/user/rialto-parachain-collator - --bob - --collator - --force-authoring - --parachain-id 2000 - --rpc-port 9933 - --ws-port 9944 - --rpc-cors=all - --unsafe-rpc-external - --unsafe-ws-external - -- - --execution wasm - --chain /rialto-share/rialto-relaychain-spec-raw.json - --rpc-port 9934 - --ws-port 9945 - ports: - - "20533:9933" - - "20544:9944" - - rialto-parachain-collator-charlie: - <<: *rialto-parachain-collator - entrypoint: > - /home/user/rialto-parachain-collator - --charlie - --collator - --force-authoring - --parachain-id 2000 - --rpc-port 9933 - --ws-port 9944 - --rpc-cors=all - --unsafe-rpc-external - --unsafe-ws-external - -- - --execution wasm - --chain /rialto-share/rialto-relaychain-spec-raw.json - --rpc-port 9934 - --ws-port 9945 - ports: - - "20633:9933" - - "20644:9944" - - rialto-parachain-registrar: - image: paritytech/substrate-relay - entrypoint: /entrypoints/rialto-parachain-registrar-entrypoint.sh - volumes: - - ./networks/entrypoints:/entrypoints - - rialto-share:/rialto-share:z - environment: - RUST_LOG: bridge=trace - depends_on: - - rialto-node-alice - - rialto-parachain-collator-alice - diff --git a/polkadot/bridges/deployments/networks/rialto.yml b/polkadot/bridges/deployments/networks/rialto.yml deleted file mode 100644 index 40e881a37c1..00000000000 --- a/polkadot/bridges/deployments/networks/rialto.yml +++ /dev/null @@ -1,118 +0,0 @@ -# Compose file for quickly spinning up a local instance of the Rialto Substrate network. -# -# Note that the Rialto network is only used for testing, so the configuration settings you see here -# are *not* recommended for a production environment. -# -# For example, do *not* keep your `node-key` in version control, and unless you're _really_ sure you -# want to provide public access to your nodes do *not* publicly expose RPC methods. 
-version: '3.5' -services: - rialto-node-alice: &rialto-bridge-node - image: paritytech/rialto-bridge-node - entrypoint: - - /home/user/rialto-bridge-node - - --execution=Native - - --chain=local - - --bootnodes=/dns4/rialto-node-bob/tcp/30333/p2p/12D3KooWSEpHJj29HEzgPFcRYVc5X3sEuP3KgiUoqJNCet51NiMX - - --alice - - --node-key=79cf382988364291a7968ae7825c01f68c50d679796a8983237d07fe0ccf363b - - --rpc-cors=all - - --enable-offchain-indexing=true - - --unsafe-rpc-external - - --unsafe-ws-external - environment: - RUST_LOG: runtime=trace,rpc=debug,txpool=trace,runtime::bridge=trace,beefy=debug - ports: - - "9933:9933" - - "9944:9944" - - rialto-node-bob: - <<: *rialto-bridge-node - entrypoint: - - /home/user/rialto-bridge-node - - --execution=Native - - --chain=local - - --bootnodes=/dns4/rialto-node-alice/tcp/30333/p2p/12D3KooWMF6JvV319a7kJn5pqkKbhR3fcM2cvK5vCbYZHeQhYzFE - - --bob - - --node-key=4f9d0146dd9b7b3bf5a8089e3880023d1df92057f89e96e07bb4d8c2ead75bbd - - --rpc-cors=all - - --enable-offchain-indexing=true - - --unsafe-rpc-external - - --unsafe-ws-external - ports: - - "10033:9933" - - "10044:9944" - - rialto-node-charlie: - <<: *rialto-bridge-node - entrypoint: - - /home/user/rialto-bridge-node - - --execution=Native - - --chain=local - - --bootnodes=/dns4/rialto-node-alice/tcp/30333/p2p/12D3KooWMF6JvV319a7kJn5pqkKbhR3fcM2cvK5vCbYZHeQhYzFE - - --charlie - - --rpc-cors=all - - --enable-offchain-indexing=true - - --unsafe-rpc-external - - --unsafe-ws-external - - --prometheus-external - ports: - - "10133:9933" - - "10144:9944" - - "10615:9615" - - rialto-node-dave: - <<: *rialto-bridge-node - entrypoint: - - /home/user/rialto-bridge-node - - --execution=Native - - --chain=local - - --bootnodes=/dns4/rialto-node-alice/tcp/30333/p2p/12D3KooWMF6JvV319a7kJn5pqkKbhR3fcM2cvK5vCbYZHeQhYzFE - - --dave - - --rpc-cors=all - - --enable-offchain-indexing=true - - --unsafe-rpc-external - - --unsafe-ws-external - ports: - - "10233:9933" - - "10244:9944" - - rialto-node-eve: - <<: *rialto-bridge-node - entrypoint: - - /home/user/rialto-bridge-node - - --execution=Native - - --chain=local - - --bootnodes=/dns4/rialto-node-alice/tcp/30333/p2p/12D3KooWMF6JvV319a7kJn5pqkKbhR3fcM2cvK5vCbYZHeQhYzFE - - --eve - - --rpc-cors=all - - --enable-offchain-indexing=true - - --unsafe-rpc-external - - --unsafe-ws-external - ports: - - "10333:9933" - - "10344:9944" - - rialto-chainspec-exporter: - image: paritytech/rialto-bridge-node - entrypoint: /entrypoints/rialto-chainspec-exporter-entrypoint.sh - volumes: - - ./networks/entrypoints:/entrypoints - - rialto-share:/rialto-share:z - - # Note: These are being overridden from the top level `monitoring` compose file. - prometheus-metrics: - volumes: - - ./networks/dashboard/prometheus/rialto-targets.yml:/etc/prometheus/targets-rialto-nodes.yml - depends_on: - - rialto-node-charlie - -# we're using `/rialto-share` to expose Rialto chain spec to those who are interested. Right -# now it is Rialto Parachain collator nodes. Local + tmpfs combination allows sharing writable -# in-memory volumes, which are dropped when containers are stopped. 
-volumes: - rialto-share: - driver: local - driver_opts: - type: "tmpfs" - device: "tmpfs" diff --git a/polkadot/bridges/deployments/reverse-proxy/README.md b/polkadot/bridges/deployments/reverse-proxy/README.md deleted file mode 100644 index ded81f80a1b..00000000000 --- a/polkadot/bridges/deployments/reverse-proxy/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# nginx-proxy - -This is a nginx reverse proxy configuration with Let's encrypt companion. -Main purpose is to be able to use `https://polkadot.js.org/apps` to connect to -a running network. - -## How to? - -In current directory: -```bash -docker-compose up -d -``` - -Then start `rialto` network with the same command (one folder up). `nginx` should -pick up new containers being created and automatically create a proxy setup for `Charlie`. diff --git a/polkadot/bridges/deployments/reverse-proxy/docker-compose.yml b/polkadot/bridges/deployments/reverse-proxy/docker-compose.yml deleted file mode 100644 index 61c9505ae56..00000000000 --- a/polkadot/bridges/deployments/reverse-proxy/docker-compose.yml +++ /dev/null @@ -1,42 +0,0 @@ -version: '2' -services: - nginx-proxy: - image: jwilder/nginx-proxy - container_name: nginx-proxy - networks: - - nginx-proxy - - deployments_default - ports: - - "80:80" - - "443:443" - volumes: - - conf:/etc/nginx/conf.d - - vhost:/etc/nginx/vhost.d - - html:/usr/share/nginx/html - - dhparam:/etc/nginx/dhparam - - certs:/etc/nginx/certs:ro - - /var/run/docker.sock:/tmp/docker.sock:ro - - letsencrypt: - image: jrcs/letsencrypt-nginx-proxy-companion - container_name: nginx-proxy-le - networks: - - nginx-proxy - volumes_from: - - nginx-proxy - volumes: - - certs:/etc/nginx/certs:rw - - /var/run/docker.sock:/var/run/docker.sock:ro - -volumes: - conf: - vhost: - html: - dhparam: - certs: - -networks: - nginx-proxy: - driver: bridge - deployments_default: - external: true diff --git a/polkadot/bridges/deployments/run.sh b/polkadot/bridges/deployments/run.sh deleted file mode 100755 index 5c1cded1e83..00000000000 --- a/polkadot/bridges/deployments/run.sh +++ /dev/null @@ -1,159 +0,0 @@ -#!/bin/bash - -# Script used for running and updating bridge deployments. -# -# To deploy a network you can run this script with the name of the bridge (or multiple bridges) you want to run. -# -# `./run.sh westend-millau rialto-millau` -# -# To update a deployment to use the latest images available from the Docker Hub add the `update` -# argument after the bridge name. -# -# `./run.sh rialto-millau update` -# -# Once you've stopped having fun with your deployment you can take it down with: -# -# `./run.sh rialto-millau stop` -# -# Stopping the bridge will also bring down all networks that it uses. So if you have started multiple bridges -# that are using the same network (like Millau in rialto-millau and westend-millau bridges), then stopping one -# of these bridges will cause the other bridge to break. - -set -xeu - -# Since the Compose commands are using relative paths we need to `cd` into the `deployments` folder. 
-cd "$( dirname "${BASH_SOURCE[0]}" )" - -function show_help () { - set +x - echo " " - echo Error: $1 - echo " " - echo "Usage:" - echo " ./run.sh rialto-millau [stop|update] Run Rialto <> Millau Networks & Bridge" - echo " ./run.sh westend-millau [stop|update] Run Westend -> Millau Networks & Bridge" - echo " " - echo "Options:" - echo " --no-monitoring Disable monitoring" - echo " --no-ui Disable UI" - echo " " - echo "You can start multiple bridges at once by passing several bridge names:" - echo " ./run.sh rialto-millau westend-millau [stop|update]" - exit 1 -} - -RIALTO=' -f ./networks/rialto.yml -f ./networks/rialto-parachain.yml' -MILLAU=' -f ./networks/millau.yml' -MONITORING=' -f ./monitoring/docker-compose.yml' -UI=' -f ./ui/docker-compose.yml' - -BRIDGES=() -NETWORKS='' -SUB_COMMAND='start' -for i in "$@" -do - case $i in - --no-monitoring) - MONITORING=" -f ./monitoring/disabled.yml" - shift - continue - ;; - --no-ui) - UI="" - shift - continue - ;; - rialto-millau) - BRIDGES+=($i) - NETWORKS+=${RIALTO} - RIALTO='' - NETWORKS+=${MILLAU} - MILLAU='' - shift - ;; - westend-millau) - BRIDGES+=($i) - NETWORKS+=${MILLAU} - MILLAU='' - shift - ;; - start|stop|update) - SUB_COMMAND=$i - shift - ;; - *) - show_help "Unknown option: $i" - ;; - esac -done - -if [ ${#BRIDGES[@]} -eq 0 ]; then - show_help "Missing bridge name." -fi - -COMPOSE_FILES=$NETWORKS$MONITORING$UI - -# Compose looks for .env files in the the current directory by default, we don't want that -COMPOSE_ARGS="--project-directory ." -# Path to env file that we want to use. Compose only accepts single `--env-file` argument, -# so we'll be using the last .env file we'll found. -COMPOSE_ENV_FILE='' - -for BRIDGE in "${BRIDGES[@]}" -do - BRIDGE_PATH="./bridges/$BRIDGE" - BRIDGE=" -f $BRIDGE_PATH/docker-compose.yml" - COMPOSE_FILES=$BRIDGE$COMPOSE_FILES - - # Remember .env file to use in docker-compose call - if [[ -f "$BRIDGE_PATH/.env" ]]; then - COMPOSE_ENV_FILE=" --env-file $BRIDGE_PATH/.env" - fi - - # Read and source variables from .env file so we can use them here - grep -e MATRIX_ACCESS_TOKEN -e WITH_PROXY $BRIDGE_PATH/.env > .env2 && . ./.env2 && rm .env2 - if [ ! -z ${MATRIX_ACCESS_TOKEN+x} ]; then - sed -i "s/access_token.*/access_token: \"$MATRIX_ACCESS_TOKEN\"/" ./monitoring/grafana-matrix/config.yml - fi -done - -# Final COMPOSE_ARGS -COMPOSE_ARGS="$COMPOSE_ARGS $COMPOSE_ENV_FILE" - -# Check the sub-command, perhaps we just mean to stop the network instead of starting it. -if [ "$SUB_COMMAND" == "stop" ]; then - - if [ ! -z ${WITH_PROXY+x} ]; then - cd ./reverse-proxy - docker-compose down - cd - - fi - - docker-compose $COMPOSE_ARGS $COMPOSE_FILES down - - exit 0 -fi - -# See if we want to update the docker images before starting the network. -if [ "$SUB_COMMAND" == "update" ]; then - - # Stop the proxy cause otherwise the network can't be stopped - if [ ! -z ${WITH_PROXY+x} ]; then - cd ./reverse-proxy - docker-compose down - cd - - fi - - - docker-compose $COMPOSE_ARGS $COMPOSE_FILES pull - docker-compose $COMPOSE_ARGS $COMPOSE_FILES down - docker-compose $COMPOSE_ARGS $COMPOSE_FILES build -fi - -docker-compose $COMPOSE_ARGS $COMPOSE_FILES up -d - -# Start the proxy if needed -if [ ! 
-z ${WITH_PROXY+x} ]; then - cd ./reverse-proxy - docker-compose up -d -fi diff --git a/polkadot/bridges/deployments/types-millau.json b/polkadot/bridges/deployments/types-millau.json deleted file mode 100644 index 6d651b4c7cf..00000000000 --- a/polkadot/bridges/deployments/types-millau.json +++ /dev/null @@ -1,192 +0,0 @@ -{ - "--1": "Millau Types", - "MillauAddress": "AccountId", - "MillauLookupSource": "AccountId", - "MillauBalance": "u64", - "MillauBlockHash": "H512", - "MillauBlockNumber": "u64", - "MillauHeader": { - "parent_Hash": "MillauBlockHash", - "number": "Compact", - "state_root": "MillauBlockHash", - "extrinsics_root": "MillauBlockHash", - "digest": "MillauDigest" - }, - "MillauDigest": { - "logs": "Vec" - }, - "MillauDigestItem": { - "_enum": { - "Other": "Vec", - "AuthoritiesChange": "Vec", - "ChangesTrieRoot": "MillauBlockHash", - "SealV0": "SealV0", - "Consensus": "Consensus", - "Seal": "Seal", - "PreRuntime": "PreRuntime" - } - }, - "--2": "Rialto Types", - "RialtoAddress": "MultiAddress", - "RialtoLookupSource": "MultiAddress", - "RialtoBalance": "u128", - "RialtoBlockHash": "H256", - "RialtoBlockNumber": "u32", - "RialtoHeader": { - "parent_Hash": "RialtoBlockHash", - "number": "Compact", - "state_root": "RialtoBlockHash", - "extrinsics_root": "RialtoBlockHash", - "digest": "RialtoDigest" - }, - "RialtoDigest": { - "logs": "Vec" - }, - "RialtoDigestItem": { - "_enum": { - "Other": "Vec", - "AuthoritiesChange": "Vec", - "ChangesTrieRoot": "RialtoBlockHash", - "SealV0": "SealV0", - "Consensus": "Consensus", - "Seal": "Seal", - "PreRuntime": "PreRuntime" - } - }, - "--3": "Common types", - "AccountSigner": "MultiSigner", - "SpecVersion": "u32", - "RelayerId": "AccountId", - "SourceAccountId": "AccountId", - "ImportedHeader": { - "header": "BridgedHeader", - "requires_justification": "bool", - "is_finalized": "bool", - "signal_hash": "Option" - }, - "AuthoritySet": { - "authorities": "AuthorityList", - "set_id": "SetId" - }, - "Id": "[u8; 4]", - "ChainId": "Id", - "LaneId": "Id", - "MessageNonce": "u64", - "BridgeMessageId": "(Id, u64)", - "MessageKey": { - "lane_id": "LaneId", - "nonce:": "MessageNonce" - }, - "InboundRelayer": "AccountId", - "InboundLaneData": { - "relayers": "Vec", - "last_confirmed_nonce": "MessageNonce" - }, - "UnrewardedRelayer": { - "relayer": "RelayerId", - "messages": "DeliveredMessages" - }, - "DeliveredMessages": { - "begin": "MessageNonce", - "end": "MessageNonce", - "dispatch_results": "BitVec" - }, - "OutboundLaneData": { - "oldest_unpruned_nonce": "MessageNonce", - "latest_received_nonce": "MessageNonce", - "latest_generated_nonce": "MessageNonce" - }, - "MessageData": { - "payload": "MessagePayload", - "fee": "Fee" - }, - "MessagePayload": "Vec", - "BridgedOpaqueCall": "Vec", - "OutboundMessageFee": "Fee", - "OutboundPayload": { - "spec_version": "SpecVersion", - "weight": "Weight", - "origin": "CallOrigin", - "dispatch_fee_payment": "DispatchFeePayment", - "call": "BridgedOpaqueCall" - }, - "CallOrigin": { - "_enum": { - "SourceRoot": "()", - "TargetAccount": "(SourceAccountId, MultiSigner, MultiSignature)", - "SourceAccount": "SourceAccountId" - } - }, - "DispatchFeePayment": { - "_enum": { - "AtSourceChain": "()", - "AtTargetChain": "()" - } - }, - "MultiSigner": { - "_enum": { - "Ed25519": "H256", - "Sr25519": "H256", - "Ecdsa": "[u8;33]" - } - }, - "MessagesProofOf": { - "bridged_header_hash": "BridgedBlockHash", - "storage_proof": "Vec", - "lane": "LaneId", - "nonces_start": "MessageNonce", - "nonces_end": "MessageNonce" - }, - 
"StorageProofItem": "Vec", - "MessagesDeliveryProofOf": { - "bridged_header_hash": "BridgedBlockHash", - "storage_proof": "Vec", - "lane": "LaneId" - }, - "UnrewardedRelayersState": { - "unrewarded_relayer_entries": "MessageNonce", - "messages_in_oldest_entry": "MessageNonce", - "total_messages": "MessageNonce" - }, - "AncestryProof": "()", - "MessageFeeData": { - "lane_id": "LaneId", - "payload": "OutboundPayload" - }, - "Precommit": { - "target_hash": "BridgedBlockHash", - "target_number": "BridgedBlockNumber" - }, - "AuthoritySignature": "[u8;64]", - "AuthorityId": "[u8;32]", - "SignedPrecommit": { - "precommit": "Precommit", - "signature": "AuthoritySignature", - "id": "AuthorityId" - }, - "Commit": { - "target_hash": "BridgedBlockHash", - "target_number": "BridgedBlockNumber", - "precommits": "Vec" - }, - "GrandpaJustification": { - "round": "u64", - "commit": "Commit", - "votes_ancestries": "Vec" - }, - "Address": "MillauAddress", - "LookupSource": "MillauLookupSource", - "Fee": "MillauBalance", - "Balance": "MillauBalance", - "Hash": "MillauBlockHash", - "BlockHash": "MillauBlockHash", - "BlockNumber": "MillauBlockNumber", - "BridgedBlockHash": "RialtoBlockHash", - "BridgedBlockNumber": "RialtoBlockNumber", - "BridgedHeader": "RialtoHeader", - "Parameter": { - "_enum": { - "MillauToRialtoConversionRate": "u128" - } - } -} diff --git a/polkadot/bridges/deployments/types-rialto.json b/polkadot/bridges/deployments/types-rialto.json deleted file mode 100644 index a574e117893..00000000000 --- a/polkadot/bridges/deployments/types-rialto.json +++ /dev/null @@ -1,192 +0,0 @@ -{ - "--1": "Millau Types", - "MillauAddress": "AccountId", - "MillauLookupSource": "AccountId", - "MillauBalance": "u64", - "MillauBlockHash": "H512", - "MillauBlockNumber": "u64", - "MillauHeader": { - "parent_Hash": "MillauBlockHash", - "number": "Compact", - "state_root": "MillauBlockHash", - "extrinsics_root": "MillauBlockHash", - "digest": "MillauDigest" - }, - "MillauDigest": { - "logs": "Vec" - }, - "MillauDigestItem": { - "_enum": { - "Other": "Vec", - "AuthoritiesChange": "Vec", - "ChangesTrieRoot": "MillauBlockHash", - "SealV0": "SealV0", - "Consensus": "Consensus", - "Seal": "Seal", - "PreRuntime": "PreRuntime" - } - }, - "--2": "Rialto Types", - "RialtoAddress": "MultiAddress", - "RialtoLookupSource": "MultiAddress", - "RialtoBalance": "u128", - "RialtoBlockHash": "H256", - "RialtoBlockNumber": "u32", - "RialtoHeader": { - "parent_Hash": "RialtoBlockHash", - "number": "Compact", - "state_root": "RialtoBlockHash", - "extrinsics_root": "RialtoBlockHash", - "digest": "RialtoDigest" - }, - "RialtoDigest": { - "logs": "Vec" - }, - "RialtoDigestItem": { - "_enum": { - "Other": "Vec", - "AuthoritiesChange": "Vec", - "ChangesTrieRoot": "RialtoBlockHash", - "SealV0": "SealV0", - "Consensus": "Consensus", - "Seal": "Seal", - "PreRuntime": "PreRuntime" - } - }, - "--3": "Common types", - "AccountSigner": "MultiSigner", - "SpecVersion": "u32", - "RelayerId": "AccountId", - "SourceAccountId": "AccountId", - "ImportedHeader": { - "header": "BridgedHeader", - "requires_justification": "bool", - "is_finalized": "bool", - "signal_hash": "Option" - }, - "AuthoritySet": { - "authorities": "AuthorityList", - "set_id": "SetId" - }, - "Id": "[u8; 4]", - "ChainId": "Id", - "LaneId": "Id", - "MessageNonce": "u64", - "BridgeMessageId": "(Id, u64)", - "MessageKey": { - "lane_id": "LaneId", - "nonce:": "MessageNonce" - }, - "InboundRelayer": "AccountId", - "InboundLaneData": { - "relayers": "Vec", - "last_confirmed_nonce": 
"MessageNonce" - }, - "UnrewardedRelayer": { - "relayer": "RelayerId", - "messages": "DeliveredMessages" - }, - "DeliveredMessages": { - "begin": "MessageNonce", - "end": "MessageNonce", - "dispatch_results": "BitVec" - }, - "OutboundLaneData": { - "oldest_unpruned_nonce": "MessageNonce", - "latest_received_nonce": "MessageNonce", - "latest_generated_nonce": "MessageNonce" - }, - "MessageData": { - "payload": "MessagePayload", - "fee": "Fee" - }, - "MessagePayload": "Vec", - "BridgedOpaqueCall": "Vec", - "OutboundMessageFee": "Fee", - "OutboundPayload": { - "spec_version": "SpecVersion", - "weight": "Weight", - "origin": "CallOrigin", - "dispatch_fee_payment": "DispatchFeePayment", - "call": "BridgedOpaqueCall" - }, - "CallOrigin": { - "_enum": { - "SourceRoot": "()", - "TargetAccount": "(SourceAccountId, MultiSigner, MultiSignature)", - "SourceAccount": "SourceAccountId" - } - }, - "DispatchFeePayment": { - "_enum": { - "AtSourceChain": "()", - "AtTargetChain": "()" - } - }, - "MultiSigner": { - "_enum": { - "Ed25519": "H256", - "Sr25519": "H256", - "Ecdsa": "[u8;33]" - } - }, - "MessagesProofOf": { - "bridged_header_hash": "BridgedBlockHash", - "storage_proof": "Vec", - "lane": "LaneId", - "nonces_start": "MessageNonce", - "nonces_end": "MessageNonce" - }, - "StorageProofItem": "Vec", - "MessagesDeliveryProofOf": { - "bridged_header_hash": "BridgedBlockHash", - "storage_proof": "Vec", - "lane": "LaneId" - }, - "UnrewardedRelayersState": { - "unrewarded_relayer_entries": "MessageNonce", - "messages_in_oldest_entry": "MessageNonce", - "total_messages": "MessageNonce" - }, - "AncestryProof": "()", - "MessageFeeData": { - "lane_id": "LaneId", - "payload": "OutboundPayload" - }, - "Precommit": { - "target_hash": "BridgedBlockHash", - "target_number": "BridgedBlockNumber" - }, - "AuthoritySignature": "[u8;64]", - "AuthorityId": "[u8;32]", - "SignedPrecommit": { - "precommit": "Precommit", - "signature": "AuthoritySignature", - "id": "AuthorityId" - }, - "Commit": { - "target_hash": "BridgedBlockHash", - "target_number": "BridgedBlockNumber", - "precommits": "Vec" - }, - "GrandpaJustification": { - "round": "u64", - "commit": "Commit", - "votes_ancestries": "Vec" - }, - "Address": "RialtoAddress", - "LookupSource": "RialtoLookupSource", - "Fee": "RialtoBalance", - "Balance": "RialtoBalance", - "BlockHash": "RialtoBlockHash", - "BlockNumber": "RialtoBlockNumber", - "BridgedBlockHash": "MillauBlockHash", - "BridgedBlockNumber": "MillauBlockNumber", - "BridgedHeader": "MillauHeader", - "Parameter": { - "_enum": { - "RialtoToMillauConversionRate": "u128" - } - }, - "ValidationCodeHash": "H256" -} diff --git a/polkadot/bridges/deployments/types-rococo.json b/polkadot/bridges/deployments/types-rococo.json deleted file mode 100644 index 6f4592a8d57..00000000000 --- a/polkadot/bridges/deployments/types-rococo.json +++ /dev/null @@ -1,151 +0,0 @@ -{ - "--1": "Rococo Types", - "RococoAddress": "AccountId", - "RococoLookupSource": "AccountId", - "RococoBalance": "u128", - "RococoBlockHash": "H256", - "RococoBlockNumber": "u32", - "RococoHeader": "Header", - "--2": "Wococo Types", - "WococoAddress": "AccountId", - "WococoLookupSource": "AccountId", - "WococoBalance": "RococoBalance", - "WococoBlockHash": "RococoBlockHash", - "WococoBlockNumber": "RococoBlockNumber", - "WococoHeader": "RococoHeader", - "--3": "Common types", - "AccountSigner": "MultiSigner", - "SpecVersion": "u32", - "RelayerId": "AccountId", - "SourceAccountId": "AccountId", - "ImportedHeader": { - "header": "BridgedHeader", - 
"requires_justification": "bool", - "is_finalized": "bool", - "signal_hash": "Option" - }, - "AuthoritySet": { - "authorities": "AuthorityList", - "set_id": "SetId" - }, - "Id": "[u8; 4]", - "ChainId": "Id", - "LaneId": "Id", - "MessageNonce": "u64", - "BridgeMessageId": "(Id, u64)", - "MessageKey": { - "lane_id": "LaneId", - "nonce:": "MessageNonce" - }, - "InboundRelayer": "AccountId", - "InboundLaneData": { - "relayers": "Vec", - "last_confirmed_nonce": "MessageNonce" - }, - "UnrewardedRelayer": { - "relayer": "RelayerId", - "messages": "DeliveredMessages" - }, - "DeliveredMessages": { - "begin": "MessageNonce", - "end": "MessageNonce", - "dispatch_results": "BitVec" - }, - "OutboundLaneData": { - "oldest_unpruned_nonce": "MessageNonce", - "latest_received_nonce": "MessageNonce", - "latest_generated_nonce": "MessageNonce" - }, - "MessageData": { - "payload": "MessagePayload", - "fee": "Fee" - }, - "MessagePayload": "Vec", - "BridgedOpaqueCall": "Vec", - "OutboundMessageFee": "Fee", - "OutboundPayload": { - "spec_version": "SpecVersion", - "weight": "Weight", - "origin": "CallOrigin", - "dispatch_fee_payment": "DispatchFeePayment", - "call": "BridgedOpaqueCall" - }, - "CallOrigin": { - "_enum": { - "SourceRoot": "()", - "TargetAccount": "(SourceAccountId, MultiSigner, MultiSignature)", - "SourceAccount": "SourceAccountId" - } - }, - "DispatchFeePayment": { - "_enum": { - "AtSourceChain": "()", - "AtTargetChain": "()" - } - }, - "MultiSigner": { - "_enum": { - "Ed25519": "H256", - "Sr25519": "H256", - "Ecdsa": "[u8;33]" - } - }, - "MessagesProofOf": { - "bridged_header_hash": "BridgedBlockHash", - "storage_proof": "Vec", - "lane": "LaneId", - "nonces_start": "MessageNonce", - "nonces_end": "MessageNonce" - }, - "StorageProofItem": "Vec", - "MessagesDeliveryProofOf": { - "bridged_header_hash": "BridgedBlockHash", - "storage_proof": "Vec", - "lane": "LaneId" - }, - "UnrewardedRelayersState": { - "unrewarded_relayer_entries": "MessageNonce", - "messages_in_oldest_entry": "MessageNonce", - "total_messages": "MessageNonce" - }, - "AncestryProof": "()", - "MessageFeeData": { - "lane_id": "LaneId", - "payload": "OutboundPayload" - }, - "Precommit": { - "target_hash": "BridgedBlockHash", - "target_number": "BridgedBlockNumber" - }, - "AuthoritySignature": "[u8;64]", - "AuthorityId": "[u8;32]", - "SignedPrecommit": { - "precommit": "Precommit", - "signature": "AuthoritySignature", - "id": "AuthorityId" - }, - "Commit": { - "target_hash": "BridgedBlockHash", - "target_number": "BridgedBlockNumber", - "precommits": "Vec" - }, - "GrandpaJustification": { - "round": "u64", - "commit": "Commit", - "votes_ancestries": "Vec" - }, - "Address": "RococoAddress", - "LookupSource": "RococoLookupSource", - "Fee": "RococoBalance", - "Balance": "RococoBalance", - "BlockHash": "RococoBlockHash", - "BlockNumber": "RococoBlockNumber", - "BridgedBlockHash": "WococoBlockHash", - "BridgedBlockNumber": "WococoBlockNumber", - "BridgedHeader": "WococoHeader", - "Parameter": { - "_enum": { - "RococoToWococoConversionRate": "u128" - } - } -} diff --git a/polkadot/bridges/deployments/types-wococo.json b/polkadot/bridges/deployments/types-wococo.json deleted file mode 100644 index 562f08afa9c..00000000000 --- a/polkadot/bridges/deployments/types-wococo.json +++ /dev/null @@ -1,152 +0,0 @@ -{ - "--1": "Rococo Types", - "RococoAddress": "AccountId", - "RococoLookupSource": "AccountId", - "RococoBalance": "u128", - "RococoBlockHash": "H256", - "RococoBlockNumber": "u32", - "RococoHeader": "Header", - "--2": "Wococo Types", - 
"WococoAddress": "AccountId", - "WococoLookupSource": "AccountId", - "WococoBalance": "RococoBalance", - "WococoBlockHash": "RococoBlockHash", - "WococoBlockNumber": "RococoBlockNumber", - "WococoHeader": "RococoHeader", - "--3": "Common types", - "AccountSigner": "MultiSigner", - "SpecVersion": "u32", - "RelayerId": "AccountId", - "SourceAccountId": "AccountId", - "ImportedHeader": { - "header": "BridgedHeader", - "requires_justification": "bool", - "is_finalized": "bool", - "signal_hash": "Option" - }, - "AuthoritySet": { - "authorities": "AuthorityList", - "set_id": "SetId" - }, - "Id": "[u8; 4]", - "ChainId": "Id", - "LaneId": "Id", - "MessageNonce": "u64", - "BridgeMessageId": "(Id, u64)", - "MessageKey": { - "lane_id": "LaneId", - "nonce:": "MessageNonce" - }, - "InboundRelayer": "AccountId", - "InboundLaneData": { - "relayers": "Vec", - "last_confirmed_nonce": "MessageNonce" - }, - "UnrewardedRelayer": { - "relayer": "RelayerId", - "messages": "DeliveredMessages" - }, - "DeliveredMessages": { - "begin": "MessageNonce", - "end": "MessageNonce", - "dispatch_results": "BitVec" - }, - "OutboundLaneData": { - "oldest_unpruned_nonce": "MessageNonce", - "latest_received_nonce": "MessageNonce", - "latest_generated_nonce": "MessageNonce" - }, - "MessageData": { - "payload": "MessagePayload", - "fee": "Fee" - }, - "MessagePayload": "Vec", - "BridgedOpaqueCall": "Vec", - "OutboundMessageFee": "Fee", - "OutboundPayload": { - "spec_version": "SpecVersion", - "weight": "Weight", - "origin": "CallOrigin", - "dispatch_fee_payment": "DispatchFeePayment", - "call": "BridgedOpaqueCall" - }, - "CallOrigin": { - "_enum": { - "SourceRoot": "()", - "TargetAccount": "(SourceAccountId, MultiSigner, MultiSignature)", - "SourceAccount": "SourceAccountId" - } - }, - "DispatchFeePayment": { - "_enum": { - "AtSourceChain": "()", - "AtTargetChain": "()" - } - }, - "MultiSigner": { - "_enum": { - "Ed25519": "H256", - "Sr25519": "H256", - "Ecdsa": "[u8;33]" - } - }, - "MessagesProofOf": { - "bridged_header_hash": "BridgedBlockHash", - "storage_proof": "Vec", - "lane": "LaneId", - "nonces_start": "MessageNonce", - "nonces_end": "MessageNonce" - }, - "StorageProofItem": "Vec", - "MessagesDeliveryProofOf": { - "bridged_header_hash": "BridgedBlockHash", - "storage_proof": "Vec", - "lane": "LaneId" - }, - "UnrewardedRelayersState": { - "unrewarded_relayer_entries": "MessageNonce", - "messages_in_oldest_entry": "MessageNonce", - "total_messages": "MessageNonce" - }, - "AncestryProof": "()", - "MessageFeeData": { - "lane_id": "LaneId", - "payload": "OutboundPayload" - }, - "Precommit": { - "target_hash": "BridgedBlockHash", - "target_number": "BridgedBlockNumber" - }, - "AuthoritySignature": "[u8;64]", - "AuthorityId": "[u8;32]", - "SignedPrecommit": { - "precommit": "Precommit", - "signature": "AuthoritySignature", - "id": "AuthorityId" - }, - "Commit": { - "target_hash": "BridgedBlockHash", - "target_number": "BridgedBlockNumber", - "precommits": "Vec" - }, - "GrandpaJustification": { - "round": "u64", - "commit": "Commit", - "votes_ancestries": "Vec" - }, - "Address": "WococoAddress", - "LookupSource": "WococoLookupSource", - "Fee": "WococoBalance", - "Balance": "WococoBalance", - "Hash": "WococoBlockHash", - "BlockHash": "WococoBlockHash", - "BlockNumber": "WococoBlockNumber", - "BridgedBlockHash": "RococoBlockHash", - "BridgedBlockNumber": "RococoBlockNumber", - "BridgedHeader": "RococoHeader", - "Parameter": { - "_enum": { - "WococoToRococoConversionRate": "u128" - } - } -} diff --git 
a/polkadot/bridges/deployments/types/build.sh b/polkadot/bridges/deployments/types/build.sh deleted file mode 100755 index 1bcfd23ee06..00000000000 --- a/polkadot/bridges/deployments/types/build.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/sh - -# The script generates JSON type definition files in `./deployment` directory to be used for -# JS clients. -# -# It works by creating definitions for each side of the different bridge pairs we support -# (Rialto<>Millau and Rococo<>Wococo at the moment). -# -# To avoid duplication each bridge pair has a JSON file with common definitions, as well as a -# general JSON file with common definitions regardless of the bridge pair. These files are then -# merged with chain-specific type definitions. - -set -eux - -# Make sure we are in the right dir. -cd $(dirname $(realpath $0)) - -# Create types for our supported bridge pairs (Rialto<>Millau, Rococo<>Wococo) -jq -s '.[0] * .[1] * .[2]' rialto-millau.json common.json rialto.json > ../types-rialto.json -jq -s '.[0] * .[1] * .[2]' rialto-millau.json common.json millau.json > ../types-millau.json -jq -s '.[0] * .[1] * .[2]' rococo-wococo.json common.json rococo.json > ../types-rococo.json -jq -s '.[0] * .[1] * .[2]' rococo-wococo.json common.json wococo.json > ../types-wococo.json diff --git a/polkadot/bridges/deployments/types/common.json b/polkadot/bridges/deployments/types/common.json deleted file mode 100644 index 4e129f7132b..00000000000 --- a/polkadot/bridges/deployments/types/common.json +++ /dev/null @@ -1,124 +0,0 @@ -{ - "--3": "Common types", - "AccountSigner": "MultiSigner", - "SpecVersion": "u32", - "RelayerId": "AccountId", - "SourceAccountId": "AccountId", - "ImportedHeader": { - "header": "BridgedHeader", - "requires_justification": "bool", - "is_finalized": "bool", - "signal_hash": "Option" - }, - "AuthoritySet": { - "authorities": "AuthorityList", - "set_id": "SetId" - }, - "Id": "[u8; 4]", - "ChainId": "Id", - "LaneId": "Id", - "MessageNonce": "u64", - "BridgeMessageId": "(Id, u64)", - "MessageKey": { - "lane_id": "LaneId", - "nonce:": "MessageNonce" - }, - "InboundRelayer": "AccountId", - "InboundLaneData": { - "relayers": "Vec", - "last_confirmed_nonce": "MessageNonce" - }, - "UnrewardedRelayer": { - "relayer": "RelayerId", - "messages": "DeliveredMessages" - }, - "DeliveredMessages": { - "begin": "MessageNonce", - "end": "MessageNonce", - "dispatch_results": "BitVec" - }, - "OutboundLaneData": { - "oldest_unpruned_nonce": "MessageNonce", - "latest_received_nonce": "MessageNonce", - "latest_generated_nonce": "MessageNonce" - - }, - "MessageData": { - "payload": "MessagePayload", - "fee": "Fee" - }, - "MessagePayload": "Vec", - "BridgedOpaqueCall": "Vec", - "OutboundMessageFee": "Fee", - "OutboundPayload": { - "spec_version": "SpecVersion", - "weight": "Weight", - "origin": "CallOrigin", - "dispatch_fee_payment": "DispatchFeePayment", - "call": "BridgedOpaqueCall" - }, - "CallOrigin": { - "_enum": { - "SourceRoot": "()", - "TargetAccount": "(SourceAccountId, MultiSigner, MultiSignature)", - "SourceAccount": "SourceAccountId" - } - }, - "DispatchFeePayment": { - "_enum": { - "AtSourceChain": "()", - "AtTargetChain": "()" - } - }, - "MultiSigner": { - "_enum": { - "Ed25519": "H256", - "Sr25519": "H256", - "Ecdsa": "[u8;33]" - } - }, - "MessagesProofOf": { - "bridged_header_hash": "BridgedBlockHash", - "storage_proof": "Vec", - "lane": "LaneId", - "nonces_start": "MessageNonce", - "nonces_end": "MessageNonce" - }, - "StorageProofItem": "Vec", - "MessagesDeliveryProofOf": { - 
"bridged_header_hash": "BridgedBlockHash", - "storage_proof": "Vec", - "lane": "LaneId" - }, - "UnrewardedRelayersState": { - "unrewarded_relayer_entries": "MessageNonce", - "messages_in_oldest_entry": "MessageNonce", - "total_messages": "MessageNonce" - }, - "AncestryProof": "()", - "MessageFeeData": { - "lane_id": "LaneId", - "payload": "OutboundPayload" - }, - "Precommit": { - "target_hash": "BridgedBlockHash", - "target_number": "BridgedBlockNumber" - }, - "AuthoritySignature": "[u8;64]", - "AuthorityId": "[u8;32]", - "SignedPrecommit": { - "precommit": "Precommit", - "signature": "AuthoritySignature", - "id": "AuthorityId" - }, - "Commit": { - "target_hash": "BridgedBlockHash", - "target_number": "BridgedBlockNumber", - "precommits": "Vec" - }, - "GrandpaJustification": { - "round": "u64", - "commit": "Commit", - "votes_ancestries": "Vec" - } -} diff --git a/polkadot/bridges/deployments/types/millau.json b/polkadot/bridges/deployments/types/millau.json deleted file mode 100644 index 589d5619df4..00000000000 --- a/polkadot/bridges/deployments/types/millau.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "Address": "MillauAddress", - "LookupSource": "MillauLookupSource", - "Fee": "MillauBalance", - "Balance": "MillauBalance", - "Hash": "MillauBlockHash", - "BlockHash": "MillauBlockHash", - "BlockNumber": "MillauBlockNumber", - "BridgedBlockHash": "RialtoBlockHash", - "BridgedBlockNumber": "RialtoBlockNumber", - "BridgedHeader": "RialtoHeader", - "Parameter": { - "_enum": { - "MillauToRialtoConversionRate": "u128" - } - } -} diff --git a/polkadot/bridges/deployments/types/rialto-millau.json b/polkadot/bridges/deployments/types/rialto-millau.json deleted file mode 100644 index 971cf666d47..00000000000 --- a/polkadot/bridges/deployments/types/rialto-millau.json +++ /dev/null @@ -1,56 +0,0 @@ -{ - "--1": "Millau Types", - "MillauAddress": "AccountId", - "MillauLookupSource": "AccountId", - "MillauBalance": "u64", - "MillauBlockHash": "H512", - "MillauBlockNumber": "u64", - "MillauHeader": { - "parent_Hash": "MillauBlockHash", - "number": "Compact", - "state_root": "MillauBlockHash", - "extrinsics_root": "MillauBlockHash", - "digest": "MillauDigest" - }, - "MillauDigest": { - "logs": "Vec" - }, - "MillauDigestItem": { - "_enum": { - "Other": "Vec", - "AuthoritiesChange": "Vec", - "ChangesTrieRoot": "MillauBlockHash", - "SealV0": "SealV0", - "Consensus": "Consensus", - "Seal": "Seal", - "PreRuntime": "PreRuntime" - } - }, - "--2": "Rialto Types", - "RialtoAddress": "MultiAddress", - "RialtoLookupSource": "MultiAddress", - "RialtoBalance": "u128", - "RialtoBlockHash": "H256", - "RialtoBlockNumber": "u32", - "RialtoHeader": { - "parent_Hash": "RialtoBlockHash", - "number": "Compact", - "state_root": "RialtoBlockHash", - "extrinsics_root": "RialtoBlockHash", - "digest": "RialtoDigest" - }, - "RialtoDigest": { - "logs": "Vec" - }, - "RialtoDigestItem": { - "_enum": { - "Other": "Vec", - "AuthoritiesChange": "Vec", - "ChangesTrieRoot": "RialtoBlockHash", - "SealV0": "SealV0", - "Consensus": "Consensus", - "Seal": "Seal", - "PreRuntime": "PreRuntime" - } - } -} diff --git a/polkadot/bridges/deployments/types/rialto.json b/polkadot/bridges/deployments/types/rialto.json deleted file mode 100644 index 77c30b7cc2d..00000000000 --- a/polkadot/bridges/deployments/types/rialto.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "Address": "RialtoAddress", - "LookupSource": "RialtoLookupSource", - "Fee": "RialtoBalance", - "Balance": "RialtoBalance", - "BlockHash": "RialtoBlockHash", - "BlockNumber": "RialtoBlockNumber", - 
"BridgedBlockHash": "MillauBlockHash", - "BridgedBlockNumber": "MillauBlockNumber", - "BridgedHeader": "MillauHeader", - "Parameter": { - "_enum": { - "RialtoToMillauConversionRate": "u128" - } - }, - "ValidationCodeHash": "H256" -} diff --git a/polkadot/bridges/deployments/types/rococo-wococo.json b/polkadot/bridges/deployments/types/rococo-wococo.json deleted file mode 100644 index e0864c2ffb0..00000000000 --- a/polkadot/bridges/deployments/types/rococo-wococo.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "--1": "Rococo Types", - "RococoAddress": "AccountId", - "RococoLookupSource": "AccountId", - "RococoBalance": "u128", - "RococoBlockHash": "H256", - "RococoBlockNumber": "u32", - "RococoHeader": "Header", - "--2": "Wococo Types", - "WococoAddress": "AccountId", - "WococoLookupSource": "AccountId", - "WococoBalance": "RococoBalance", - "WococoBlockHash": "RococoBlockHash", - "WococoBlockNumber": "RococoBlockNumber", - "WococoHeader": "RococoHeader" -} diff --git a/polkadot/bridges/deployments/types/rococo.json b/polkadot/bridges/deployments/types/rococo.json deleted file mode 100644 index fa1bf275009..00000000000 --- a/polkadot/bridges/deployments/types/rococo.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "Address": "RococoAddress", - "LookupSource": "RococoLookupSource", - "Fee": "RococoBalance", - "Balance": "RococoBalance", - "BlockHash": "RococoBlockHash", - "BlockNumber": "RococoBlockNumber", - "BridgedBlockHash": "WococoBlockHash", - "BridgedBlockNumber": "WococoBlockNumber", - "BridgedHeader": "WococoHeader", - "Parameter": { - "_enum": { - "RococoToWococoConversionRate": "u128" - } - } -} diff --git a/polkadot/bridges/deployments/types/wococo.json b/polkadot/bridges/deployments/types/wococo.json deleted file mode 100644 index 7c7b4ff2768..00000000000 --- a/polkadot/bridges/deployments/types/wococo.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "Address": "WococoAddress", - "LookupSource": "WococoLookupSource", - "Fee": "WococoBalance", - "Balance": "WococoBalance", - "Hash": "WococoBlockHash", - "BlockHash": "WococoBlockHash", - "BlockNumber": "WococoBlockNumber", - "BridgedBlockHash": "RococoBlockHash", - "BridgedBlockNumber": "RococoBlockNumber", - "BridgedHeader": "RococoHeader", - "Parameter": { - "_enum": { - "WococoToRococoConversionRate": "u128" - } - } -} diff --git a/polkadot/bridges/deployments/ui/README.md b/polkadot/bridges/deployments/ui/README.md deleted file mode 100644 index ad946fc699b..00000000000 --- a/polkadot/bridges/deployments/ui/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# bridges-ui - -This is a Bridges UI docker configuration file. The source of the Bridges UI code -can be found in [the repository](https://github.com/paritytech/parity-bridges-ui). -The CI should create and publish a docker image that is used by this configuration -file, so that the code is always using the latest version. -The UI is configured to point to local Rialto and Millau nodes to retrieve the require -data. - -This image can be used together with `nginx-proxy` to expose the UI externally. See -`VIRTUAL_*` and `LETSENCRYPT_*` environment variables. - -After start the UI is available at `http://localhost:8080` - -## How to? - -In current directory: -```bash -docker-compose up -d -``` - -Then start `rialto` & `millau` networks with the same command (one folder up) or -run the full setup by using `../run.sh` script. 
diff --git a/polkadot/bridges/deployments/ui/docker-compose.yml b/polkadot/bridges/deployments/ui/docker-compose.yml deleted file mode 100644 index 8b3f8178c36..00000000000 --- a/polkadot/bridges/deployments/ui/docker-compose.yml +++ /dev/null @@ -1,13 +0,0 @@ -version: '3.5' -services: - bridges-ui: - image: paritytech/parity-bridges-ui - environment: - VIRTUAL_HOST: ui.brucke.link - VIRTUAL_PORT: 80 - LETSENCRYPT_HOST: ui.brucke.link - LETSENCRYPT_EMAIL: admin@parity.io - CHAIN_1_SUBSTRATE_PROVIDER: ${UI_CHAIN_1:-ws://localhost:9944} - CHAIN_2_SUBSTRATE_PROVIDER: ${UI_CHAIN_2:-ws://localhost:19944} - ports: - - "8080:80" diff --git a/polkadot/bridges/docs/high-level-overview.md b/polkadot/bridges/docs/high-level-overview.md deleted file mode 100644 index 2642c20c86a..00000000000 --- a/polkadot/bridges/docs/high-level-overview.md +++ /dev/null @@ -1,165 +0,0 @@ -# High-Level Bridge Documentation - -## Purpose - -Trustless connecting between two Substrate-based chains using GRANDPA finality. - -## Overview - -Even though we support two-way bridging, the documentation will generally talk about a one-sided -interaction. That's to say, we will only talk about syncing headers and messages from a _source_ -chain to a _target_ chain. This is because the two-sided interaction is really just the one-sided -interaction with the source and target chains switched. - -To understand the full interaction with the bridge, take a look at the -[testing scenarios](./testing-scenarios.md) document. It describes potential use cases and describes -how each of the layers outlined below is involved. - -The bridge is built from various components. Here is a quick overview of the important ones. - -### Header Sync - -A light client of the source chain built into the target chain's runtime. It is a single FRAME -pallet. It provides a "source of truth" about the source chain headers which have been finalized. -This is useful for higher level applications. - -### Headers Relayer - -A standalone application connected to both chains. It submits every source chain header it sees to -the target chain through RPC. - -### Message Delivery - -A FRAME pallet built on top of the header sync pallet. It allows users to submit messages to the -source chain, which are to be delivered to the target chain. The delivery protocol doesn't care -about the payload more than it has to. Handles replay protection and message ordering. - -### Message Dispatch - -A FRAME pallet responsible for interpreting the payload of delivered messages. - -### Message Relayer - -A standalone application handling delivery of the messages from source chain to the target chain. - -## Processes - -High level sequence charts of the process can be found in [a separate document](./high-level.html). - -### Substrate (GRANDPA) Header Sync - -The header sync pallet (`pallet-bridge-grandpa`) is an on-chain light client for chains which use -GRANDPA finality. It is part of the target chain's runtime, and accepts finality proofs from the source -chain. Verify GRANDPA finality proofs (a.k.a justifications) and track GRANDPA finality set changes. - -The pallet does not care about what block production mechanism is used for the source chain -(e.g Aura or BABE) as long as it uses the GRANDPA finality gadget. In fact the pallet does not -necessarily store all produced headers, we only import headers with valid GRANDPA justifications. - -Referer to the [pallet documentation](../modules/grandpa/src/lib.rs) for more details. 
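To make the header-sync idea above concrete, here is a minimal, self-contained Rust sketch of an on-chain light client that only imports headers carrying a finality proof signed by more than 2/3 of the current authority set, and that tracks authority-set changes. All names and types below are simplified stand-ins invented for illustration (signature verification and ancestry checks are elided); they do not mirror the actual `pallet-bridge-grandpa` interface.

```rust
use std::collections::BTreeSet;

type AuthorityId = [u8; 32];
type Hash = [u8; 32];

struct Header {
    hash: Hash,
    number: u64,
    /// Authority-set change announced by this header, if any.
    new_authorities: Option<Vec<AuthorityId>>,
}

struct FinalityProof {
    target_hash: Hash,
    target_number: u64,
    /// Authorities that signed a precommit for the target block (signatures elided).
    signers: BTreeSet<AuthorityId>,
}

struct HeaderSync {
    authorities: Vec<AuthorityId>,
    best_finalized: Option<(u64, Hash)>,
}

impl HeaderSync {
    /// Import a header only if the proof commits to it and more than 2/3 of the
    /// current authority set signed it (GRANDPA's supermajority threshold).
    fn import_finalized(&mut self, header: Header, proof: FinalityProof) -> Result<(), &'static str> {
        if proof.target_hash != header.hash || proof.target_number != header.number {
            return Err("finality proof does not commit to this header");
        }
        let signed = self.authorities.iter().filter(|a| proof.signers.contains(*a)).count();
        if 3 * signed <= 2 * self.authorities.len() {
            return Err("not enough authority signatures");
        }
        // Track authority-set changes so that later proofs are checked against
        // the correct set.
        if let Some(new_set) = header.new_authorities {
            self.authorities = new_set;
        }
        self.best_finalized = Some((header.number, header.hash));
        Ok(())
    }
}

fn main() {
    let authorities: Vec<AuthorityId> = vec![[1; 32], [2; 32], [3; 32]];
    let header = Header { hash: [9; 32], number: 1, new_authorities: None };
    let proof = FinalityProof {
        target_hash: [9; 32],
        target_number: 1,
        signers: [[1; 32], [2; 32], [3; 32]].into_iter().collect(),
    };
    let mut sync = HeaderSync { authorities, best_finalized: None };
    assert!(sync.import_finalized(header, proof).is_ok());
}
```

The 2/3 threshold mirrors GRANDPA's supermajority requirement; everything else that the real pallet checks is deliberately left out of this sketch.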
- -#### Header Relayer strategy - -There is currently no reward strategy for the relayers at all. They also are not required to be -staked or registered on-chain, unlike in other bridge designs. We consider the header sync to be -an essential part of the bridge and the incentivization should be happening on the higher layers. - -At the moment, signed transactions are the only way to submit headers to the header sync pallet. -However, in the future we would like to use unsigned transactions for headers delivery. This will -allow transaction de-duplication to be done at the transaction pool level and also remove the cost -for message relayers to run header relayers. - -### Message Passing - -Once header sync is maintained, the target side of the bridge can receive and verify proofs about -events happening on the source chain, or its internal state. On top of this, we built a message -passing protocol which consists of two parts described in following sections: message delivery and -message dispatch. - -#### Message Lanes Delivery - -The [Message delivery pallet](../modules/messages/src/lib.rs) is responsible for queueing up -messages and delivering them in order on the target chain. It also dispatches messages, but we will -cover that in the next section. - -The pallet supports multiple lanes (channels) where messages can be added. Every lane can be -considered completely independent from others, which allows them to make progress in parallel. -Different lanes can be configured to validated messages differently (e.g higher rewards, specific -types of payload, etc.) and may be associated with a particular "user application" built on top of -the bridge. Note that messages in the same lane MUST be delivered _in the same order_ they were -queued up. - -The message delivery protocol does not care about the payload it transports and can be coupled -with an arbitrary message dispatch mechanism that will interpret and execute the payload if delivery -conditions are met. Each delivery on the target chain is confirmed back to the source chain by the -relayer. This is so that she can collect the reward for delivering these messages. - -Users of the pallet add their messages to an "outbound lane" on the source chain. When a block is -finalized message relayers are responsible for reading the current queue of messages and submitting -some (or all) of them to the "inbound lane" of the target chain. Each message has a `nonce` -associated with it, which serves as the ordering of messages. The inbound lane stores the last -delivered nonce to prevent replaying messages. To successfully deliver the message to the inbound lane -on target chain the relayer has to present present a storage proof which shows that the message was -part of the outbound lane on the source chain. - -During delivery of messages they are immediately dispatched on the target chain and the relayer is -required to declare the correct `weight` to cater for all messages dispatch and pay all required -fees of the target chain. To make sure the relayer is incentivised to do so, on the source chain: -- the user provides a declared dispatch weight of the payload -- the pallet calculates the expected fee on the target chain based on the declared weight -- the pallet converts the target fee into source tokens (based on a price oracle) and reserves - enough tokens to cover for the delivery, dispatch, confirmation and additional relayers reward. 
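The nonce bookkeeping described above is the core of the ordering and replay protection, so a small sketch may help. The types below are simplified stand-ins (storage proofs, fees and weights are left out) and are not the real `pallet-bridge-messages` types.

```rust
#[derive(Default)]
struct OutboundLane {
    latest_generated_nonce: u64,
    /// Messages waiting to be picked up by a relayer: (nonce, payload).
    queued: Vec<(u64, Vec<u8>)>,
}

#[derive(Default)]
struct InboundLane {
    last_delivered_nonce: u64,
}

impl OutboundLane {
    /// Queue a message on the source chain and return its nonce.
    fn send(&mut self, payload: Vec<u8>) -> u64 {
        self.latest_generated_nonce += 1;
        self.queued.push((self.latest_generated_nonce, payload));
        self.latest_generated_nonce
    }
}

impl InboundLane {
    /// Accept a batch of messages on the target chain: anything at or below the
    /// last delivered nonce is a replay, and gaps in the sequence are rejected,
    /// which is what enforces in-order delivery.
    fn receive(&mut self, messages: &[(u64, Vec<u8>)]) -> Result<(), &'static str> {
        for (nonce, _payload) in messages {
            if *nonce != self.last_delivered_nonce + 1 {
                return Err("message is a replay or out of order");
            }
            // Dispatch of `_payload` would happen here.
            self.last_delivered_nonce = *nonce;
        }
        Ok(())
    }
}

fn main() {
    let mut outbound = OutboundLane::default();
    let mut inbound = InboundLane::default();
    outbound.send(b"first message".to_vec());
    outbound.send(b"second message".to_vec());
    // The relayer reads the queued messages (together with a storage proof,
    // elided here) and submits them to the inbound lane in order.
    assert!(inbound.receive(&outbound.queued).is_ok());
    assert_eq!(inbound.last_delivered_nonce, 2);
    // Replaying the same batch is rejected.
    assert!(inbound.receive(&outbound.queued).is_err());
}
```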
- -If the declared weight turns out to be too low on the target chain the message is delivered but -it immediately fails to dispatch. The fee and reward is collected by the relayer upon confirmation -of delivery. - -Due to the fact that message lanes require delivery confirmation transactions, they also strictly -require bi-directional header sync (i.e. you can't use message delivery with one-way header sync). - -#### Dispatching Messages - -The [Message dispatch pallet](../modules/dispatch/src/lib.rs) is used to perform the actions -specified by messages which have come over the bridge. For Substrate-based chains this means -interpreting the source chain's message as a `Call` on the target chain. - -An example `Call` of the target chain would look something like this: - -```rust -target_runtime::Call::Balances(target_runtime::pallet_balances::Call::transfer(recipient, amount)) -``` - -When sending a `Call` it must first be SCALE encoded and then sent to the source chain. The `Call` -is then delivered by the message lane delivery mechanism from the source chain to the target chain. -When a message is received the inbound message lane on the target chain will try and decode the -message payload into a `Call` enum. If it's successful it will be dispatched after we check that the -weight of the call does not exceed the weight declared by the sender. The relayer pays fees for -executing the transaction on the target chain, but her costs should be covered by the sender on the -source chain. - -When dispatching messages there are three Origins which can be used by the target chain: -1. Root Origin -2. Source Origin -3. Target Origin - -Senders of a message can indicate which one of the three origins they would like to dispatch their -message with. However, there are restrictions on who/what is allowed to dispatch messages with a -particular origin. - -The Root origin represents the source chain's Root account on the target chain. This origin can can -only be dispatched on the target chain if the "send message" request was made by the Root origin of -the source chain - otherwise the message will fail to be dispatched. - -The Source origin represents an account without a private key on the target chain. This account will -be generated/derived using the account ID of the sender on the source chain. We don't necessarily -require the source account id to be associated with a private key on the source chain either. This -is useful for representing things such as source chain proxies or pallets. - -The Target origin represents an account with a private key on the target chain. The sender on the -source chain needs to prove ownership of this account by using their target chain private key to -sign: `(Call, SourceChainAccountId).encode()`. This will be included in the message payload and -verified by the target chain before dispatch. - -See [`CallOrigin` documentation](../primitives/message-dispatch/src/lib.rs) for more details. - -#### Message Relayers Strategy diff --git a/polkadot/bridges/docs/high-level.html b/polkadot/bridges/docs/high-level.html deleted file mode 100644 index 3c4c6178c95..00000000000 --- a/polkadot/bridges/docs/high-level.html +++ /dev/null @@ -1,55 +0,0 @@ - - - - - - High Level Bridge Components - - -
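As a rough illustration of the dispatch step described above (decode the delivered payload into a call, refuse it if its weight exceeds what the sender declared, otherwise execute it), here is a toy Rust sketch. The byte encoding and the `Call` variants are invented for the example; the real pallet decodes a SCALE-encoded payload into the target runtime's `Call` type.

```rust
type Weight = u64;

#[derive(Debug)]
enum Call {
    /// Transfer `amount` to `recipient` (account ids shortened to a single byte for brevity).
    Transfer { recipient: u8, amount: u64 },
}

impl Call {
    /// Toy decoder: 1 byte call index, 1 byte recipient, 8 bytes little-endian amount.
    fn decode(payload: &[u8]) -> Option<Call> {
        match payload {
            [0, recipient, amount @ ..] if amount.len() == 8 => Some(Call::Transfer {
                recipient: *recipient,
                amount: u64::from_le_bytes(amount.try_into().ok()?),
            }),
            _ => None,
        }
    }

    fn weight(&self) -> Weight {
        match self {
            Call::Transfer { .. } => 100_000,
        }
    }
}

/// Dispatch a delivered message payload under the weight declared on the source chain.
fn dispatch(payload: &[u8], declared_weight: Weight) -> Result<(), &'static str> {
    let call = Call::decode(payload).ok_or("payload does not decode into a Call")?;
    if call.weight() > declared_weight {
        // The message is delivered (its nonce is consumed) but dispatch fails,
        // as described above.
        return Err("declared weight is too low");
    }
    println!("dispatching {call:?}");
    Ok(())
}

fn main() {
    let mut payload = vec![0u8, 7];
    payload.extend_from_slice(&42u64.to_le_bytes());
    assert!(dispatch(&payload, 100_000).is_ok());
    assert!(dispatch(&payload, 10).is_err());
}
```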

Header Sync

-

Header pallet on the target chain keeps track of the forks, but requires finality for blocks that perform authority set changes. That means it won't sync a fork with an authority set change unless that change is finalized.

-
- sequenceDiagram - participant Source Chain - participant Relayer - participant Target Chain - Note right of Target Chain: Best: 0, Finalized: 0 - Source Chain ->> Source Chain: Import Block 1 - Source Chain ->> Source Chain: Import Block 2 - Relayer ->> Target Chain: Submit Block 1 - Note right of Target Chain: Best: 1, Finalized: 0 - Relayer ->> Target Chain: Submit Block 2 - Note right of Target Chain: Best: 2, Finalized: 0 - Source Chain ->> Source Chain: Import Block 2' - Relayer ->> Target Chain: Submit Block 2' - Note right of Target Chain: Best: 2 or 2', Finalized: 0 - Source Chain ->> Source Chain: Finalize Block 2' - Relayer ->> Target Chain: Submit Finality of Block 2' - Note right of Target Chain: Best: 2', Finalized: 2' -
-
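A minimal sketch of the import rule stated in the caption above. The types and the `import_header` function are hypothetical stand-ins (the real header-sync pallet tracks full fork trees and ancestry); the sketch only encodes the one rule from the caption: a header that enacts an authority set change is not accepted without finality.

```rust
// Illustrative sketch of the rule above; not the real header-sync pallet.

struct Header {
    number: u64,
    enacts_authority_set_change: bool,
}

struct FinalityProof; // stand-in for a GRANDPA justification

#[derive(Debug)]
enum ImportError {
    /// Forks may be tracked freely, but an authority set change must be finalized first.
    UnfinalizedAuthoritySetChange,
}

fn import_header(header: &Header, finality: Option<&FinalityProof>) -> Result<(), ImportError> {
    if header.enacts_authority_set_change && finality.is_none() {
        return Err(ImportError::UnfinalizedAuthoritySetChange);
    }
    Ok(())
}

fn main() {
    let scheduled_change = Header { number: 2, enacts_authority_set_change: true };
    assert!(import_header(&scheduled_change, None).is_err());
    assert!(import_header(&scheduled_change, Some(&FinalityProof)).is_ok());
}
```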

Message Delivery (single lane)

-

Pending messages are stored on-chain (source) so the relayer code is completely stateless - it can read all the details from the chain.

-

Delivering pending messages requires finality first.

-
- sequenceDiagram - participant Source Chain - participant Relayer - participant Target Chain - Source Chain ->> Source Chain: Queue Message 1 - Source Chain ->> Source Chain: Queue Message 2 - Source Chain ->> Source Chain: Queue Message 3 - Note left of Source Chain: Queued Messages: [1, 2, 3, ] - Note left of Source Chain: Reward for [1, 2, 3, ] reserved - Relayer ->> Target Chain: Deliver Messages 1..2 - Note right of Target Chain: Target chain dispatches the messages.
To Confirm: {1..2 => relayer_1} - Relayer ->> Source Chain: Delivery Confirmation of 1..2 - Note left of Source Chain: Queued Messages: [3, ] - Note left of Source Chain: Reward payout for [1, 2, ] - Relayer -->> Target Chain: Confirmed Messages 1..2 - Note right of Target Chain: To Confirm: {} - Note over Relayer, Target Chain: (this is not a separate transaction,
it's bundled with the "Deliver Messages" proof) -
- - - - diff --git a/polkadot/bridges/docs/plan.md b/polkadot/bridges/docs/plan.md deleted file mode 100644 index 9c4106d9ade..00000000000 --- a/polkadot/bridges/docs/plan.md +++ /dev/null @@ -1,22 +0,0 @@ -Plan for the Internal Audit: -1. High-level overview (describing layers, maybe with pictures) - - what have we done already. - [Tomek to present] - [Hernando to help with diagrams today] - -2. Demo? How to play with the network. - [Hernando] - -3. Demo of token transfer on Millau. - [Hernando] - -4. Go through the scenario description and let people ask questions in the meantime. - Jump to the code on demand. - [Tomek, Hernando, Slava] - - ... - -5. The roadmap - - outstanding issues. - [Tomek] - diff --git a/polkadot/bridges/docs/scenario1.html b/polkadot/bridges/docs/scenario1.html deleted file mode 100644 index 808a0c34f0d..00000000000 --- a/polkadot/bridges/docs/scenario1.html +++ /dev/null @@ -1,47 +0,0 @@ - - - - - - Flow Chart of Millau to Rialto Transfer - - -

Scenario: mDave sending RLT to rEve

-
- sequenceDiagram - participant mDave - participant Millau - participant Bridge Relayer - participant Rialto - participant rEve - Rialto->>Rialto: Endow r(mDave) with RLT. - mDave->>Millau: send_message(transfer, 5 RLT, rEve) - Millau->>Millau: Locks fee & reward for the relayer and queues the message. - rect rgb(205, 226, 244) - Bridge Relayer->>+Millau: What's your best header? - Millau-->>-Bridge Relayer: It's header 5. - Bridge Relayer->>+Rialto: What's the best Millau header you know about? - Rialto-->>-Bridge Relayer: I only know about 4. - Bridge Relayer->>Rialto: Cool, here is Millau header 5 [`submit_signed_header()`]. - Bridge Relayer->>+Rialto: What's the best finalized Millau header you know about? - Rialto-->>-Bridge Relayer: I only know about 3. - Bridge Relayer->>+Millau: Do you have a finality proof for 4..5? - Millau-->>-Bridge Relayer: Yes I do, here it is. - Bridge Relayer->>Rialto: Here is the finality proof for 5 [`finalize_header()`]. - end - rect rgb(218, 195, 244) - Bridge Relayer->>+Millau: Do you have any messages for me to deliver (at 5)? - Millau-->>-Bridge Relayer: Yes, here they are. - Bridge Relayer->>+Rialto: I have some new messages for you [`receive_messages_proof()`]. - Rialto->>Rialto: Validate and Dispatch Message. - Rialto->>rEve: Transfer(5 RLT) from r(mDave). - Rialto-->>-Bridge Relayer: Event(Message Successfully Dispatched). - Bridge Relayer->>Millau: I sent your message, can I get paid now [`receive_messages_delivery_proof`]? - Millau-->>Bridge Relayer: Yes, here you go $$$. - Bridge Relayer ->>Rialto: These messages are confirmed now, feel free to clean up. - end -
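The sequence above can also be read as one round of a relayer loop. The sketch below is a hedged, self-contained model of that round; the structs and counters are invented for illustration, while the calls named in the comments (`submit_signed_header()`, `finalize_header()`, `receive_messages_proof()`, `receive_messages_delivery_proof`) are the ones shown in the diagram.

```rust
// Illustrative model of one relay round from the diagram; not the actual relayer code.

#[derive(Default)]
struct MillauView {
    best: u64,
    finalized: u64,
    queued_messages: u64,
}

#[derive(Default)]
struct RialtoView {
    best_millau: u64,
    finalized_millau: u64,
    delivered_messages: u64,
}

fn relay_round(millau: &mut MillauView, rialto: &mut RialtoView) {
    // 1. Header sync: submit Millau headers Rialto does not know yet (`submit_signed_header()`).
    if rialto.best_millau < millau.best {
        rialto.best_millau = millau.best;
    }
    // 2. Finality sync: submit a finality proof once Millau finalizes further (`finalize_header()`).
    if rialto.finalized_millau < millau.finalized {
        rialto.finalized_millau = millau.finalized;
    }
    // 3. Message delivery: deliver queued messages covered by the finalized Millau block
    //    (`receive_messages_proof()`), then confirm delivery back on Millau
    //    (`receive_messages_delivery_proof`) so the relayer is paid and queues are pruned.
    let deliverable = millau.queued_messages;
    rialto.delivered_messages += deliverable;
    millau.queued_messages -= deliverable;
}

fn main() {
    let mut millau = MillauView { best: 5, finalized: 5, queued_messages: 1 };
    let mut rialto = RialtoView::default();
    relay_round(&mut millau, &mut rialto);
    assert_eq!(rialto.delivered_messages, 1);
}
```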
- - - - diff --git a/polkadot/bridges/docs/send-message.md b/polkadot/bridges/docs/send-message.md deleted file mode 100644 index 6984c56d67f..00000000000 --- a/polkadot/bridges/docs/send-message.md +++ /dev/null @@ -1,131 +0,0 @@ -# How to send messages - -The Substrate-to-Substrate relay comes with a command line interface (CLI) which is implemented -by the `substrate-relay` binary. - -``` -Substrate-to-Substrate relay - -USAGE: - substrate-relay - -FLAGS: - -h, --help - Prints help information - - -V, --version - Prints version information - - -SUBCOMMANDS: - help Prints this message or the help of the given subcommand(s) - init-bridge Initialize on-chain bridge pallet with current header data - relay-headers Start headers relay between two chains - relay-messages Start messages relay between two chains - send-message Send custom message over the bridge -``` -The relay-related commands `relay-headers` and `relay-messages` essentially run a continuous -sync loop between the `Millau` and `Rialto` chains. The `init-bridge` command submits initialization -transactions. An initialization transaction brings an initial header and authority set from a source -chain to a target chain. The header synchronization then starts from that header. - -For sending custom messages over an available bridge, the `send-message` command is used. - -``` -Send custom message over the bridge. - -Allows interacting with the bridge by sending messages over `Messages` component. The message is being sent to the -source chain, delivered to the target chain and dispatched there. - -USAGE: - substrate-relay send-message - -FLAGS: - -h, --help Prints help information - -V, --version Prints version information - -SUBCOMMANDS: - help Prints this message or the help of the given subcommand(s) - millau-to-rialto Submit message to given Millau -> Rialto lane - rialto-to-millau Submit message to given Rialto -> Millau lane - -``` -Messages are sent from a source chain to a target chain using a so-called `message lane`. Message lanes handle -both message transport and message dispatch. There is one command for submitting a message to each of the two -available bridges, namely `millau-to-rialto` and `rialto-to-millau`. - -Submitting a message requires a number of arguments to be provided. Those arguments are essentially the same -for both submit message commands, hence only the output for `millau-to-rialto` is shown below. - -``` -Submit message to given Millau -> Rialto lane - -USAGE: - substrate-relay send-message millau-to-rialto [OPTIONS] --lane --source-host --source-port --source-signer --origin --target-signer - -FLAGS: - -h, --help Prints help information - -V, --version Prints version information - -OPTIONS: - --fee - Delivery and dispatch fee.
If not passed, determined automatically - --lane Hex-encoded lane id - --source-host Connect to Source node at given host - --source-port Connect to Source node websocket server at given port - --source-signer - The SURI of secret key to use when transactions are submitted to the Source node - - --source-signer-password - The password for the SURI of secret key to use when transactions are submitted to the Source node - - --origin - The origin to use when dispatching the message on the target chain [possible values: Target, Source] - - --target-signer - The SURI of secret key to use when transactions are submitted to the Target node - - --target-signer-password - The password for the SURI of secret key to use when transactions are submitted to the Target node - - -SUBCOMMANDS: - help Prints this message or the help of the given subcommand(s) - remark Make an on-chain remark (comment) - transfer Transfer the specified `amount` of native tokens to a particular `recipient` - -``` -As can be seen from the output, there are two types of messages available: `remark` and `transfer`. -A remark is some opaque message which will be placed on-chain. For basic testing, a remark is -the easiest to go with. - -Usage of the arguments is best explained with an example. Below you can see how a remark -would look: - -``` -substrate-relay send-message millau-to-rialto \ - --source-host=127.0.0.1 \ - --source-port=10946 \ - --source-signer=//Dave \ - --target-signer=//Dave \ - --lane=00000000 \ - --origin Target \ - remark -``` -Messages are basically regular transactions. That means they have to be signed. In order -to send a message, you have to control an account's private key on both the source and -the target chain. Those accounts are specified using the `--source-signer` and `--target-signer` -arguments in the example above. - -Message delivery and dispatch require a fee to be paid. In the example above, we have not -specified the `--fee` argument. Hence, the fee will be estimated automatically. Note that -in order to pay the fee, the message sender account has to have sufficient funds available. - -The `--origin` argument allows you to specify under which authority the message will be dispatched -on the target chain. Accepted values are `Target` and `Source`. - -Although not strictly necessary, it is recommended to use one of the well-known development -accounts (`Alice`, `Bob`, `Charlie`, `Dave`, `Eve`) for message sending. Those accounts are -endowed with funds for fee payment. In addition, the development `Seed URI` syntax -(like `//Dave`) can be used for the signer, which removes the need for a password. diff --git a/polkadot/bridges/docs/testing-scenarios.md b/polkadot/bridges/docs/testing-scenarios.md deleted file mode 100644 index 343720524ec..00000000000 --- a/polkadot/bridges/docs/testing-scenarios.md +++ /dev/null @@ -1,221 +0,0 @@ -# Testing Scenarios - -In the scenarios, for simplicity, we call the chains Kusama (KSM token) and Polkadot (DOT token), -but they should be applicable to any other chains. The first scenario has a detailed description of -the entire process (also see the [sequence diagram](./scenario1.html)). Other scenarios only contain -a simplified interaction, focusing on things that are unique to that particular scenario. - -Notation: -- kX - user X interacting with Kusama chain.
-- `k(kX)` - Kusama account id of user kX (native account id; usable on Kusama) -- `p(kX)` - Polkadot account id of user kX (account id derived from `k(kX)` usable on Polkadot) -- [Kusama] ... - Interaction happens on Kusama (e.g. the user interacts with Kusama chain) -- [Polkadot] ... - Interaction happens on Polkadot - -Basic Scenarios -=========================== - -Scenario 1: Kusama's Alice receiving & spending DOTs ---------------------------- - -Kusama's Alice (kAlice) receives 5 DOTs from Polkadot's Bob (pBob) and sends half of them to -kCharlie. - -1. Generate kAlice's DOT address (`p(kAlice)`). - See function: - - ```rust - bp_runtime::derive_account_id(b"pdot", kAlice) - ``` - - or: - - ```rust - let hash = bp_polkadot::derive_kusama_account_id(kAlice); - let p_kAlice = bp_polkadot::AccountIdConverter::convert(hash); - ``` - -2. [Polkadot] pBob transfers 5 DOTs to `p(kAlice)` - 1. Creates & signs a transaction with `Call::Transfer(..)` - 1. It is included in a block. - 1. kAlice observes the Polkadot chain to see her balance at `p(kAlice)` updated. - -3. [Kusama] kAlice sends 2.5 DOTs to `p(kCharlie)` - 1. kAlice prepares: - ```rust - let call = polkadot::Call::Balances(polkadot::Balances::Transfer(p(kCharlie), 2.5DOT)).encode(); - let weight = call.get_dispatch_info().weight; - ``` - - 1. kAlice prepares Kusama transaction: - ```rust - kusama::Call::Messages::::send_message( - // dot-transfer-lane (truncated to 4 bytes) - lane_id, - payload: MessagePayload { - // Get from current polkadot runtime (kind of hardcoded) - spec_version: 1, - // kAlice should know the exact dispatch weight of the call on the target - // source verifies: at least to cover call.length() and below max weight - weight, - // simply bytes, we don't know anything about that on the source chain - call, - // origin that should be used during dispatch on the target chain - origin: CallOrigin::SourceAccount(kAlice), - }, - delivery_and_dispatch_fee: { - (single_message_delivery_weight - // source weight = X * target weight - + convert_target_weight_to_source_weight(weight) - + confirmation_transaction_weight - ) - // This uses an on-chain oracle to convert weights of the target chain to source fee - * weight_to_fee - // additional reward for the relayer (pallet parameter) - + relayers_fee - }, - ) - ``` - - 1. [Kusama] kAlice sends Kusama transaction with the above `Call` and pays regular fees. The - dispatch additionally reserves target-chain delivery and dispatch fees (including relayer's - reward). - -4. [Kusama] kAlice's transaction is included in block `B1` - -### Syncing headers loop - -5. Relayer sees that `B1` has not yet been delivered to the target chain. - [Sync loop code](https://github.com/paritytech/parity-bridges-common/blob/8b327a94595c4a6fae6d7866e24ecf2390501e32/relays/headers-relay/src/sync_loop.rs#L199). - -1. Relayer prepares transaction which delivers `B1` along with all of the missing - ancestors to the target chain (one header per transaction). - -1. After the transaction is successfully dispatched, the Polkadot on-chain light client of the Kusama - chain learns about block `B1` - it is stored in the on-chain storage. - -### Syncing finality loop - -8. Relayer is subscribed to finality events on Kusama. Relayer gets a finality notification for - block `B3`. - -1. The header sync informs the target chain about `B1..B3` blocks (see point 6). - -1.
Relayer learns about missing finalization of `B1..B3` on the target chain, see - [finality maintenance code](https://github.com/paritytech/parity-bridges-common/blob/8b327a94595c4a6fae6d7866e24ecf2390501e32/relays/substrate/src/headers_maintain.rs#L107). - -1. Relayer submits justification for `B3` to the target chain (`finalize_header`). - See [#421](https://github.com/paritytech/parity-bridges-common/issues/421) for multiple - authority set changes support in Relayer (i.e. what block the target chain expects, not only - what I have). - - Relayer is doing two things: - - syncing on demand (what blocks miss finality) - - and syncing as notifications are received (recently finalized on-chain) - -1. Eventually Polkadot on-chain light client of Kusama learns about finality of `B1`. - -### Syncing messages loop - -13. The relayer checks the on-chain storage (last finalized header on the source, best header on the - target): - - Kusama outbound lane - - Polkadot inbound lane - Lanes contain `latest_generated_nonce` and `latest_received_nonce` respectively. The relayer - syncs messages in that range. - -1. The relayer gets a proof for every message in that range (using the RPC of the messages module) - -1. The relayer creates a message delivery transaction (but it has weight, size, and count limits). - The count limit is there to make the loop of delivery code bounded. - ```rust - receive_message_proof( - relayer_id, // account id of the source chain - proof, // messages + proofs (hash of source block `B1`, nonces, lane_id + storage proof) - dispatch_weight // relayer declares how much it will take to dispatch all messages in that transaction, - ) - ``` - The `proof` can also contain an update of the outbound lane state of the source chain, which indicates - the delivery confirmation of these messages and reward payment, so that the target chain can - truncate its unpaid rewards vector. - - The target chain stores `relayer_ids` that delivered messages because the relayer can generate - a storage proof to show that they did indeed deliver those messages. The reward is paid on the - source chain and we inform the target chain about that fact so it can prune these `relayer_ids`. - - It's totally fine if there are no messages, and we only include the reward payment proof - when calling that function. - -1. 🥳 the message is now delivered and dispatched on the target chain! - -1. The relayer now needs to confirm the delivery to claim her payment and reward on the source - chain. - -1. The relayer creates a transaction on the source chain with the call: - - ```rust - receive_messages_delivery_proof( - proof, // hash of the finalized target chain block, lane_id, storage proof - ) - ``` - -### UI challenges - -- The UI should warn before (or prevent) sending to `k(kCharlie)`! - - -Scenario 2: Kusama's Alice nominating validators with her DOTs ---------------------------- - -kAlice receives 10 DOTs from pBob and nominates `p(pCharlie)` and `p(pDave)`. - -1. Generate kAlice's DOT address (`p(kAlice)`) -2. [Polkadot] pBob transfers 5 DOTs to `p(kAlice)` -3. [Kusama] kAlice sends a batch transaction: - - `staking::Bond` transaction to create stash account choosing `p(kAlice)` as the controller account. - - `staking::Nominate(vec![p(pCharlie)])` to nominate pCharlie using the controller account. - - -Scenario 3: Kusama Treasury receiving & spending DOTs ---------------------------- - -pBob sends 15 DOTs to the Kusama Treasury, which Kusama Governance decides to transfer to kCharlie. - -1.
Generate a source account for the treasury (`kTreasury`). -2. [Polkadot] pBob transfers 15 DOTs to `p(kTreasury)`. -3. [Kusama] Send a governance proposal to send a bridge message which transfers funds to `p(kCharlie)`. -4. [Kusama] Dispatch the governance proposal using `kTreasury` account id. - -Extra scenarios -=========================== - -Scenario 4: Kusama's Alice setting up 1-of-2 multi-sig to spend from either Kusama or Polkadot ---------------------------- - -Assuming `p(pAlice)` has at least 7 DOTs already. - -1. Generate multisig account id: `pMultiSig = multi_account_id(&[p(kAlice), p(pAlice)], 1)`. -2. [Kusama] Transfer 7 DOTs to `pMultiSig` using `TargetAccount` origin of `pAlice`. -3. [Kusama] Transfer 2 DOTs to `p(kAlice)` from the multisig: - - Send `multisig::as_multi_threshold_1(vec![p(pAlice)], balances::Transfer(p(kAlice), 2))` - -Scenario 5: Kusama Treasury staking & nominating validators with DOTs ---------------------------- - -Scenario 6: Kusama Treasury voting in Polkadot's democracy proposal ---------------------------- - -Potentially interesting scenarios -=========================== - -Scenario 7: Polkadot's Bob spending his DOTs by using Kusama chain ---------------------------- - -We can assume he holds KSM. Problem: he can pay fees, but can't really send (sign) a transaction? -Shall we support some kind of dispatcher? - -Scenario 8: Kusama Governance taking over Kusama's Alice DOT holdings ---------------------------- - -We use a `SourceRoot` call to transfer her DOTs to the Kusama treasury. Source chain root -should also be able to send messages as `CallOrigin::SourceAccount(Alice)` though. diff --git a/polkadot/bridges/fuzz/storage-proof/Cargo.lock b/polkadot/bridges/fuzz/storage-proof/Cargo.lock deleted file mode 100644 index 895acfd87f9..00000000000 --- a/polkadot/bridges/fuzz/storage-proof/Cargo.lock +++ /dev/null @@ -1,2252 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing.
-version = 3 - -[[package]] -name = "Inflector" -version = "0.11.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" -dependencies = [ - "lazy_static", - "regex", -] - -[[package]] -name = "addr2line" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7" -dependencies = [ - "gimli", -] - -[[package]] -name = "adler" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e" - -[[package]] -name = "ahash" -version = "0.4.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "739f4a8db6605981345c5654f3a85b056ce52f37a39d34da03f25bf2151ea16e" - -[[package]] -name = "aho-corasick" -version = "0.7.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5" -dependencies = [ - "memchr", -] - -[[package]] -name = "ansi_term" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" -dependencies = [ - "winapi", -] - -[[package]] -name = "anyhow" -version = "1.0.38" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afddf7f520a80dbf76e6f50a35bca42a2331ef227a28b3b6dc5c2e2338d114b1" - -[[package]] -name = "arbitrary" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25e0a02cf12f1b1f48b14cb7f8217b876d09992b39c816ffb3b1ba64dd979a87" - -[[package]] -name = "arrayref" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" - -[[package]] -name = "arrayvec" -version = "0.4.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9" -dependencies = [ - "nodrop", -] - -[[package]] -name = "arrayvec" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" - -[[package]] -name = "arrayvec" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" - -[[package]] -name = "async-trait" -version = "0.1.42" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d3a45e77e34375a7923b1e8febb049bb011f064714a8e17a1a616fef01da13d" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi", - "libc", - "winapi", -] - -[[package]] -name = "autocfg" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" - -[[package]] -name = "backtrace" -version = "0.3.56" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d117600f438b1707d4e4ae15d3595657288f8235a0eb593e80ecc98ab34e1bc" -dependencies = [ - "addr2line", - "cfg-if 1.0.0", - "libc", - 
"miniz_oxide", - "object", - "rustc-demangle", -] - -[[package]] -name = "base58" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5024ee8015f02155eee35c711107ddd9a9bf3cb689cf2a9089c97e79b6e1ae83" - -[[package]] -name = "bitflags" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" - -[[package]] -name = "bitvec" -version = "0.17.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41262f11d771fd4a61aa3ce019fca363b4b6c282fca9da2a31186d3965a47a5c" -dependencies = [ - "either", - "radium 0.3.0", -] - -[[package]] -name = "bitvec" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1489fcb93a5bb47da0462ca93ad252ad6af2145cce58d10d46a83931ba9f016b" -dependencies = [ - "funty", - "radium 0.7.0", - "tap", - "wyz", -] - -[[package]] -name = "blake2-rfc" -version = "0.2.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400" -dependencies = [ - "arrayvec 0.4.12", - "constant_time_eq", -] - -[[package]] -name = "block-buffer" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" -dependencies = [ - "block-padding", - "byte-tools", - "byteorder", - "generic-array 0.12.4", -] - -[[package]] -name = "block-buffer" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" -dependencies = [ - "generic-array 0.14.4", -] - -[[package]] -name = "block-padding" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" -dependencies = [ - "byte-tools", -] - -[[package]] -name = "bp-runtime" -version = "0.1.0" -dependencies = [ - "frame-support", - "hash-db", - "num-traits", - "parity-scale-codec 3.1.5", - "scale-info", - "sp-core", - "sp-io", - "sp-runtime", - "sp-state-machine", - "sp-std", - "sp-trie", -] - -[[package]] -name = "byte-slice-cast" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0a5e3906bcbf133e33c1d4d95afc664ad37fbdb9f6568d8043e7ea8c27d93d3" - -[[package]] -name = "byte-slice-cast" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87c5fdd0166095e1d463fc6cc01aa8ce547ad77a4e84d42eb6762b084e28067e" - -[[package]] -name = "byte-tools" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" - -[[package]] -name = "byteorder" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae44d1a3d5a19df61dd0c8beb138458ac2a53a7ac09eba97d55592540004306b" - -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "chrono" -version = "0.4.19" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" -dependencies = [ - "libc", - "num-integer", - "num-traits", - "time", - "winapi", -] - -[[package]] -name = "constant_time_eq" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" - -[[package]] -name = "cpuid-bool" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8aebca1129a03dc6dc2b127edd729435bbc4a37e1d5f4d7513165089ceb02634" - -[[package]] -name = "crunchy" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" - -[[package]] -name = "crypto-mac" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5" -dependencies = [ - "generic-array 0.12.4", - "subtle 1.0.0", -] - -[[package]] -name = "crypto-mac" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" -dependencies = [ - "generic-array 0.14.4", - "subtle 2.4.0", -] - -[[package]] -name = "curve25519-dalek" -version = "2.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "434e1720189a637d44fe464f4df1e6eb900b4835255b14354497c78af37d9bb8" -dependencies = [ - "byteorder", - "digest 0.8.1", - "rand_core 0.5.1", - "subtle 2.4.0", - "zeroize", -] - -[[package]] -name = "curve25519-dalek" -version = "3.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f627126b946c25a4638eec0ea634fc52506dea98db118aae985118ce7c3d723f" -dependencies = [ - "byteorder", - "digest 0.9.0", - "rand_core 0.5.1", - "subtle 2.4.0", - "zeroize", -] - -[[package]] -name = "derive_more" -version = "0.99.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41cb0e6161ad61ed084a36ba71fbba9e3ac5aee3606fb607fe08da6acbcf3d8c" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "digest" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" -dependencies = [ - "generic-array 0.12.4", -] - -[[package]] -name = "digest" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" -dependencies = [ - "generic-array 0.14.4", -] - -[[package]] -name = "dyn-clonable" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e9232f0e607a262ceb9bd5141a3dfb3e4db6994b31989bbfd845878cba59fd4" -dependencies = [ - "dyn-clonable-impl", - "dyn-clone", -] - -[[package]] -name = "dyn-clonable-impl" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "558e40ea573c374cf53507fd240b7ee2f5477df7cfebdb97323ec61c719399c5" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "dyn-clone" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee2626afccd7561a06cf1367e2950c4718ea04565e20fb5029b6c7d8ad09abcf" - -[[package]] -name = "ed25519" -version = "1.0.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "37c66a534cbb46ab4ea03477eae19d5c22c01da8258030280b7bd9d8433fb6ef" -dependencies = [ - "signature", -] - -[[package]] -name = "ed25519-dalek" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" -dependencies = [ - "curve25519-dalek 3.0.2", - "ed25519", - "rand 0.7.3", - "serde", - "sha2 0.9.2", - "zeroize", -] - -[[package]] -name = "either" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" - -[[package]] -name = "env_logger" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3" -dependencies = [ - "atty", - "humantime", - "log", - "regex", - "termcolor", -] - -[[package]] -name = "environmental" -version = "1.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6576a1755ddffd988788025e75bce9e74b018f7cc226198fe931d077911c6d7e" - -[[package]] -name = "fake-simd" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" - -[[package]] -name = "fixed-hash" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" -dependencies = [ - "byteorder", - "rand 0.8.2", - "rustc-hex", - "static_assertions", -] - -[[package]] -name = "frame-metadata" -version = "12.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" -dependencies = [ - "parity-scale-codec 1.3.6", - "serde", - "sp-core", - "sp-std", -] - -[[package]] -name = "frame-support" -version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" -dependencies = [ - "bitflags", - "frame-metadata", - "frame-support-procedural", - "impl-trait-for-tuples", - "log", - "once_cell", - "parity-scale-codec 1.3.6", - "paste", - "serde", - "smallvec", - "sp-arithmetic", - "sp-core", - "sp-inherents", - "sp-io", - "sp-runtime", - "sp-state-machine", - "sp-std", - "sp-tracing", -] - -[[package]] -name = "frame-support-procedural" -version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" -dependencies = [ - "Inflector", - "frame-support-procedural-tools", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "frame-support-procedural-tools" -version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" -dependencies = [ - "frame-support-procedural-tools-derive", - "proc-macro-crate 0.1.5", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "frame-support-procedural-tools-derive" -version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "funty" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" - -[[package]] -name = "futures" -version = "0.3.12" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "da9052a1a50244d8d5aa9bf55cbc2fb6f357c86cc52e46c62ed390a7180cf150" -dependencies = [ - "futures-channel", - "futures-core", - "futures-executor", - "futures-io", - "futures-sink", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-channel" -version = "0.3.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2d31b7ec7efab6eefc7c57233bb10b847986139d88cc2f5a02a1ae6871a1846" -dependencies = [ - "futures-core", - "futures-sink", -] - -[[package]] -name = "futures-core" -version = "0.3.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79e5145dde8da7d1b3892dad07a9c98fc04bc39892b1ecc9692cf53e2b780a65" - -[[package]] -name = "futures-executor" -version = "0.3.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9e59fdc009a4b3096bf94f740a0f2424c082521f20a9b08c5c07c48d90fd9b9" -dependencies = [ - "futures-core", - "futures-task", - "futures-util", - "num_cpus", -] - -[[package]] -name = "futures-io" -version = "0.3.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28be053525281ad8259d47e4de5de657b25e7bac113458555bb4b70bc6870500" - -[[package]] -name = "futures-macro" -version = "0.3.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c287d25add322d9f9abdcdc5927ca398917996600182178774032e9f8258fedd" -dependencies = [ - "proc-macro-hack", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "futures-sink" -version = "0.3.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "caf5c69029bda2e743fddd0582d1083951d65cc9539aebf8812f36c3491342d6" - -[[package]] -name = "futures-task" -version = "0.3.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13de07eb8ea81ae445aca7b69f5f7bf15d7bf4912d8ca37d6645c77ae8a58d86" -dependencies = [ - "once_cell", -] - -[[package]] -name = "futures-util" -version = "0.3.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "632a8cd0f2a4b3fdea1657f08bde063848c3bd00f9bbf6e256b8be78802e624b" -dependencies = [ - "futures-channel", - "futures-core", - "futures-io", - "futures-macro", - "futures-sink", - "futures-task", - "memchr", - "pin-project-lite", - "pin-utils", - "proc-macro-hack", - "proc-macro-nested", - "slab", -] - -[[package]] -name = "generic-array" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffdf9f34f1447443d37393cc6c2b8313aebddcd96906caf34e54c68d8e57d7bd" -dependencies = [ - "typenum", -] - -[[package]] -name = "generic-array" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" -dependencies = [ - "typenum", - "version_check", -] - -[[package]] -name = "getrandom" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "wasi 0.9.0+wasi-snapshot-preview1", -] - -[[package]] -name = "getrandom" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "wasi 0.10.1+wasi-snapshot-preview1", -] - -[[package]] -name = "gimli" -version = "0.23.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce" - -[[package]] -name = "hash-db" -version = "0.15.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d23bd4e7b5eda0d0f3a307e8b381fdc8ba9000f26fbe912250c0a4cc3956364a" - -[[package]] -name = "hash256-std-hasher" -version = "0.15.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92c171d55b98633f4ed3860808f004099b36c1cc29c42cfc53aa8591b21efcf2" -dependencies = [ - "crunchy", -] - -[[package]] -name = "hashbrown" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" -dependencies = [ - "ahash", -] - -[[package]] -name = "hermit-abi" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" -dependencies = [ - "libc", -] - -[[package]] -name = "hex" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "644f9158b2f133fd50f5fb3242878846d9eb792e445c893805ff0e3824006e35" - -[[package]] -name = "hmac" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dcb5e64cda4c23119ab41ba960d1e170a774c8e4b9d9e6a9bc18aabf5e59695" -dependencies = [ - "crypto-mac 0.7.0", - "digest 0.8.1", -] - -[[package]] -name = "hmac" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" -dependencies = [ - "crypto-mac 0.8.0", - "digest 0.9.0", -] - -[[package]] -name = "hmac-drbg" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6e570451493f10f6581b48cdd530413b63ea9e780f544bfd3bdcaa0d89d1a7b" -dependencies = [ - "digest 0.8.1", - "generic-array 0.12.4", - "hmac 0.7.1", -] - -[[package]] -name = "honggfuzz" -version = "0.5.54" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bea09577d948a98a5f59b7c891e274c4fb35ad52f67782b3d0cb53b9c05301f1" -dependencies = [ - "arbitrary", - "lazy_static", - "memmap", -] - -[[package]] -name = "humantime" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" - -[[package]] -name = "impl-codec" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1be51a921b067b0eaca2fad532d9400041561aa922221cc65f95a85641c6bf53" -dependencies = [ - "parity-scale-codec 1.3.6", -] - -[[package]] -name = "impl-serde" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b47ca4d2b6931707a55fce5cf66aff80e2178c8b63bbb4ecb5695cbc870ddf6f" -dependencies = [ - "serde", -] - -[[package]] -name = "impl-trait-for-tuples" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "instant" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec" -dependencies = [ - "cfg-if 1.0.0", -] - -[[package]] -name = "integer-sqrt" -version = "0.1.5" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "276ec31bcb4a9ee45f58bec6f9ec700ae4cf4f4f8f2fa7e06cb406bd5ffdd770" -dependencies = [ - "num-traits", -] - -[[package]] -name = "itoa" -version = "0.4.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" - -[[package]] -name = "keccak" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" - -[[package]] -name = "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" - -[[package]] -name = "libc" -version = "0.2.82" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89203f3fba0a3795506acaad8ebce3c80c0af93f994d5a1d7a0b1eeb23271929" - -[[package]] -name = "libsecp256k1" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc1e2c808481a63dc6da2074752fdd4336a3c8fcc68b83db6f1fd5224ae7962" -dependencies = [ - "arrayref", - "crunchy", - "digest 0.8.1", - "hmac-drbg", - "rand 0.7.3", - "sha2 0.8.2", - "subtle 2.4.0", - "typenum", -] - -[[package]] -name = "lock_api" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd96ffd135b2fd7b973ac026d28085defbe8983df057ced3eb4f2130b0831312" -dependencies = [ - "scopeguard", -] - -[[package]] -name = "log" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcf3805d4480bb5b86070dcfeb9e2cb2ebc148adb753c5cca5f884d1d65a42b2" -dependencies = [ - "cfg-if 0.1.10", -] - -[[package]] -name = "matchers" -version = "0.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1" -dependencies = [ - "regex-automata", -] - -[[package]] -name = "memchr" -version = "2.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" - -[[package]] -name = "memmap" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b" -dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "memory-db" -version = "0.25.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6cbd2a22f201c03cc1706a727842490abfea17b7b53260358239828208daba3c" -dependencies = [ - "hash-db", - "hashbrown", - "parity-util-mem", -] - -[[package]] -name = "memory_units" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d96e3f3c0b6325d8ccd83c33b28acb183edcb6c67938ba104ec546854b0882" - -[[package]] -name = "merlin" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e261cf0f8b3c42ded9f7d2bb59dea03aa52bc8a1cbc7482f9fc3fd1229d3b42" -dependencies = [ - "byteorder", - "keccak", - "rand_core 0.5.1", - "zeroize", -] - -[[package]] -name = "miniz_oxide" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f2d26ec3309788e423cfbf68ad1800f061638098d76a83681af979dc4eda19d" -dependencies = [ - "adler", - "autocfg", -] - -[[package]] -name = "nodrop" -version = "0.1.14" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" - -[[package]] -name = "num-bigint" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" -dependencies = [ - "autocfg", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-integer" -version = "0.1.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" -dependencies = [ - "autocfg", - "num-traits", -] - -[[package]] -name = "num-rational" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" -dependencies = [ - "autocfg", - "num-bigint", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-traits" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" -dependencies = [ - "autocfg", -] - -[[package]] -name = "num_cpus" -version = "1.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" -dependencies = [ - "hermit-abi", - "libc", -] - -[[package]] -name = "object" -version = "0.23.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9a7ab5d64814df0fe4a4b5ead45ed6c5f181ee3ff04ba344313a6c80446c5d4" - -[[package]] -name = "once_cell" -version = "1.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" -dependencies = [ - "parking_lot", -] - -[[package]] -name = "opaque-debug" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" - -[[package]] -name = "opaque-debug" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" - -[[package]] -name = "parity-scale-codec" -version = "1.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79602888a81ace83e3d1d4b2873286c1f5f906c84db667594e8db8da3506c383" -dependencies = [ - "arrayvec 0.5.2", - "bitvec 0.17.4", - "byte-slice-cast 0.3.5", - "parity-scale-codec-derive 1.2.2", - "serde", -] - -[[package]] -name = "parity-scale-codec" -version = "3.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9182e4a71cae089267ab03e67c99368db7cd877baf50f931e5d6d4b71e195ac0" -dependencies = [ - "arrayvec 0.7.2", - "bitvec 1.0.0", - "byte-slice-cast 1.2.1", - "impl-trait-for-tuples", - "parity-scale-codec-derive 3.1.3", - "serde", -] - -[[package]] -name = "parity-scale-codec-derive" -version = "1.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "198db82bb1c18fc00176004462dd809b2a6d851669550aa17af6dacd21ae0c14" -dependencies = [ - "proc-macro-crate 0.1.5", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "parity-scale-codec-derive" -version = "3.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9299338969a3d2f491d65f140b00ddec470858402f888af98e8642fb5e8965cd" -dependencies = [ - "proc-macro-crate 1.1.3", - "proc-macro2", - 
"quote", - "syn", -] - -[[package]] -name = "parity-util-mem" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f17f15cb05897127bf36a240085a1f0bbef7bce3024849eccf7f93f6171bc27" -dependencies = [ - "cfg-if 1.0.0", - "hashbrown", - "impl-trait-for-tuples", - "parity-util-mem-derive", - "parking_lot", - "primitive-types", - "winapi", -] - -[[package]] -name = "parity-util-mem-derive" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2" -dependencies = [ - "proc-macro2", - "syn", - "synstructure", -] - -[[package]] -name = "parity-wasm" -version = "0.41.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddfc878dac00da22f8f61e7af3157988424567ab01d9920b962ef7dcbd7cd865" - -[[package]] -name = "parking_lot" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb" -dependencies = [ - "instant", - "lock_api", - "parking_lot_core", -] - -[[package]] -name = "parking_lot_core" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ccb628cad4f84851442432c60ad8e1f607e29752d0bf072cbd0baf28aa34272" -dependencies = [ - "cfg-if 1.0.0", - "instant", - "libc", - "redox_syscall", - "smallvec", - "winapi", -] - -[[package]] -name = "paste" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880" -dependencies = [ - "paste-impl", - "proc-macro-hack", -] - -[[package]] -name = "paste-impl" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6" -dependencies = [ - "proc-macro-hack", -] - -[[package]] -name = "pbkdf2" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "006c038a43a45995a9670da19e67600114740e8511d4333bf97a56e66a7542d9" -dependencies = [ - "byteorder", - "crypto-mac 0.7.0", -] - -[[package]] -name = "pbkdf2" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "216eaa586a190f0a738f2f918511eecfa90f13295abec0e457cdebcceda80cbd" -dependencies = [ - "crypto-mac 0.8.0", -] - -[[package]] -name = "pin-project-lite" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "439697af366c49a6d0a010c56a0d97685bc140ce0d377b13a2ea2aa42d64a827" - -[[package]] -name = "pin-utils" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" - -[[package]] -name = "ppv-lite86" -version = "0.2.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" - -[[package]] -name = "primitive-types" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3824ae2c5e27160113b9e029a10ec9e3f0237bad8029f69c7724393c9fdefd8" -dependencies = [ - "fixed-hash", - "impl-codec", - "impl-serde", - "uint", -] - -[[package]] -name = "proc-macro-crate" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" -dependencies = [ - 
"toml", -] - -[[package]] -name = "proc-macro-crate" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e17d47ce914bf4de440332250b0edd23ce48c005f59fab39d3335866b114f11a" -dependencies = [ - "thiserror", - "toml", -] - -[[package]] -name = "proc-macro-hack" -version = "0.5.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" - -[[package]] -name = "proc-macro-nested" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" - -[[package]] -name = "proc-macro2" -version = "1.0.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71" -dependencies = [ - "unicode-xid", -] - -[[package]] -name = "quote" -version = "1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "991431c3519a3f36861882da93630ce66b52918dcf1b8e2fd66b397fc96f28df" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "radium" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "def50a86306165861203e7f84ecffbbdfdea79f0e51039b33de1e952358c47ac" - -[[package]] -name = "radium" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" - -[[package]] -name = "rand" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" -dependencies = [ - "getrandom 0.1.16", - "libc", - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc", - "rand_pcg", -] - -[[package]] -name = "rand" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18519b42a40024d661e1714153e9ad0c3de27cd495760ceb09710920f1098b1e" -dependencies = [ - "libc", - "rand_chacha 0.3.0", - "rand_core 0.6.1", -] - -[[package]] -name = "rand_chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", -] - -[[package]] -name = "rand_chacha" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" -dependencies = [ - "ppv-lite86", - "rand_core 0.6.1", -] - -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -dependencies = [ - "getrandom 0.1.16", -] - -[[package]] -name = "rand_core" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c026d7df8b298d90ccbbc5190bd04d85e159eaf5576caeacf8741da93ccbd2e5" -dependencies = [ - "getrandom 0.2.2", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core 0.5.1", -] - -[[package]] -name = "rand_pcg" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"16abd0c1b639e9eb4d7c50c0b8100b0d0f849be2349829c740fe8e6eb4816429" -dependencies = [ - "rand_core 0.5.1", -] - -[[package]] -name = "redox_syscall" -version = "0.1.57" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" - -[[package]] -name = "ref-cast" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "300f2a835d808734ee295d45007adacb9ebb29dd3ae2424acfa17930cae541da" -dependencies = [ - "ref-cast-impl", -] - -[[package]] -name = "ref-cast-impl" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c38e3aecd2b21cb3959637b883bb3714bc7e43f0268b9a29d3743ee3e55cdd2" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "regex" -version = "1.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9251239e129e16308e70d853559389de218ac275b515068abc96829d05b948a" -dependencies = [ - "aho-corasick", - "memchr", - "regex-syntax", - "thread_local", -] - -[[package]] -name = "regex-automata" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4" -dependencies = [ - "byteorder", - "regex-syntax", -] - -[[package]] -name = "regex-syntax" -version = "0.6.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5eb417147ba9860a96cfe72a0b93bf88fee1744b5636ec99ab20c1aa9376581" - -[[package]] -name = "rustc-demangle" -version = "0.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e3bad0ee36814ca07d7968269dd4b7ec89ec2da10c4bb613928d3077083c232" - -[[package]] -name = "rustc-hash" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" - -[[package]] -name = "rustc-hex" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" - -[[package]] -name = "ryu" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" - -[[package]] -name = "scale-info" -version = "2.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c46be926081c9f4dd5dd9b6f1d3e3229f2360bc6502dd8836f84a93b7c75e99a" -dependencies = [ - "bitvec 1.0.0", - "cfg-if 1.0.0", - "derive_more", - "parity-scale-codec 3.1.5", - "scale-info-derive", -] - -[[package]] -name = "scale-info-derive" -version = "2.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50e334bb10a245e28e5fd755cabcafd96cfcd167c99ae63a46924ca8d8703a3c" -dependencies = [ - "proc-macro-crate 1.1.3", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "schnorrkel" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "021b403afe70d81eea68f6ea12f6b3c9588e5d536a94c3bf80f15e7faa267862" -dependencies = [ - "arrayref", - "arrayvec 0.5.2", - "curve25519-dalek 2.1.2", - "getrandom 0.1.16", - "merlin", - "rand 0.7.3", - "rand_core 0.5.1", - "serde", - "sha2 0.8.2", - "subtle 2.4.0", - "zeroize", -] - -[[package]] -name = "scopeguard" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" - -[[package]] -name = "secrecy" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0673d6a6449f5e7d12a1caf424fd9363e2af3a4953023ed455e3c4beef4597c0" -dependencies = [ - "zeroize", -] - -[[package]] -name = "serde" -version = "1.0.120" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "166b2349061381baf54a58e4b13c89369feb0ef2eaa57198899e2312aac30aab" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_derive" -version = "1.0.120" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ca2a8cb5805ce9e3b95435e3765b7b553cecc762d938d409434338386cb5775" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "serde_json" -version = "1.0.61" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fceb2595057b6891a4ee808f70054bd2d12f0e97f1cbb78689b59f676df325a" -dependencies = [ - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "sha2" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69" -dependencies = [ - "block-buffer 0.7.3", - "digest 0.8.1", - "fake-simd", - "opaque-debug 0.2.3", -] - -[[package]] -name = "sha2" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e7aab86fe2149bad8c507606bdb3f4ef5e7b2380eb92350f56122cca72a42a8" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if 1.0.0", - "cpuid-bool", - "digest 0.9.0", - "opaque-debug 0.3.0", -] - -[[package]] -name = "sharded-slab" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79c719719ee05df97490f80a45acfc99e5a30ce98a1e4fb67aee422745ae14e3" -dependencies = [ - "lazy_static", -] - -[[package]] -name = "signature" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f0242b8e50dd9accdd56170e94ca1ebd223b098eb9c83539a6e367d0f36ae68" - -[[package]] -name = "slab" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" - -[[package]] -name = "smallvec" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" - -[[package]] -name = "sp-application-crypto" -version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" -dependencies = [ - "parity-scale-codec 1.3.6", - "serde", - "sp-core", - "sp-io", - "sp-std", -] - -[[package]] -name = "sp-arithmetic" -version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" -dependencies = [ - "integer-sqrt", - "num-traits", - "parity-scale-codec 1.3.6", - "serde", - "sp-debug-derive", - "sp-std", -] - -[[package]] -name = "sp-core" -version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" -dependencies = [ - "base58", - "blake2-rfc", - "byteorder", - "dyn-clonable", - "ed25519-dalek", - "futures", - "hash-db", - "hash256-std-hasher", - "hex", - "impl-serde", - "lazy_static", - "libsecp256k1", - "log", - "merlin", - "num-traits", - "parity-scale-codec 1.3.6", - "parity-util-mem", - "parking_lot", - 
"primitive-types", - "rand 0.7.3", - "regex", - "schnorrkel", - "secrecy", - "serde", - "sha2 0.9.2", - "sp-debug-derive", - "sp-externalities", - "sp-runtime-interface", - "sp-std", - "sp-storage", - "substrate-bip39", - "thiserror", - "tiny-bip39", - "tiny-keccak", - "twox-hash", - "wasmi", - "zeroize", -] - -[[package]] -name = "sp-debug-derive" -version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "sp-externalities" -version = "0.8.1" -source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" -dependencies = [ - "environmental", - "parity-scale-codec 1.3.6", - "sp-std", - "sp-storage", -] - -[[package]] -name = "sp-inherents" -version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" -dependencies = [ - "parity-scale-codec 1.3.6", - "parking_lot", - "sp-core", - "sp-std", - "thiserror", -] - -[[package]] -name = "sp-io" -version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" -dependencies = [ - "futures", - "hash-db", - "libsecp256k1", - "log", - "parity-scale-codec 1.3.6", - "parking_lot", - "sp-core", - "sp-externalities", - "sp-keystore", - "sp-runtime-interface", - "sp-state-machine", - "sp-std", - "sp-tracing", - "sp-trie", - "sp-wasm-interface", - "tracing", - "tracing-core", -] - -[[package]] -name = "sp-keystore" -version = "0.8.0" -source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" -dependencies = [ - "async-trait", - "derive_more", - "futures", - "merlin", - "parity-scale-codec 1.3.6", - "parking_lot", - "schnorrkel", - "sp-core", - "sp-externalities", -] - -[[package]] -name = "sp-panic-handler" -version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" -dependencies = [ - "backtrace", -] - -[[package]] -name = "sp-runtime" -version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" -dependencies = [ - "either", - "hash256-std-hasher", - "impl-trait-for-tuples", - "log", - "parity-scale-codec 1.3.6", - "parity-util-mem", - "paste", - "rand 0.7.3", - "serde", - "sp-application-crypto", - "sp-arithmetic", - "sp-core", - "sp-io", - "sp-std", -] - -[[package]] -name = "sp-runtime-interface" -version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" -dependencies = [ - "impl-trait-for-tuples", - "parity-scale-codec 1.3.6", - "primitive-types", - "sp-externalities", - "sp-runtime-interface-proc-macro", - "sp-std", - "sp-storage", - "sp-tracing", - "sp-wasm-interface", - "static_assertions", -] - -[[package]] -name = "sp-runtime-interface-proc-macro" -version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" -dependencies = [ - "Inflector", - "proc-macro-crate 0.1.5", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "sp-state-machine" -version = "0.8.1" -source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" -dependencies = [ - "hash-db", - "log", - "num-traits", - "parity-scale-codec 1.3.6", - "parking_lot", - "rand 0.7.3", - 
"smallvec", - "sp-core", - "sp-externalities", - "sp-panic-handler", - "sp-std", - "sp-trie", - "thiserror", - "trie-db", - "trie-root", -] - -[[package]] -name = "sp-std" -version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" - -[[package]] -name = "sp-storage" -version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" -dependencies = [ - "impl-serde", - "parity-scale-codec 1.3.6", - "ref-cast", - "serde", - "sp-debug-derive", - "sp-std", -] - -[[package]] -name = "sp-tracing" -version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" -dependencies = [ - "log", - "parity-scale-codec 1.3.6", - "sp-std", - "tracing", - "tracing-core", - "tracing-subscriber", -] - -[[package]] -name = "sp-trie" -version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" -dependencies = [ - "hash-db", - "memory-db", - "parity-scale-codec 1.3.6", - "sp-core", - "sp-std", - "trie-db", - "trie-root", -] - -[[package]] -name = "sp-wasm-interface" -version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#4b687dfb4def2b5eee9f5a20629e3fc3563587ee" -dependencies = [ - "impl-trait-for-tuples", - "parity-scale-codec 1.3.6", - "sp-std", - "wasmi", -] - -[[package]] -name = "static_assertions" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" - -[[package]] -name = "storage-proof-fuzzer" -version = "0.1.0" -dependencies = [ - "bp-runtime", - "env_logger", - "honggfuzz", - "log", - "sp-core", - "sp-runtime", - "sp-state-machine", - "sp-std", - "sp-trie", -] - -[[package]] -name = "substrate-bip39" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bed6646a0159b9935b5d045611560eeef842b78d7adc3ba36f5ca325a13a0236" -dependencies = [ - "hmac 0.7.1", - "pbkdf2 0.3.0", - "schnorrkel", - "sha2 0.8.2", - "zeroize", -] - -[[package]] -name = "subtle" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" - -[[package]] -name = "subtle" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2" - -[[package]] -name = "syn" -version = "1.0.58" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc60a3d73ea6594cd712d830cc1f0390fd71542d8c8cd24e70cc54cdfd5e05d5" -dependencies = [ - "proc-macro2", - "quote", - "unicode-xid", -] - -[[package]] -name = "synstructure" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "unicode-xid", -] - -[[package]] -name = "tap" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" - -[[package]] -name = "termcolor" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" -dependencies = [ - "winapi-util", -] - 
-[[package]] -name = "thiserror" -version = "1.0.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd829fe32373d27f76265620b5309d0340cb8550f523c1dda251d6298069069a" -dependencies = [ - "thiserror-impl", -] - -[[package]] -name = "thiserror-impl" -version = "1.0.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0396bc89e626244658bef819e22d0cc459e795a5ebe878e6ec336d1674a8d79a" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "thread_local" -version = "1.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" -dependencies = [ - "once_cell", -] - -[[package]] -name = "time" -version = "0.1.43" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" -dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "tiny-bip39" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9e44c4759bae7f1032e286a7ef990bd9ed23fe831b7eeba0beb97484c2e59b8" -dependencies = [ - "anyhow", - "hmac 0.8.1", - "once_cell", - "pbkdf2 0.4.0", - "rand 0.7.3", - "rustc-hash", - "sha2 0.9.2", - "thiserror", - "unicode-normalization", - "zeroize", -] - -[[package]] -name = "tiny-keccak" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" -dependencies = [ - "crunchy", -] - -[[package]] -name = "tinyvec" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccf8dbc19eb42fba10e8feaaec282fb50e2c14b2726d6301dbfeed0f73306a6f" -dependencies = [ - "tinyvec_macros", -] - -[[package]] -name = "tinyvec_macros" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" - -[[package]] -name = "toml" -version = "0.5.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" -dependencies = [ - "serde", -] - -[[package]] -name = "tracing" -version = "0.1.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" -dependencies = [ - "cfg-if 1.0.0", - "pin-project-lite", - "tracing-attributes", - "tracing-core", -] - -[[package]] -name = "tracing-attributes" -version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80e0ccfc3378da0cce270c946b676a376943f5cd16aeba64568e7939806f4ada" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "tracing-core" -version = "0.1.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f50de3927f93d202783f4513cda820ab47ef17f624b03c096e86ef00c67e6b5f" -dependencies = [ - "lazy_static", -] - -[[package]] -name = "tracing-log" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e0f8c7178e13481ff6765bd169b33e8d554c5d2bbede5e32c356194be02b9b9" -dependencies = [ - "lazy_static", - "log", - "tracing-core", -] - -[[package]] -name = "tracing-serde" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb65ea441fbb84f9f6748fd496cf7f63ec9af5bca94dd86456978d055e8eb28b" 
-dependencies = [ - "serde", - "tracing-core", -] - -[[package]] -name = "tracing-subscriber" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1fa8f0c8f4c594e4fc9debc1990deab13238077271ba84dd853d54902ee3401" -dependencies = [ - "ansi_term", - "chrono", - "lazy_static", - "matchers", - "regex", - "serde", - "serde_json", - "sharded-slab", - "smallvec", - "thread_local", - "tracing", - "tracing-core", - "tracing-log", - "tracing-serde", -] - -[[package]] -name = "trie-db" -version = "0.22.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc176c377eb24d652c9c69c832c832019011b6106182bf84276c66b66d5c9a6" -dependencies = [ - "hash-db", - "hashbrown", - "log", - "rustc-hex", - "smallvec", -] - -[[package]] -name = "trie-root" -version = "0.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "652931506d2c1244d7217a70b99f56718a7b4161b37f04e7cd868072a99f68cd" -dependencies = [ - "hash-db", -] - -[[package]] -name = "twox-hash" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04f8ab788026715fa63b31960869617cba39117e520eb415b0139543e325ab59" -dependencies = [ - "cfg-if 0.1.10", - "rand 0.7.3", - "static_assertions", -] - -[[package]] -name = "typenum" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33" - -[[package]] -name = "uint" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e11fe9a9348741cf134085ad57c249508345fe16411b3d7fb4ff2da2f1d6382e" -dependencies = [ - "byteorder", - "crunchy", - "hex", - "static_assertions", -] - -[[package]] -name = "unicode-normalization" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a13e63ab62dbe32aeee58d1c5408d35c36c392bba5d9d3142287219721afe606" -dependencies = [ - "tinyvec", -] - -[[package]] -name = "unicode-xid" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" - -[[package]] -name = "version_check" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed" - -[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - -[[package]] -name = "wasi" -version = "0.10.1+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93c6c3420963c5c64bca373b25e77acb562081b9bb4dd5bb864187742186cea9" - -[[package]] -name = "wasmi" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf617d864d25af3587aa745529f7aaa541066c876d57e050c0d0c85c61c92aff" -dependencies = [ - "libc", - "memory_units", - "num-rational", - "num-traits", - "parity-wasm", - "wasmi-validation", -] - -[[package]] -name = "wasmi-validation" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea78c597064ba73596099281e2f4cfc019075122a65cdda3205af94f0b264d93" -dependencies = [ - "parity-wasm", -] - -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-util" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" -dependencies = [ - "winapi", -] - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -name = "wyz" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b31594f29d27036c383b53b59ed3476874d518f0efb151b27a4c275141390e" -dependencies = [ - "tap", -] - -[[package]] -name = "zeroize" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81a974bcdd357f0dca4d41677db03436324d45a4c9ed2d0b873a5a360ce41c36" -dependencies = [ - "zeroize_derive", -] - -[[package]] -name = "zeroize_derive" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3f369ddb18862aba61aa49bf31e74d29f0f162dec753063200e1dc084345d16" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "synstructure", -] diff --git a/polkadot/bridges/fuzz/storage-proof/Cargo.toml b/polkadot/bridges/fuzz/storage-proof/Cargo.toml deleted file mode 100644 index b406054bc6e..00000000000 --- a/polkadot/bridges/fuzz/storage-proof/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "storage-proof-fuzzer" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -honggfuzz = "0.5.54" -log = "0.4.0" -env_logger = "0.8.3" - -# Bridge Dependencies - -bp-runtime = { path = "../../primitives/runtime" } - -# Substrate Dependencies - -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/fuzz/storage-proof/README.md b/polkadot/bridges/fuzz/storage-proof/README.md deleted file mode 100644 index 1eeec7562a9..00000000000 --- a/polkadot/bridges/fuzz/storage-proof/README.md +++ /dev/null @@ -1,34 +0,0 @@ -# Storage Proof Fuzzer - -## How to run? 
- -Install dependencies: -``` -$ sudo apt install build-essential binutils-dev libunwind-dev -``` -or on nix: -``` -$ nix-shell -p honggfuzz -``` - -Install the `cargo hfuzz` plugin: -``` -$ cargo install honggfuzz -``` - -Run: -``` -$ cargo hfuzz run storage-proof-fuzzer -``` - -Use `HFUZZ_RUN_ARGS` to customize execution: -``` -# 1 second timeout -# use 12 fuzzing threads -# be verbose -# stop after 1000000 fuzzing iterations -# exit upon crash -HFUZZ_RUN_ARGS="-t 1 -n 12 -v -N 1000000 --exit_upon_crash" cargo hfuzz run storage-proof-fuzzer -``` - -More details are in the [official documentation](https://docs.rs/honggfuzz/0.5.52/honggfuzz/#about-honggfuzz). diff --git a/polkadot/bridges/fuzz/storage-proof/src/main.rs b/polkadot/bridges/fuzz/storage-proof/src/main.rs deleted file mode 100644 index 185d0e336c4..00000000000 --- a/polkadot/bridges/fuzz/storage-proof/src/main.rs +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>. - -//! Storage Proof Checker fuzzer. - -#![warn(missing_docs)] - -use honggfuzz::fuzz; -// Logic for checking Substrate storage proofs. 
- -use sp_core::{Blake2Hasher, H256}; -use sp_state_machine::{backend::Backend, prove_read, InMemoryBackend}; -use sp_std::vec::Vec; -use sp_trie::StorageProof; -use std::collections::HashMap; - -fn craft_known_storage_proof(input_vec: Vec<(Vec<u8>, Vec<u8>)>) -> (H256, StorageProof) { - let storage_proof_vec = - vec![(None, input_vec.iter().map(|x| (x.0.clone(), Some(x.1.clone()))).collect())]; - log::info!("Storage proof vec {:?}", storage_proof_vec); - let state_version = sp_runtime::StateVersion::default(); - let backend = <InMemoryBackend<Blake2Hasher>>::from((storage_proof_vec, state_version)); - let root = backend.storage_root(std::iter::empty(), state_version).0; - let vector_element_proof = StorageProof::new( - prove_read(backend, input_vec.iter().map(|x| x.0.as_slice())) - .unwrap() - .iter_nodes(), - ); - (root, vector_element_proof) -} - -fn transform_into_unique(input_vec: Vec<(Vec<u8>, Vec<u8>)>) -> Vec<(Vec<u8>, Vec<u8>)> { - let mut output_hashmap = HashMap::new(); - let mut output_vec = Vec::new(); - for key_value_pair in input_vec { - output_hashmap.insert(key_value_pair.0, key_value_pair.1); // Only 1 value per key - } - for (key, val) in output_hashmap.iter() { - output_vec.push((key.clone(), val.clone())); - } - output_vec -} - -fn run_fuzzer() { - fuzz!(|input_vec: Vec<(Vec<u8>, Vec<u8>)>| { - if input_vec.is_empty() { - return - } - let unique_input_vec = transform_into_unique(input_vec); - let (root, craft_known_storage_proof) = craft_known_storage_proof(unique_input_vec.clone()); - let checker = - <bp_runtime::StorageProofChecker<Blake2Hasher>>::new(root, craft_known_storage_proof) - .expect("Valid proof passed; qed"); - for key_value_pair in unique_input_vec { - log::info!("Reading value for pair {:?}", key_value_pair); - assert_eq!(checker.read_value(&key_value_pair.0), Ok(Some(key_value_pair.1.clone()))); - } - }) -} - -fn main() { - env_logger::init(); - - loop { - run_fuzzer(); - } -} diff --git a/polkadot/bridges/modules/dispatch/Cargo.toml b/polkadot/bridges/modules/dispatch/Cargo.toml deleted file mode 100644 index 98164452b83..00000000000 --- a/polkadot/bridges/modules/dispatch/Cargo.toml +++ /dev/null @@ -1,43 +0,0 @@ -[package] -name = "pallet-bridge-dispatch" -description = "A Substrate Runtime module that dispatches a bridge message, treating it simply as encoded Call" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } -log = { version = "0.4.14", default-features = false } -scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } - -# Bridge dependencies - -bp-message-dispatch = { path = "../../primitives/message-dispatch", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } - -[dev-dependencies] -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" } - -[features] -default = ["std"] -std = [ - 
"bp-message-dispatch/std", - "bp-runtime/std", - "codec/std", - "frame-support/std", - "frame-system/std", - "log/std", - "scale-info/std", - "sp-core/std", - "sp-runtime/std", - "sp-std/std", -] diff --git a/polkadot/bridges/modules/dispatch/README.md b/polkadot/bridges/modules/dispatch/README.md deleted file mode 100644 index 068ff1167f7..00000000000 --- a/polkadot/bridges/modules/dispatch/README.md +++ /dev/null @@ -1,63 +0,0 @@ -# Call Dispatch Module - -The call dispatch module has a single internal (only callable by other runtime modules) entry point -for dispatching encoded calls (`pallet_bridge_dispatch::Module::dispatch`). Every dispatch -(successful or not) emits a corresponding module event. The module doesn't have any call-related -requirements - they may come from the bridged chain over some message lane, or they may be crafted -locally. But in this document we'll mostly talk about this module in the context of bridges. - -Every message that is being dispatched has three main characteristics: -- `bridge` is the 4-bytes identifier of the bridge where this message comes from. This may be the - identifier of the bridged chain (like `b"rlto"` for messages coming from `Rialto`), or the - identifier of the bridge itself (`b"rimi"` for `Rialto` <-> `Millau` bridge); -- `id` is the unique id of the message within the given bridge. For messages coming from the - [messages module](../messages/README.md), it may worth to use a tuple - `(LaneId, MessageNonce)` to identify a message; -- `message` is the `bp_message_dispatch::MessagePayload` structure. The `call` field is set - to the (potentially) encoded `Call` of this chain. - -The easiest way to understand what is happening when a `Call` is being dispatched, is to look at the -module events set: - -- `MessageRejected` event is emitted if a message has been rejected even before it has reached the - module. Dispatch then is called just to reflect the fact that message has been received, but we - have failed to pre-process it (e.g. because we have failed to decode `MessagePayload` structure - from the proof); -- `MessageVersionSpecMismatch` event is emitted if current runtime specification version differs - from the version that has been used to encode the `Call`. The message payload has the - `spec_version`, that is filled by the message submitter. If this value differs from the current - runtime version, dispatch mechanism rejects to dispatch the message. Without this check, we may - decode the wrong `Call` for example if method arguments were changed; -- `MessageCallDecodeFailed` event is emitted if we have failed to decode `Call` from the payload. - This may happen if the submitter has provided incorrect value in the `call` field, or if source - chain storage has been corrupted. The `Call` is decoded after `spec_version` check, so we'll never - try to decode `Call` from other runtime version; -- `MessageSignatureMismatch` event is emitted if submitter has chose to dispatch message using - specified this chain account (`bp_message_dispatch::CallOrigin::TargetAccount` origin), - but he has failed to prove that he owns the private key for this account; -- `MessageCallRejected` event is emitted if the module has been deployed with some call filter and - this filter has rejected the `Call`. In your bridge you may choose to reject all messages except - e.g. balance transfer calls; -- `MessageWeightMismatch` event is emitted if the message submitter has specified invalid `Call` - dispatch weight in the `weight` field of the message payload. 
The value of this field is compared - to the pre-dispatch weight of the decoded `Call`. If it is less than the actual pre-dispatch - weight, the dispatch is rejected. Keep in mind, that even if post-dispatch weight will be less - than specified, the submitter still have to declare (and pay for) the maximal possible weight - (that is the pre-dispatch weight); -- `MessageDispatchPaymentFailed` event is emitted if the message submitter has selected to pay - dispatch fee at the target chain, but has failed to do that; -- `MessageDispatched` event is emitted if the message has passed all checks and we have actually - dispatched it. The dispatch may still fail, though - that's why we are including the dispatch - result in the event payload. - -When we talk about module in context of bridges, these events are helping in following cases: - -1. when the message submitter has access to the state of both chains and wants to monitor what has - happened with his message. Then he could use the message id (that he gets from the - [messages module events](../messages/README.md#General-Information)) to filter events of - call dispatch module at the target chain and actually see what has happened with his message; - -1. when the message submitter only has access to the source chain state (for example, when sender is - the runtime module at the source chain). In this case, your bridge may have additional mechanism - to deliver dispatch proofs (which are storage proof of module events) back to the source chain, - thus allowing the submitter to see what has happened with his messages. diff --git a/polkadot/bridges/modules/dispatch/src/lib.rs b/polkadot/bridges/modules/dispatch/src/lib.rs deleted file mode 100644 index 1cff6bceedd..00000000000 --- a/polkadot/bridges/modules/dispatch/src/lib.rs +++ /dev/null @@ -1,1108 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Runtime module which takes care of dispatching messages received over the bridge. -//! -//! The messages are interpreted directly as runtime `Call`. We attempt to decode -//! them and then dispatch as usual. To prevent compatibility issues, the Calls have -//! to include a `spec_version`. This will be checked before dispatch. In the case of -//! a successful dispatch an event is emitted. 
- -#![cfg_attr(not(feature = "std"), no_std)] -// Generated by `decl_event!` -#![allow(clippy::unused_unit)] - -use bp_message_dispatch::{CallOrigin, MessageDispatch, MessagePayload, SpecVersion}; -use bp_runtime::{ - derive_account_id, - messages::{DispatchFeePayment, MessageDispatchResult}, - ChainId, SourceAccount, -}; -use codec::Encode; -use frame_support::{ - dispatch::Dispatchable, - ensure, - traits::{Contains, Get}, - weights::{extract_actual_weight, GetDispatchInfo}, -}; -use frame_system::RawOrigin; -use sp_runtime::traits::{BadOrigin, Convert, IdentifyAccount, MaybeDisplay, Verify}; -use sp_std::{fmt::Debug, prelude::*}; - -pub use pallet::*; - -#[frame_support::pallet] -pub mod pallet { - use super::*; - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - - #[pallet::config] - pub trait Config: frame_system::Config { - /// The overarching event type. - type Event: From> + IsType<::Event>; - /// Id of the message. Whenever message is passed to the dispatch module, it emits - /// event with this id + dispatch result. Could be e.g. (LaneId, MessageNonce) if - /// it comes from the messages module. - type BridgeMessageId: Parameter; - /// Type of account ID on source chain. - type SourceChainAccountId: Parameter - + Member - + MaybeSerializeDeserialize - + Debug - + MaybeDisplay - + Ord; - /// Type of account public key on target chain. - type TargetChainAccountPublic: Parameter + IdentifyAccount; - /// Type of signature that may prove that the message has been signed by - /// owner of `TargetChainAccountPublic`. - type TargetChainSignature: Parameter + Verify; - /// The overarching dispatch call type. - type Call: Parameter - + GetDispatchInfo - + Dispatchable< - Origin = ::Origin, - PostInfo = frame_support::dispatch::PostDispatchInfo, - >; - /// Pre-dispatch filter for incoming calls. - /// - /// The pallet will filter all incoming calls right before they're dispatched. If this - /// filter rejects the call, special event (`Event::MessageCallRejected`) is emitted. - type CallFilter: Contains<>::Call>; - /// The type that is used to wrap the `Self::Call` when it is moved over bridge. - /// - /// The idea behind this is to avoid `Call` conversion/decoding until we'll be sure - /// that all other stuff (like `spec_version`) is ok. If we would try to decode - /// `Call` which has been encoded using previous `spec_version`, then we might end - /// up with decoding error, instead of `MessageVersionSpecMismatch`. - type EncodedCall: Decode + Encode + Into>::Call, ()>>; - /// A type which can be turned into an AccountId from a 256-bit hash. - /// - /// Used when deriving target chain AccountIds from source chain AccountIds. - type AccountIdConverter: sp_runtime::traits::Convert; - } - - type BridgeMessageIdOf = >::BridgeMessageId; - - #[pallet::pallet] - #[pallet::generate_store(pub(super) trait Store)] - pub struct Pallet(PhantomData<(T, I)>); - - #[pallet::hooks] - impl, I: 'static> Hooks> for Pallet {} - - #[pallet::call] - impl, I: 'static> Pallet {} - - #[pallet::event] - #[pallet::generate_deposit(pub(super) fn deposit_event)] - pub enum Event, I: 'static = ()> { - /// Message has been rejected before reaching dispatch. - MessageRejected { source_chain: ChainId, id: BridgeMessageIdOf }, - /// Message has been rejected by dispatcher because of spec version mismatch. 
- MessageVersionSpecMismatch { - source_chain: ChainId, - id: BridgeMessageIdOf, - expected_version: SpecVersion, - passed_version: SpecVersion, - }, - /// Message has been rejected by dispatcher because of weight mismatch. - MessageWeightMismatch { - source_chain: ChainId, - id: BridgeMessageIdOf, - expected_weight: Weight, - passed_weight: Weight, - }, - /// Message signature mismatch. - MessageSignatureMismatch { source_chain: ChainId, id: BridgeMessageIdOf }, - /// We have failed to decode Call from the message. - MessageCallDecodeFailed { source_chain: ChainId, id: BridgeMessageIdOf }, - /// The call from the message has been rejected by the call filter. - MessageCallRejected { source_chain: ChainId, id: BridgeMessageIdOf }, - /// The origin account has failed to pay fee for dispatching the message. - MessageDispatchPaymentFailed { - source_chain: ChainId, - id: BridgeMessageIdOf, - origin_account: ::AccountId, - weight: Weight, - }, - /// Message has been dispatched with given result. - MessageDispatched { - source_chain: ChainId, - id: BridgeMessageIdOf, - result: DispatchResult, - }, - /// Phantom member, never used. Needed to handle multiple pallet instances. - _Dummy { phantom_member: PhantomData }, - } -} - -impl, I: 'static> MessageDispatch for Pallet { - type Message = MessagePayload< - T::SourceChainAccountId, - T::TargetChainAccountPublic, - T::TargetChainSignature, - T::EncodedCall, - >; - - fn dispatch_weight(message: &Self::Message) -> bp_message_dispatch::Weight { - message.weight - } - - fn dispatch Result<(), ()>>( - source_chain: ChainId, - target_chain: ChainId, - id: T::BridgeMessageId, - message: Result, - pay_dispatch_fee: P, - ) -> MessageDispatchResult { - // emit special even if message has been rejected by external component - let message = match message { - Ok(message) => message, - Err(_) => { - log::trace!( - target: "runtime::bridge-dispatch", - "Message {:?}/{:?}: rejected before actual dispatch", - source_chain, - id, - ); - Self::deposit_event(Event::MessageRejected { source_chain, id }); - return MessageDispatchResult { - dispatch_result: false, - unspent_weight: 0, - dispatch_fee_paid_during_dispatch: false, - } - }, - }; - - // verify spec version - // (we want it to be the same, because otherwise we may decode Call improperly) - let mut dispatch_result = MessageDispatchResult { - dispatch_result: false, - unspent_weight: message.weight, - dispatch_fee_paid_during_dispatch: false, - }; - let expected_version = ::Version::get().spec_version; - if message.spec_version != expected_version { - log::trace!( - "Message {:?}/{:?}: spec_version mismatch. 
Expected {:?}, got {:?}", - source_chain, - id, - expected_version, - message.spec_version, - ); - Self::deposit_event(Event::MessageVersionSpecMismatch { - source_chain, - id, - expected_version, - passed_version: message.spec_version, - }); - return dispatch_result - } - - // now that we have spec version checked, let's decode the call - let call = match message.call.into() { - Ok(call) => call, - Err(_) => { - log::trace!( - target: "runtime::bridge-dispatch", - "Failed to decode Call from message {:?}/{:?}", - source_chain, - id, - ); - Self::deposit_event(Event::MessageCallDecodeFailed { source_chain, id }); - return dispatch_result - }, - }; - - // prepare dispatch origin - let origin_account = match message.origin { - CallOrigin::SourceRoot => { - let hex_id = - derive_account_id::(source_chain, SourceAccount::Root); - let target_id = T::AccountIdConverter::convert(hex_id); - log::trace!(target: "runtime::bridge-dispatch", "Root Account: {:?}", &target_id); - target_id - }, - CallOrigin::TargetAccount(source_account_id, target_public, target_signature) => { - let digest = account_ownership_digest( - &call, - source_account_id, - message.spec_version, - source_chain, - target_chain, - ); - - let target_account = target_public.into_account(); - if !target_signature.verify(&digest[..], &target_account) { - log::trace!( - target: "runtime::bridge-dispatch", - "Message {:?}/{:?}: origin proof is invalid. Expected account: {:?} from signature: {:?}", - source_chain, - id, - target_account, - target_signature, - ); - Self::deposit_event(Event::MessageSignatureMismatch { source_chain, id }); - return dispatch_result - } - - log::trace!(target: "runtime::bridge-dispatch", "Target Account: {:?}", &target_account); - target_account - }, - CallOrigin::SourceAccount(source_account_id) => { - let hex_id = - derive_account_id(source_chain, SourceAccount::Account(source_account_id)); - let target_id = T::AccountIdConverter::convert(hex_id); - log::trace!(target: "runtime::bridge-dispatch", "Source Account: {:?}", &target_id); - target_id - }, - }; - - // filter the call - if !T::CallFilter::contains(&call) { - log::trace!( - target: "runtime::bridge-dispatch", - "Message {:?}/{:?}: the call ({:?}) is rejected by filter", - source_chain, - id, - call, - ); - Self::deposit_event(Event::MessageCallRejected { source_chain, id }); - return dispatch_result - } - - // verify weight - // (we want passed weight to be at least equal to pre-dispatch weight of the call - // because otherwise Calls may be dispatched at lower price) - let dispatch_info = call.get_dispatch_info(); - let expected_weight = dispatch_info.weight; - if message.weight < expected_weight { - log::trace!( - target: "runtime::bridge-dispatch", - "Message {:?}/{:?}: passed weight is too low. 
Expected at least {:?}, got {:?}", - source_chain, - id, - expected_weight, - message.weight, - ); - Self::deposit_event(Event::MessageWeightMismatch { - source_chain, - id, - expected_weight, - passed_weight: message.weight, - }); - return dispatch_result - } - - // pay dispatch fee right before dispatch - let pay_dispatch_fee_at_target_chain = - message.dispatch_fee_payment == DispatchFeePayment::AtTargetChain; - if pay_dispatch_fee_at_target_chain && - pay_dispatch_fee(&origin_account, message.weight).is_err() - { - log::trace!( - target: "runtime::bridge-dispatch", - "Failed to pay dispatch fee for dispatching message {:?}/{:?} with weight {}", - source_chain, - id, - message.weight, - ); - Self::deposit_event(Event::MessageDispatchPaymentFailed { - source_chain, - id, - origin_account, - weight: message.weight, - }); - return dispatch_result - } - dispatch_result.dispatch_fee_paid_during_dispatch = pay_dispatch_fee_at_target_chain; - - // finally dispatch message - let origin = RawOrigin::Signed(origin_account).into(); - - log::trace!(target: "runtime::bridge-dispatch", "Message being dispatched is: {:.4096?}", &call); - let result = call.dispatch(origin); - let actual_call_weight = extract_actual_weight(&result, &dispatch_info); - dispatch_result.dispatch_result = result.is_ok(); - dispatch_result.unspent_weight = message.weight.saturating_sub(actual_call_weight); - - log::trace!( - target: "runtime::bridge-dispatch", - "Message {:?}/{:?} has been dispatched. Weight: {} of {}. Result: {:?}. Call dispatch result: {:?}", - source_chain, - id, - actual_call_weight, - message.weight, - dispatch_result, - result, - ); - - Self::deposit_event(Event::MessageDispatched { - source_chain, - id, - result: result.map(drop).map_err(|e| e.error), - }); - - dispatch_result - } -} - -/// Check if the message is allowed to be dispatched on the target chain given the sender's origin -/// on the source chain. -/// -/// For example, if a message is sent from a "regular" account on the source chain it will not be -/// allowed to be dispatched as Root on the target chain. This is a useful check to do on the source -/// chain _before_ sending a message whose dispatch will be rejected on the target chain. -pub fn verify_message_origin< - SourceChainAccountId, - TargetChainAccountPublic, - TargetChainSignature, - Call, ->( - sender_origin: &RawOrigin, - message: &MessagePayload< - SourceChainAccountId, - TargetChainAccountPublic, - TargetChainSignature, - Call, - >, -) -> Result, BadOrigin> -where - SourceChainAccountId: PartialEq + Clone, -{ - match message.origin { - CallOrigin::SourceRoot => { - ensure!(sender_origin == &RawOrigin::Root, BadOrigin); - Ok(None) - }, - CallOrigin::TargetAccount(ref source_account_id, _, _) => { - ensure!(sender_origin == &RawOrigin::Signed(source_account_id.clone()), BadOrigin); - Ok(Some(source_account_id.clone())) - }, - CallOrigin::SourceAccount(ref source_account_id) => { - ensure!( - sender_origin == &RawOrigin::Signed(source_account_id.clone()) || - sender_origin == &RawOrigin::Root, - BadOrigin - ); - Ok(Some(source_account_id.clone())) - }, - } -} - -/// Target account ownership digest from the source chain. -/// -/// The byte vector returned by this function will be signed with a target chain account -/// private key. This way, the owner of `source_account_id` on the source chain proves that -/// the target chain account private key is also under his control. 
-pub fn account_ownership_digest( - call: &Call, - source_account_id: AccountId, - target_spec_version: SpecVersion, - source_chain_id: ChainId, - target_chain_id: ChainId, -) -> Vec -where - Call: Encode, - AccountId: Encode, - SpecVersion: Encode, -{ - let mut proof = Vec::new(); - call.encode_to(&mut proof); - source_account_id.encode_to(&mut proof); - target_spec_version.encode_to(&mut proof); - source_chain_id.encode_to(&mut proof); - target_chain_id.encode_to(&mut proof); - - proof -} - -#[cfg(test)] -mod tests { - // From construct_runtime macro - #![allow(clippy::from_over_into)] - - use super::*; - use codec::Decode; - use frame_support::{parameter_types, weights::Weight}; - use frame_system::{EventRecord, Phase}; - use scale_info::TypeInfo; - use sp_core::H256; - use sp_runtime::{ - testing::Header, - traits::{BlakeTwo256, IdentityLookup}, - Perbill, - }; - - type AccountId = u64; - type BridgeMessageId = [u8; 4]; - - const SOURCE_CHAIN_ID: ChainId = *b"srce"; - const TARGET_CHAIN_ID: ChainId = *b"trgt"; - - #[derive(Debug, Encode, Decode, Clone, PartialEq, Eq, TypeInfo)] - pub struct TestAccountPublic(AccountId); - - impl IdentifyAccount for TestAccountPublic { - type AccountId = AccountId; - - fn into_account(self) -> AccountId { - self.0 - } - } - - #[derive(Debug, Encode, Decode, Clone, PartialEq, Eq, TypeInfo)] - pub struct TestSignature(AccountId); - - impl Verify for TestSignature { - type Signer = TestAccountPublic; - - fn verify>(&self, _msg: L, signer: &AccountId) -> bool { - self.0 == *signer - } - } - - pub struct AccountIdConverter; - - impl sp_runtime::traits::Convert for AccountIdConverter { - fn convert(hash: H256) -> AccountId { - hash.to_low_u64_ne() - } - } - - type Block = frame_system::mocking::MockBlock; - type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; - - use crate as call_dispatch; - - frame_support::construct_runtime! { - pub enum TestRuntime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Dispatch: call_dispatch::{Pallet, Call, Event}, - } - } - - parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - } - - impl frame_system::Config for TestRuntime { - type Origin = Origin; - type Index = u64; - type Call = Call; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type Header = Header; - type Event = Event; - type BlockHashCount = BlockHashCount; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; - } - - impl Config for TestRuntime { - type Event = Event; - type BridgeMessageId = BridgeMessageId; - type SourceChainAccountId = AccountId; - type TargetChainAccountPublic = TestAccountPublic; - type TargetChainSignature = TestSignature; - type Call = Call; - type CallFilter = TestCallFilter; - type EncodedCall = EncodedCall; - type AccountIdConverter = AccountIdConverter; - } - - #[derive(Decode, Encode)] - pub struct EncodedCall(Vec); - - impl From for Result { - fn from(call: EncodedCall) -> Result { - Call::decode(&mut &call.0[..]).map_err(drop) - } - } - - pub struct TestCallFilter; - - impl Contains for TestCallFilter { - fn contains(call: &Call) -> bool { - !matches!(*call, Call::System(frame_system::Call::fill_block { .. })) - } - } - - const TEST_SPEC_VERSION: SpecVersion = 0; - const TEST_WEIGHT: Weight = 1_000_000_000; - - fn new_test_ext() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - sp_io::TestExternalities::new(t) - } - - fn prepare_message( - origin: CallOrigin, - call: Call, - ) -> as MessageDispatch< - AccountId, - ::BridgeMessageId, - >>::Message { - MessagePayload { - spec_version: TEST_SPEC_VERSION, - weight: TEST_WEIGHT, - origin, - dispatch_fee_payment: DispatchFeePayment::AtSourceChain, - call: EncodedCall(call.encode()), - } - } - - fn prepare_root_message( - call: Call, - ) -> as MessageDispatch< - AccountId, - ::BridgeMessageId, - >>::Message { - prepare_message(CallOrigin::SourceRoot, call) - } - - fn prepare_target_message( - call: Call, - ) -> as MessageDispatch< - AccountId, - ::BridgeMessageId, - >>::Message { - let origin = CallOrigin::TargetAccount(1, TestAccountPublic(1), TestSignature(1)); - prepare_message(origin, call) - } - - fn prepare_source_message( - call: Call, - ) -> as MessageDispatch< - AccountId, - ::BridgeMessageId, - >>::Message { - let origin = CallOrigin::SourceAccount(1); - prepare_message(origin, call) - } - - #[test] - fn should_fail_on_spec_version_mismatch() { - new_test_ext().execute_with(|| { - let id = [0; 4]; - - const BAD_SPEC_VERSION: SpecVersion = 99; - let mut message = prepare_root_message(Call::System(frame_system::Call::remark { - remark: vec![1, 2, 3], - })); - let weight = message.weight; - message.spec_version = BAD_SPEC_VERSION; - - System::set_block_number(1); - let result = Dispatch::dispatch( - SOURCE_CHAIN_ID, - TARGET_CHAIN_ID, - id, - Ok(message), - |_, _| unreachable!(), - ); - assert_eq!(result.unspent_weight, weight); - assert!(!result.dispatch_result); - - assert_eq!( - System::events(), - vec![EventRecord { - phase: 
Phase::Initialization, - event: Event::Dispatch( - call_dispatch::Event::::MessageVersionSpecMismatch { - source_chain: SOURCE_CHAIN_ID, - id, - expected_version: TEST_SPEC_VERSION, - passed_version: BAD_SPEC_VERSION - } - ), - topics: vec![], - }], - ); - }); - } - - #[test] - fn should_fail_on_weight_mismatch() { - new_test_ext().execute_with(|| { - let id = [0; 4]; - let call = Call::System(frame_system::Call::set_heap_pages { pages: 42 }); - let call_weight = call.get_dispatch_info().weight; - let mut message = prepare_root_message(call); - message.weight = 7; - assert!(call_weight > 7, "needed for test to actually trigger a weight mismatch"); - - System::set_block_number(1); - let result = Dispatch::dispatch( - SOURCE_CHAIN_ID, - TARGET_CHAIN_ID, - id, - Ok(message), - |_, _| unreachable!(), - ); - assert_eq!(result.unspent_weight, 7); - assert!(!result.dispatch_result); - - assert_eq!( - System::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: Event::Dispatch( - call_dispatch::Event::::MessageWeightMismatch { - source_chain: SOURCE_CHAIN_ID, - id, - expected_weight: call_weight, - passed_weight: 7, - } - ), - topics: vec![], - }], - ); - }); - } - - #[test] - fn should_fail_on_signature_mismatch() { - new_test_ext().execute_with(|| { - let id = [0; 4]; - - let call_origin = CallOrigin::TargetAccount(1, TestAccountPublic(1), TestSignature(99)); - let message = prepare_message( - call_origin, - Call::System(frame_system::Call::remark { remark: vec![1, 2, 3] }), - ); - let weight = message.weight; - - System::set_block_number(1); - let result = Dispatch::dispatch( - SOURCE_CHAIN_ID, - TARGET_CHAIN_ID, - id, - Ok(message), - |_, _| unreachable!(), - ); - assert_eq!(result.unspent_weight, weight); - assert!(!result.dispatch_result); - - assert_eq!( - System::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: Event::Dispatch( - call_dispatch::Event::::MessageSignatureMismatch { - source_chain: SOURCE_CHAIN_ID, - id - } - ), - topics: vec![], - }], - ); - }); - } - - #[test] - fn should_emit_event_for_rejected_messages() { - new_test_ext().execute_with(|| { - let id = [0; 4]; - - System::set_block_number(1); - Dispatch::dispatch( - SOURCE_CHAIN_ID, - TARGET_CHAIN_ID, - id, - Err(()), - |_, _| unreachable!(), - ); - - assert_eq!( - System::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: Event::Dispatch(call_dispatch::Event::::MessageRejected { - source_chain: SOURCE_CHAIN_ID, - id - }), - topics: vec![], - }], - ); - }); - } - - #[test] - fn should_fail_on_call_decode() { - new_test_ext().execute_with(|| { - let id = [0; 4]; - - let mut message = prepare_root_message(Call::System(frame_system::Call::remark { - remark: vec![1, 2, 3], - })); - let weight = message.weight; - message.call.0 = vec![]; - - System::set_block_number(1); - let result = Dispatch::dispatch( - SOURCE_CHAIN_ID, - TARGET_CHAIN_ID, - id, - Ok(message), - |_, _| unreachable!(), - ); - assert_eq!(result.unspent_weight, weight); - assert!(!result.dispatch_result); - - assert_eq!( - System::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: Event::Dispatch( - call_dispatch::Event::::MessageCallDecodeFailed { - source_chain: SOURCE_CHAIN_ID, - id - } - ), - topics: vec![], - }], - ); - }); - } - - #[test] - fn should_emit_event_for_rejected_calls() { - new_test_ext().execute_with(|| { - let id = [0; 4]; - - let call = - Call::System(frame_system::Call::fill_block { ratio: Perbill::from_percent(75) }); - let weight = 
call.get_dispatch_info().weight; - let mut message = prepare_root_message(call); - message.weight = weight; - - System::set_block_number(1); - let result = Dispatch::dispatch( - SOURCE_CHAIN_ID, - TARGET_CHAIN_ID, - id, - Ok(message), - |_, _| unreachable!(), - ); - assert_eq!(result.unspent_weight, weight); - assert!(!result.dispatch_result); - - assert_eq!( - System::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: Event::Dispatch( - call_dispatch::Event::::MessageCallRejected { - source_chain: SOURCE_CHAIN_ID, - id - } - ), - topics: vec![], - }], - ); - }); - } - - #[test] - fn should_emit_event_for_unpaid_calls() { - new_test_ext().execute_with(|| { - let id = [0; 4]; - - let mut message = prepare_root_message(Call::System(frame_system::Call::remark { - remark: vec![1, 2, 3], - })); - let weight = message.weight; - message.dispatch_fee_payment = DispatchFeePayment::AtTargetChain; - - System::set_block_number(1); - let result = - Dispatch::dispatch(SOURCE_CHAIN_ID, TARGET_CHAIN_ID, id, Ok(message), |_, _| { - Err(()) - }); - assert_eq!(result.unspent_weight, weight); - assert!(!result.dispatch_result); - - assert_eq!( - System::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: Event::Dispatch( - call_dispatch::Event::::MessageDispatchPaymentFailed { - source_chain: SOURCE_CHAIN_ID, - id, - origin_account: AccountIdConverter::convert(derive_account_id::< - AccountId, - >( - SOURCE_CHAIN_ID, - SourceAccount::Root - )), - weight: TEST_WEIGHT, - } - ), - topics: vec![], - }], - ); - }); - } - - #[test] - fn should_dispatch_calls_paid_at_target_chain() { - new_test_ext().execute_with(|| { - let id = [0; 4]; - - let mut message = prepare_root_message(Call::System(frame_system::Call::remark { - remark: vec![1, 2, 3], - })); - message.dispatch_fee_payment = DispatchFeePayment::AtTargetChain; - - System::set_block_number(1); - let result = Dispatch::dispatch( - SOURCE_CHAIN_ID, - TARGET_CHAIN_ID, - id, - Ok(message), - |_, _| Ok(()), - ); - assert!(result.dispatch_fee_paid_during_dispatch); - assert!(result.dispatch_result); - - assert_eq!( - System::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: Event::Dispatch( - call_dispatch::Event::::MessageDispatched { - source_chain: SOURCE_CHAIN_ID, - id, - result: Ok(()) - } - ), - topics: vec![], - }], - ); - }); - } - - #[test] - fn should_return_dispatch_failed_flag_if_dispatch_happened_but_failed() { - new_test_ext().execute_with(|| { - let id = [0; 4]; - - let call = Call::System(frame_system::Call::set_heap_pages { pages: 1 }); - let message = prepare_target_message(call); - - System::set_block_number(1); - let result = Dispatch::dispatch( - SOURCE_CHAIN_ID, - TARGET_CHAIN_ID, - id, - Ok(message), - |_, _| unreachable!(), - ); - assert!(!result.dispatch_fee_paid_during_dispatch); - assert!(!result.dispatch_result); - - assert_eq!( - System::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: Event::Dispatch( - call_dispatch::Event::::MessageDispatched { - source_chain: SOURCE_CHAIN_ID, - id, - result: Err(sp_runtime::DispatchError::BadOrigin) - } - ), - topics: vec![], - }], - ); - }) - } - - #[test] - fn should_dispatch_bridge_message_from_root_origin() { - new_test_ext().execute_with(|| { - let id = [0; 4]; - let message = prepare_root_message(Call::System(frame_system::Call::remark { - remark: vec![1, 2, 3], - })); - - System::set_block_number(1); - let result = Dispatch::dispatch( - SOURCE_CHAIN_ID, - TARGET_CHAIN_ID, - id, - Ok(message), - |_, _| 
unreachable!(), - ); - assert!(!result.dispatch_fee_paid_during_dispatch); - assert!(result.dispatch_result); - - assert_eq!( - System::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: Event::Dispatch( - call_dispatch::Event::::MessageDispatched { - source_chain: SOURCE_CHAIN_ID, - id, - result: Ok(()) - } - ), - topics: vec![], - }], - ); - }); - } - - #[test] - fn should_dispatch_bridge_message_from_target_origin() { - new_test_ext().execute_with(|| { - let id = [0; 4]; - - let call = Call::System(frame_system::Call::remark { remark: vec![] }); - let message = prepare_target_message(call); - - System::set_block_number(1); - let result = Dispatch::dispatch( - SOURCE_CHAIN_ID, - TARGET_CHAIN_ID, - id, - Ok(message), - |_, _| unreachable!(), - ); - assert!(!result.dispatch_fee_paid_during_dispatch); - assert!(result.dispatch_result); - - assert_eq!( - System::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: Event::Dispatch( - call_dispatch::Event::::MessageDispatched { - source_chain: SOURCE_CHAIN_ID, - id, - result: Ok(()) - } - ), - topics: vec![], - }], - ); - }) - } - - #[test] - fn should_dispatch_bridge_message_from_source_origin() { - new_test_ext().execute_with(|| { - let id = [0; 4]; - - let call = Call::System(frame_system::Call::remark { remark: vec![] }); - let message = prepare_source_message(call); - - System::set_block_number(1); - let result = Dispatch::dispatch( - SOURCE_CHAIN_ID, - TARGET_CHAIN_ID, - id, - Ok(message), - |_, _| unreachable!(), - ); - assert!(!result.dispatch_fee_paid_during_dispatch); - assert!(result.dispatch_result); - - assert_eq!( - System::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: Event::Dispatch( - call_dispatch::Event::::MessageDispatched { - source_chain: SOURCE_CHAIN_ID, - id, - result: Ok(()) - } - ), - topics: vec![], - }], - ); - }) - } - - #[test] - fn origin_is_checked_when_verifying_sending_message_using_source_root_account() { - let call = Call::System(frame_system::Call::remark { remark: vec![] }); - let message = prepare_root_message(call); - - // When message is sent by Root, CallOrigin::SourceRoot is allowed - assert!(matches!(verify_message_origin(&RawOrigin::Root, &message), Ok(None))); - - // when message is sent by some real account, CallOrigin::SourceRoot is not allowed - assert!(matches!(verify_message_origin(&RawOrigin::Signed(1), &message), Err(BadOrigin))); - } - - #[test] - fn origin_is_checked_when_verifying_sending_message_using_target_account() { - let call = Call::System(frame_system::Call::remark { remark: vec![] }); - let message = prepare_target_message(call); - - // When message is sent by Root, CallOrigin::TargetAccount is not allowed - assert!(matches!(verify_message_origin(&RawOrigin::Root, &message), Err(BadOrigin))); - - // When message is sent by some other account, it is rejected - assert!(matches!(verify_message_origin(&RawOrigin::Signed(2), &message), Err(BadOrigin))); - - // When message is sent by a real account, it is allowed to have origin - // CallOrigin::TargetAccount - assert!(matches!(verify_message_origin(&RawOrigin::Signed(1), &message), Ok(Some(1)))); - } - - #[test] - fn origin_is_checked_when_verifying_sending_message_using_source_account() { - let call = Call::System(frame_system::Call::remark { remark: vec![] }); - let message = prepare_source_message(call); - - // Sending a message from the expected origin account works - assert!(matches!(verify_message_origin(&RawOrigin::Signed(1), &message), Ok(Some(1)))); - - // If 
we send a message from a different account, it is rejected - assert!(matches!(verify_message_origin(&RawOrigin::Signed(2), &message), Err(BadOrigin))); - - // The Root account is allowed to assume any expected origin account - assert!(matches!(verify_message_origin(&RawOrigin::Root, &message), Ok(Some(1)))); - } -} diff --git a/polkadot/bridges/modules/grandpa/Cargo.toml b/polkadot/bridges/modules/grandpa/Cargo.toml deleted file mode 100644 index 335a863438a..00000000000 --- a/polkadot/bridges/modules/grandpa/Cargo.toml +++ /dev/null @@ -1,62 +0,0 @@ -[package] -name = "pallet-bridge-grandpa" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } -finality-grandpa = { version = "0.16.0", default-features = false } -log = { version = "0.4.14", default-features = false } -num-traits = { version = "0.2", default-features = false } -scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } -serde = { version = "1.0", optional = true } - -# Bridge Dependencies - -bp-runtime = { path = "../../primitives/runtime", default-features = false } -bp-header-chain = { path = "../../primitives/header-chain", default-features = false } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } - -# Optional Benchmarking Dependencies -bp-test-utils = { path = "../../primitives/test-utils", default-features = false, optional = true } -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } - -[dev-dependencies] -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" } - -[features] -default = ["std"] -std = [ - "bp-header-chain/std", - "bp-runtime/std", - "bp-test-utils/std", - "codec/std", - "finality-grandpa/std", - "frame-support/std", - "frame-system/std", - "log/std", - "num-traits/std", - "scale-info/std", - "serde", - "sp-finality-grandpa/std", - "sp-runtime/std", - "sp-std/std", - "sp-trie/std", -] -runtime-benchmarks = [ - "bp-test-utils", - "frame-benchmarking/runtime-benchmarks", -] diff --git a/polkadot/bridges/modules/grandpa/src/benchmarking.rs b/polkadot/bridges/modules/grandpa/src/benchmarking.rs deleted file mode 100644 index 46e1e41a870..00000000000 --- a/polkadot/bridges/modules/grandpa/src/benchmarking.rs +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Benchmarks for the GRANDPA Pallet. -//! -//! The main dispatchable for the GRANDPA pallet is `submit_finality_proof`, so these benchmarks are -//! based around that. There are to main factors which affect finality proof verification: -//! -//! 1. The number of `votes-ancestries` in the justification -//! 2. The number of `pre-commits` in the justification -//! -//! Vote ancestries are the headers between (`finality_target`, `head_of_chain`], where -//! `header_of_chain` is a descendant of `finality_target`. -//! -//! Pre-commits are messages which are signed by validators at the head of the chain they think is -//! the best. -//! -//! Consider the following: -//! -//! / [B'] <- [C'] -//! [A] <- [B] <- [C] -//! -//! The common ancestor of both forks is block A, so this is what GRANDPA will finalize. In order to -//! verify this we will have vote ancestries of `[B, C, B', C']` and pre-commits `[C, C']`. -//! -//! Note that the worst case scenario here would be a justification where each validator has it's -//! own fork which is `SESSION_LENGTH` blocks long. - -use crate::*; - -use bp_test_utils::{ - accounts, make_justification_for_header, JustificationGeneratorParams, TEST_GRANDPA_ROUND, - TEST_GRANDPA_SET_ID, -}; -use frame_benchmarking::{benchmarks_instance_pallet, whitelisted_caller}; -use frame_support::traits::Get; -use frame_system::RawOrigin; -use sp_finality_grandpa::AuthorityId; -use sp_runtime::traits::Zero; -use sp_std::vec::Vec; - -// The maximum number of vote ancestries to include in a justification. -// -// In practice this would be limited by the session length (number of blocks a single authority set -// can produce) of a given chain. -const MAX_VOTE_ANCESTRIES: u32 = 1000; - -// The maximum number of pre-commits to include in a justification. In practice this scales with the -// number of validators. -const MAX_VALIDATOR_SET_SIZE: u32 = 1024; - -/// Returns number of first header to be imported. -/// -/// Since we bootstrap the pallet with `HeadersToKeep` already imported headers, -/// this function computes the next expected header number to import. -fn header_number, I: 'static, N: From>() -> N { - (T::HeadersToKeep::get() + 1).into() -} - -/// Prepare header and its justification to submit using `submit_finality_proof`. 
-fn prepare_benchmark_data, I: 'static>( - precommits: u32, - ancestors: u32, -) -> (BridgedHeader, GrandpaJustification>) { - let authority_list = accounts(precommits as u16) - .iter() - .map(|id| (AuthorityId::from(*id), 1)) - .collect::>(); - - let init_data = InitializationData { - header: Box::new(bp_test_utils::test_header(Zero::zero())), - authority_list, - set_id: TEST_GRANDPA_SET_ID, - is_halted: false, - }; - - bootstrap_bridge::(init_data); - - let header: BridgedHeader = bp_test_utils::test_header(header_number::()); - let params = JustificationGeneratorParams { - header: header.clone(), - round: TEST_GRANDPA_ROUND, - set_id: TEST_GRANDPA_SET_ID, - authorities: accounts(precommits as u16).iter().map(|k| (*k, 1)).collect::>(), - ancestors, - forks: 1, - }; - let justification = make_justification_for_header(params); - (header, justification) -} - -benchmarks_instance_pallet! { - // This is the "gold standard" benchmark for this extrinsic, and it's what should be used to - // annotate the weight in the pallet. - submit_finality_proof { - let p in 1..MAX_VALIDATOR_SET_SIZE; - let v in 1..MAX_VOTE_ANCESTRIES; - let caller: T::AccountId = whitelisted_caller(); - let (header, justification) = prepare_benchmark_data::(p, v); - }: submit_finality_proof(RawOrigin::Signed(caller), Box::new(header), justification) - verify { - let header: BridgedHeader = bp_test_utils::test_header(header_number::()); - let expected_hash = header.hash(); - - assert_eq!(>::get(), expected_hash); - assert!(>::contains_key(expected_hash)); - } -} diff --git a/polkadot/bridges/modules/grandpa/src/lib.rs b/polkadot/bridges/modules/grandpa/src/lib.rs deleted file mode 100644 index 9b04f3635c4..00000000000 --- a/polkadot/bridges/modules/grandpa/src/lib.rs +++ /dev/null @@ -1,1165 +0,0 @@ -// Copyright 2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate GRANDPA Pallet -//! -//! This pallet is an on-chain GRANDPA light client for Substrate based chains. -//! -//! This pallet achieves this by trustlessly verifying GRANDPA finality proofs on-chain. Once -//! verified, finalized headers are stored in the pallet, thereby creating a sparse header chain. -//! This sparse header chain can be used as a source of truth for other higher-level applications. -//! -//! The pallet is responsible for tracking GRANDPA validator set hand-offs. We only import headers -//! with justifications signed by the current validator set we know of. The header is inspected for -//! a `ScheduledChanges` digest item, which is then used to update to next validator set. -//! -//! Since this pallet only tracks finalized headers it does not deal with forks. Forks can only -//! occur if the GRANDPA validator set on the bridged chain is either colluding or there is a severe -//! bug causing resulting in an equivocation. 
Such events are outside the scope of this pallet. -//! Shall the fork occur on the bridged chain governance intervention will be required to -//! re-initialize the bridge and track the right fork. - -#![cfg_attr(not(feature = "std"), no_std)] -// Runtime-generated enums -#![allow(clippy::large_enum_variant)] - -use crate::weights::WeightInfo; - -use bp_header_chain::{justification::GrandpaJustification, InitializationData}; -use bp_runtime::{BlockNumberOf, Chain, HashOf, HasherOf, HeaderOf}; -use finality_grandpa::voter_set::VoterSet; -use frame_support::{ensure, fail}; -use frame_system::{ensure_signed, RawOrigin}; -use sp_finality_grandpa::{ConsensusLog, GRANDPA_ENGINE_ID}; -use sp_runtime::traits::{BadOrigin, Header as HeaderT, Zero}; -use sp_std::boxed::Box; - -#[cfg(test)] -mod mock; - -/// Pallet containing weights for this pallet. -pub mod weights; - -#[cfg(feature = "runtime-benchmarks")] -pub mod benchmarking; - -// Re-export in crate namespace for `construct_runtime!` -pub use pallet::*; - -/// Block number of the bridged chain. -pub type BridgedBlockNumber = BlockNumberOf<>::BridgedChain>; -/// Block hash of the bridged chain. -pub type BridgedBlockHash = HashOf<>::BridgedChain>; -/// Hasher of the bridged chain. -pub type BridgedBlockHasher = HasherOf<>::BridgedChain>; -/// Header of the bridged chain. -pub type BridgedHeader = HeaderOf<>::BridgedChain>; - -#[frame_support::pallet] -pub mod pallet { - use super::*; - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - - #[pallet::config] - pub trait Config: frame_system::Config { - /// The chain we are bridging to here. - type BridgedChain: Chain; - - /// The upper bound on the number of requests allowed by the pallet. - /// - /// A request refers to an action which writes a header to storage. - /// - /// Once this bound is reached the pallet will not allow any dispatchables to be called - /// until the request count has decreased. - #[pallet::constant] - type MaxRequests: Get; - - /// Maximal number of finalized headers to keep in the storage. - /// - /// The setting is there to prevent growing the on-chain state indefinitely. Note - /// the setting does not relate to block numbers - we will simply keep as much items - /// in the storage, so it doesn't guarantee any fixed timeframe for finality headers. - #[pallet::constant] - type HeadersToKeep: Get; - - /// Weights gathered through benchmarking. - type WeightInfo: WeightInfo; - } - - #[pallet::pallet] - #[pallet::without_storage_info] - pub struct Pallet(PhantomData<(T, I)>); - - #[pallet::hooks] - impl, I: 'static> Hooks> for Pallet { - fn on_initialize(_n: T::BlockNumber) -> frame_support::weights::Weight { - >::mutate(|count| *count = count.saturating_sub(1)); - - (0_u64) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - } - - #[pallet::call] - impl, I: 'static> Pallet { - /// Verify a target header is finalized according to the given finality proof. - /// - /// It will use the underlying storage pallet to fetch information about the current - /// authorities and best finalized header in order to verify that the header is finalized. - /// - /// If successful in verification, it will write the target header to the underlying storage - /// pallet. 
- #[pallet::weight(T::WeightInfo::submit_finality_proof( - justification.commit.precommits.len().try_into().unwrap_or(u32::MAX), - justification.votes_ancestries.len().try_into().unwrap_or(u32::MAX), - ))] - pub fn submit_finality_proof( - origin: OriginFor, - finality_target: Box>, - justification: GrandpaJustification>, - ) -> DispatchResultWithPostInfo { - ensure_operational::()?; - let _ = ensure_signed(origin)?; - - ensure!(Self::request_count() < T::MaxRequests::get(), >::TooManyRequests); - - let (hash, number) = (finality_target.hash(), finality_target.number()); - log::trace!(target: "runtime::bridge-grandpa", "Going to try and finalize header {:?}", finality_target); - - let best_finalized = match >::get(>::get()) { - Some(best_finalized) => best_finalized, - None => { - log::error!( - target: "runtime::bridge-grandpa", - "Cannot finalize header {:?} because pallet is not yet initialized", - finality_target, - ); - fail!(>::NotInitialized); - }, - }; - - // We do a quick check here to ensure that our header chain is making progress and isn't - // "travelling back in time" (which could be indicative of something bad, e.g a - // hard-fork). - ensure!(best_finalized.number() < number, >::OldHeader); - - let authority_set = >::get(); - let set_id = authority_set.set_id; - verify_justification::(&justification, hash, *number, authority_set)?; - - let is_authorities_change_enacted = - try_enact_authority_change::(&finality_target, set_id)?; - >::mutate(|count| *count += 1); - insert_header::(*finality_target, hash); - log::info!(target: "runtime::bridge-grandpa", "Succesfully imported finalized header with hash {:?}!", hash); - - // mandatory header is a header that changes authorities set. The pallet can't go - // further without importing this header. So every bridge MUST import mandatory headers. - // - // We don't want to charge extra costs for mandatory operations. So relayer is not - // paying fee for mandatory headers import transactions. - let is_mandatory_header = is_authorities_change_enacted; - let pays_fee = if is_mandatory_header { Pays::No } else { Pays::Yes }; - - Ok(pays_fee.into()) - } - - /// Bootstrap the bridge pallet with an initial header and authority set from which to sync. - /// - /// The initial configuration provided does not need to be the genesis header of the bridged - /// chain, it can be any arbitrary header. You can also provide the next scheduled set - /// change if it is already know. - /// - /// This function is only allowed to be called from a trusted origin and writes to storage - /// with practically no checks in terms of the validity of the data. It is important that - /// you ensure that valid data is being passed in. - #[pallet::weight((T::DbWeight::get().reads_writes(2, 5), DispatchClass::Operational))] - pub fn initialize( - origin: OriginFor, - init_data: super::InitializationData>, - ) -> DispatchResultWithPostInfo { - ensure_owner_or_root::(origin)?; - - let init_allowed = !>::exists(); - ensure!(init_allowed, >::AlreadyInitialized); - initialize_bridge::(init_data.clone()); - - log::info!( - target: "runtime::bridge-grandpa", - "Pallet has been initialized with the following parameters: {:?}", - init_data - ); - - Ok(().into()) - } - - /// Change `PalletOwner`. - /// - /// May only be called either by root, or by `PalletOwner`. 
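Editor's note: the block below is a minimal, self-contained sketch added for illustration; it is not part of the removed pallet. It models the `MaxRequests`/`RequestCount` rate limiting described above, where every accepted `submit_finality_proof` increments the request count, `on_initialize` decrements it once per block, and further imports are rejected while the count has reached `MaxRequests`. The `RateLimiter` type and its methods are hypothetical names used only for this sketch.

```rust
/// Hypothetical stand-alone model of the pallet's `MaxRequests`/`RequestCount`
/// rate limiting; not the pallet's actual storage items.
struct RateLimiter {
    max_requests: u32,
    request_count: u32,
}

impl RateLimiter {
    fn new(max_requests: u32) -> Self {
        Self { max_requests, request_count: 0 }
    }

    /// Mirrors the check and increment performed by `submit_finality_proof`.
    fn try_submit(&mut self) -> Result<(), &'static str> {
        if self.request_count >= self.max_requests {
            return Err("TooManyRequests");
        }
        self.request_count += 1;
        Ok(())
    }

    /// Mirrors the decrement performed in `on_initialize` at every new block.
    fn on_new_block(&mut self) {
        self.request_count = self.request_count.saturating_sub(1);
    }
}

fn main() {
    // With `MaxRequests = 2` (the value used in the pallet's mock runtime),
    // two imports fit into a block, the third is rejected, and one slot of
    // capacity returns at the start of the next block.
    let mut limiter = RateLimiter::new(2);
    assert!(limiter.try_submit().is_ok());
    assert!(limiter.try_submit().is_ok());
    assert!(limiter.try_submit().is_err());
    limiter.on_new_block();
    assert!(limiter.try_submit().is_ok());
}
```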
- #[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] - pub fn set_owner( - origin: OriginFor, - new_owner: Option, - ) -> DispatchResultWithPostInfo { - ensure_owner_or_root::(origin)?; - match new_owner { - Some(new_owner) => { - PalletOwner::::put(&new_owner); - log::info!(target: "runtime::bridge-grandpa", "Setting pallet Owner to: {:?}", new_owner); - }, - None => { - PalletOwner::::kill(); - log::info!(target: "runtime::bridge-grandpa", "Removed Owner of pallet."); - }, - } - - Ok(().into()) - } - - /// Halt or resume all pallet operations. - /// - /// May only be called either by root, or by `PalletOwner`. - #[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] - pub fn set_operational( - origin: OriginFor, - operational: bool, - ) -> DispatchResultWithPostInfo { - ensure_owner_or_root::(origin)?; - >::put(!operational); - - if operational { - log::info!(target: "runtime::bridge-grandpa", "Resuming pallet operations."); - } else { - log::warn!(target: "runtime::bridge-grandpa", "Stopping pallet operations."); - } - - Ok(().into()) - } - } - - /// The current number of requests which have written to storage. - /// - /// If the `RequestCount` hits `MaxRequests`, no more calls will be allowed to the pallet until - /// the request capacity is increased. - /// - /// The `RequestCount` is decreased by one at the beginning of every block. This is to ensure - /// that the pallet can always make progress. - #[pallet::storage] - #[pallet::getter(fn request_count)] - pub(super) type RequestCount, I: 'static = ()> = StorageValue<_, u32, ValueQuery>; - - /// Hash of the header used to bootstrap the pallet. - #[pallet::storage] - pub(super) type InitialHash, I: 'static = ()> = - StorageValue<_, BridgedBlockHash, ValueQuery>; - - /// Hash of the best finalized header. - #[pallet::storage] - pub(super) type BestFinalized, I: 'static = ()> = - StorageValue<_, BridgedBlockHash, ValueQuery>; - - /// A ring buffer of imported hashes. Ordered by the insertion time. - #[pallet::storage] - pub(super) type ImportedHashes, I: 'static = ()> = - StorageMap<_, Identity, u32, BridgedBlockHash>; - - /// Current ring buffer position. - #[pallet::storage] - pub(super) type ImportedHashesPointer, I: 'static = ()> = - StorageValue<_, u32, ValueQuery>; - - /// Headers which have been imported into the pallet. - #[pallet::storage] - pub(super) type ImportedHeaders, I: 'static = ()> = - StorageMap<_, Identity, BridgedBlockHash, BridgedHeader>; - - /// The current GRANDPA Authority set. - #[pallet::storage] - pub(super) type CurrentAuthoritySet, I: 'static = ()> = - StorageValue<_, bp_header_chain::AuthoritySet, ValueQuery>; - - /// Optional pallet owner. - /// - /// Pallet owner has a right to halt all pallet operations and then resume it. If it is - /// `None`, then there are no direct ways to halt/resume pallet operations, but other - /// runtime methods may still be used to do that (i.e. democracy::referendum to update halt - /// flag directly or call the `halt_operations`). - #[pallet::storage] - pub type PalletOwner, I: 'static = ()> = - StorageValue<_, T::AccountId, OptionQuery>; - - /// If true, all pallet transactions are failed immediately. - #[pallet::storage] - pub(super) type IsHalted, I: 'static = ()> = StorageValue<_, bool, ValueQuery>; - - #[pallet::genesis_config] - pub struct GenesisConfig, I: 'static = ()> { - /// Optional module owner account. - pub owner: Option, - /// Optional module initialization data. 
- pub init_data: Option>>, - } - - #[cfg(feature = "std")] - impl, I: 'static> Default for GenesisConfig { - fn default() -> Self { - Self { owner: None, init_data: None } - } - } - - #[pallet::genesis_build] - impl, I: 'static> GenesisBuild for GenesisConfig { - fn build(&self) { - if let Some(ref owner) = self.owner { - >::put(owner); - } - - if let Some(init_data) = self.init_data.clone() { - initialize_bridge::(init_data); - } else { - // Since the bridge hasn't been initialized we shouldn't allow anyone to perform - // transactions. - >::put(true); - } - } - } - - #[pallet::error] - pub enum Error { - /// The given justification is invalid for the given header. - InvalidJustification, - /// The authority set from the underlying header chain is invalid. - InvalidAuthoritySet, - /// There are too many requests for the current window to handle. - TooManyRequests, - /// The header being imported is older than the best finalized header known to the pallet. - OldHeader, - /// The header is unknown to the pallet. - UnknownHeader, - /// The scheduled authority set change found in the header is unsupported by the pallet. - /// - /// This is the case for non-standard (e.g forced) authority set changes. - UnsupportedScheduledChange, - /// The pallet is not yet initialized. - NotInitialized, - /// The pallet has already been initialized. - AlreadyInitialized, - /// All pallet operations are halted. - Halted, - /// The storage proof doesn't contains storage root. So it is invalid for given header. - StorageRootMismatch, - } - - /// Check the given header for a GRANDPA scheduled authority set change. If a change - /// is found it will be enacted immediately. - /// - /// This function does not support forced changes, or scheduled changes with delays - /// since these types of changes are indicative of abnormal behavior from GRANDPA. - /// - /// Returned value will indicate if a change was enacted or not. - pub(crate) fn try_enact_authority_change, I: 'static>( - header: &BridgedHeader, - current_set_id: sp_finality_grandpa::SetId, - ) -> Result { - let mut change_enacted = false; - - // We don't support forced changes - at that point governance intervention is required. - ensure!( - super::find_forced_change(header).is_none(), - >::UnsupportedScheduledChange - ); - - if let Some(change) = super::find_scheduled_change(header) { - // GRANDPA only includes a `delay` for forced changes, so this isn't valid. - ensure!(change.delay == Zero::zero(), >::UnsupportedScheduledChange); - - // TODO [#788]: Stop manually increasing the `set_id` here. - let next_authorities = bp_header_chain::AuthoritySet { - authorities: change.next_authorities, - set_id: current_set_id + 1, - }; - - // Since our header schedules a change and we know the delay is 0, it must also enact - // the change. - >::put(&next_authorities); - change_enacted = true; - - log::info!( - target: "runtime::bridge-grandpa", - "Transitioned from authority set {} to {}! New authorities are: {:?}", - current_set_id, - current_set_id + 1, - next_authorities, - ); - }; - - Ok(change_enacted) - } - - /// Verify a GRANDPA justification (finality proof) for a given header. - /// - /// Will use the GRANDPA current authorities known to the pallet. - /// - /// If successful it returns the decoded GRANDPA justification so we can refund any weight which - /// was overcharged in the initial call. 
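Editor's note: a small, self-contained sketch (not part of the removed pallet) of the authority-set hand-off rule implemented by `try_enact_authority_change`: forced changes and delayed scheduled changes are rejected, while an immediate scheduled change replaces the authority list and bumps the set id by one. The types below are simplified stand-ins for the real `sp_finality_grandpa`/`bp_header_chain` types.

```rust
/// Simplified stand-in for a GRANDPA scheduled-change digest entry.
struct ScheduledChange {
    next_authorities: Vec<(u64, u64)>, // (authority id, weight), simplified
    delay: u32,
}

/// Simplified stand-in for the stored authority set.
struct AuthoritySet {
    authorities: Vec<(u64, u64)>,
    set_id: u64,
}

/// Mirrors the rule described above: no forced changes, no delayed changes;
/// an immediate scheduled change is enacted right away with `set_id + 1`.
fn enact_change(
    current: AuthoritySet,
    forced_change: Option<ScheduledChange>,
    scheduled_change: Option<ScheduledChange>,
) -> Result<AuthoritySet, &'static str> {
    if forced_change.is_some() {
        return Err("UnsupportedScheduledChange: forced changes need governance intervention");
    }
    match scheduled_change {
        Some(change) if change.delay == 0 => Ok(AuthoritySet {
            authorities: change.next_authorities,
            set_id: current.set_id + 1,
        }),
        Some(_) => Err("UnsupportedScheduledChange: delayed changes are rejected"),
        None => Ok(current), // no change signalled; keep the current set
    }
}

fn main() {
    let current = AuthoritySet { authorities: vec![(1, 1)], set_id: 1 };
    let change = ScheduledChange { next_authorities: vec![(2, 1), (3, 1)], delay: 0 };
    let next = enact_change(current, None, Some(change)).unwrap();
    assert_eq!(next.set_id, 2);
    assert_eq!(next.authorities.len(), 2);
}
```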
- pub(crate) fn verify_justification, I: 'static>( - justification: &GrandpaJustification>, - hash: BridgedBlockHash, - number: BridgedBlockNumber, - authority_set: bp_header_chain::AuthoritySet, - ) -> Result<(), sp_runtime::DispatchError> { - use bp_header_chain::justification::verify_justification; - - let voter_set = - VoterSet::new(authority_set.authorities).ok_or(>::InvalidAuthoritySet)?; - let set_id = authority_set.set_id; - - Ok(verify_justification::>( - (hash, number), - set_id, - &voter_set, - justification, - ) - .map_err(|e| { - log::error!( - target: "runtime::bridge-grandpa", - "Received invalid justification for {:?}: {:?}", - hash, - e, - ); - >::InvalidJustification - })?) - } - - /// Import a previously verified header to the storage. - /// - /// Note this function solely takes care of updating the storage and pruning old entries, - /// but does not verify the validity of such import. - pub(crate) fn insert_header, I: 'static>( - header: BridgedHeader, - hash: BridgedBlockHash, - ) { - let index = >::get(); - let pruning = >::try_get(index); - >::put(hash); - >::insert(hash, header); - >::insert(index, hash); - - // Update ring buffer pointer and remove old header. - >::put((index + 1) % T::HeadersToKeep::get()); - if let Ok(hash) = pruning { - log::debug!(target: "runtime::bridge-grandpa", "Pruning old header: {:?}.", hash); - >::remove(hash); - } - } - - /// Since this writes to storage with no real checks this should only be used in functions that - /// were called by a trusted origin. - pub(crate) fn initialize_bridge, I: 'static>( - init_params: super::InitializationData>, - ) { - let super::InitializationData { header, authority_list, set_id, is_halted } = init_params; - - let initial_hash = header.hash(); - >::put(initial_hash); - >::put(0); - insert_header::(*header, initial_hash); - - let authority_set = bp_header_chain::AuthoritySet::new(authority_list, set_id); - >::put(authority_set); - - >::put(is_halted); - } - - #[cfg(feature = "runtime-benchmarks")] - pub(crate) fn bootstrap_bridge, I: 'static>( - init_params: super::InitializationData>, - ) { - let start_number = *init_params.header.number(); - let end_number = start_number + T::HeadersToKeep::get().into(); - initialize_bridge::(init_params); - - let mut number = start_number; - while number < end_number { - number = number + sp_runtime::traits::One::one(); - let header = >::new( - number, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - ); - let hash = header.hash(); - insert_header::(header, hash); - } - } - - /// Ensure that the origin is either root, or `PalletOwner`. - fn ensure_owner_or_root, I: 'static>(origin: T::Origin) -> Result<(), BadOrigin> { - match origin.into() { - Ok(RawOrigin::Root) => Ok(()), - Ok(RawOrigin::Signed(ref signer)) - if Some(signer) == >::get().as_ref() => - Ok(()), - _ => Err(BadOrigin), - } - } - - /// Ensure that the pallet is in operational mode (not halted). - fn ensure_operational, I: 'static>() -> Result<(), Error> { - if >::get() { - Err(>::Halted) - } else { - Ok(()) - } - } -} - -impl, I: 'static> Pallet { - /// Get the best finalized header the pallet knows of. - /// - /// Returns a dummy header if there is no best header. This can only happen - /// if the pallet has not been initialized yet. 
- pub fn best_finalized() -> BridgedHeader { - let hash = >::get(); - >::get(hash).unwrap_or_else(|| { - >::new( - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - ) - }) - } - - /// Check if a particular header is known to the bridge pallet. - pub fn is_known_header(hash: BridgedBlockHash) -> bool { - >::contains_key(hash) - } - - /// Verify that the passed storage proof is valid, given it is crafted using - /// known finalized header. If the proof is valid, then the `parse` callback - /// is called and the function returns its result. - pub fn parse_finalized_storage_proof( - hash: BridgedBlockHash, - storage_proof: sp_trie::StorageProof, - parse: impl FnOnce(bp_runtime::StorageProofChecker>) -> R, - ) -> Result { - let header = >::get(hash).ok_or(Error::::UnknownHeader)?; - let storage_proof_checker = - bp_runtime::StorageProofChecker::new(*header.state_root(), storage_proof) - .map_err(|_| Error::::StorageRootMismatch)?; - - Ok(parse(storage_proof_checker)) - } -} - -pub(crate) fn find_scheduled_change( - header: &H, -) -> Option> { - use sp_runtime::generic::OpaqueDigestItemId; - - let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); - - let filter_log = |log: ConsensusLog| match log { - ConsensusLog::ScheduledChange(change) => Some(change), - _ => None, - }; - - // find the first consensus digest with the right ID which converts to - // the right kind of consensus log. - header.digest().convert_first(|l| l.try_to(id).and_then(filter_log)) -} - -/// Checks the given header for a consensus digest signaling a **forced** scheduled change and -/// extracts it. -pub(crate) fn find_forced_change( - header: &H, -) -> Option<(H::Number, sp_finality_grandpa::ScheduledChange)> { - use sp_runtime::generic::OpaqueDigestItemId; - - let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); - - let filter_log = |log: ConsensusLog| match log { - ConsensusLog::ForcedChange(delay, change) => Some((delay, change)), - _ => None, - }; - - // find the first consensus digest with the right ID which converts to - // the right kind of consensus log. - header.digest().convert_first(|l| l.try_to(id).and_then(filter_log)) -} - -/// (Re)initialize bridge with given header for using it in `pallet-bridge-messages` benchmarks. 
-#[cfg(feature = "runtime-benchmarks")] -pub fn initialize_for_benchmarks, I: 'static>(header: BridgedHeader) { - initialize_bridge::(InitializationData { - header: Box::new(header), - authority_list: sp_std::vec::Vec::new(), /* we don't verify any proofs in external - * benchmarks */ - set_id: 0, - is_halted: false, - }); -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::{run_test, test_header, Origin, TestHeader, TestNumber, TestRuntime}; - use bp_test_utils::{ - authority_list, make_default_justification, make_justification_for_header, - JustificationGeneratorParams, ALICE, BOB, - }; - use codec::Encode; - use frame_support::{ - assert_err, assert_noop, assert_ok, storage::generator::StorageValue, - weights::PostDispatchInfo, - }; - use sp_runtime::{Digest, DigestItem, DispatchError}; - - fn initialize_substrate_bridge() { - assert_ok!(init_with_origin(Origin::root())); - } - - fn init_with_origin( - origin: Origin, - ) -> Result< - InitializationData, - sp_runtime::DispatchErrorWithPostInfo, - > { - let genesis = test_header(0); - - let init_data = InitializationData { - header: Box::new(genesis), - authority_list: authority_list(), - set_id: 1, - is_halted: false, - }; - - Pallet::::initialize(origin, init_data.clone()).map(|_| init_data) - } - - fn submit_finality_proof(header: u8) -> frame_support::dispatch::DispatchResultWithPostInfo { - let header = test_header(header.into()); - let justification = make_default_justification(&header); - - Pallet::::submit_finality_proof( - Origin::signed(1), - Box::new(header), - justification, - ) - } - - fn next_block() { - use frame_support::traits::OnInitialize; - - let current_number = frame_system::Pallet::::block_number(); - frame_system::Pallet::::set_block_number(current_number + 1); - let _ = Pallet::::on_initialize(current_number); - } - - fn change_log(delay: u64) -> Digest { - let consensus_log = - ConsensusLog::::ScheduledChange(sp_finality_grandpa::ScheduledChange { - next_authorities: vec![(ALICE.into(), 1), (BOB.into(), 1)], - delay, - }); - - Digest { logs: vec![DigestItem::Consensus(GRANDPA_ENGINE_ID, consensus_log.encode())] } - } - - fn forced_change_log(delay: u64) -> Digest { - let consensus_log = ConsensusLog::::ForcedChange( - delay, - sp_finality_grandpa::ScheduledChange { - next_authorities: vec![(ALICE.into(), 1), (BOB.into(), 1)], - delay, - }, - ); - - Digest { logs: vec![DigestItem::Consensus(GRANDPA_ENGINE_ID, consensus_log.encode())] } - } - - #[test] - fn init_root_or_owner_origin_can_initialize_pallet() { - run_test(|| { - assert_noop!(init_with_origin(Origin::signed(1)), DispatchError::BadOrigin); - assert_ok!(init_with_origin(Origin::root())); - - // Reset storage so we can initialize the pallet again - BestFinalized::::kill(); - PalletOwner::::put(2); - assert_ok!(init_with_origin(Origin::signed(2))); - }) - } - - #[test] - fn init_storage_entries_are_correctly_initialized() { - run_test(|| { - assert_eq!( - BestFinalized::::get(), - BridgedBlockHash::::default() - ); - assert_eq!(Pallet::::best_finalized(), test_header(0)); - - let init_data = init_with_origin(Origin::root()).unwrap(); - - assert!(>::contains_key(init_data.header.hash())); - assert_eq!(BestFinalized::::get(), init_data.header.hash()); - assert_eq!( - CurrentAuthoritySet::::get().authorities, - init_data.authority_list - ); - assert!(!IsHalted::::get()); - }) - } - - #[test] - fn init_can_only_initialize_pallet_once() { - run_test(|| { - initialize_substrate_bridge(); - assert_noop!( - init_with_origin(Origin::root()), - 
>::AlreadyInitialized - ); - }) - } - - #[test] - fn pallet_owner_may_change_owner() { - run_test(|| { - PalletOwner::::put(2); - - assert_ok!(Pallet::::set_owner(Origin::root(), Some(1))); - assert_noop!( - Pallet::::set_operational(Origin::signed(2), false), - DispatchError::BadOrigin, - ); - assert_ok!(Pallet::::set_operational(Origin::root(), false)); - - assert_ok!(Pallet::::set_owner(Origin::signed(1), None)); - assert_noop!( - Pallet::::set_operational(Origin::signed(1), true), - DispatchError::BadOrigin, - ); - assert_noop!( - Pallet::::set_operational(Origin::signed(2), true), - DispatchError::BadOrigin, - ); - assert_ok!(Pallet::::set_operational(Origin::root(), true)); - }); - } - - #[test] - fn pallet_may_be_halted_by_root() { - run_test(|| { - assert_ok!(Pallet::::set_operational(Origin::root(), false)); - assert_ok!(Pallet::::set_operational(Origin::root(), true)); - }); - } - - #[test] - fn pallet_may_be_halted_by_owner() { - run_test(|| { - PalletOwner::::put(2); - - assert_ok!(Pallet::::set_operational(Origin::signed(2), false)); - assert_ok!(Pallet::::set_operational(Origin::signed(2), true)); - - assert_noop!( - Pallet::::set_operational(Origin::signed(1), false), - DispatchError::BadOrigin, - ); - assert_noop!( - Pallet::::set_operational(Origin::signed(1), true), - DispatchError::BadOrigin, - ); - - assert_ok!(Pallet::::set_operational(Origin::signed(2), false)); - assert_noop!( - Pallet::::set_operational(Origin::signed(1), true), - DispatchError::BadOrigin, - ); - }); - } - - #[test] - fn pallet_rejects_transactions_if_halted() { - run_test(|| { - initialize_substrate_bridge(); - - assert_ok!(Pallet::::set_operational(Origin::root(), false)); - assert_noop!(submit_finality_proof(1), Error::::Halted); - - assert_ok!(Pallet::::set_operational(Origin::root(), true)); - assert_ok!(submit_finality_proof(1)); - }) - } - - #[test] - fn pallet_rejects_header_if_not_initialized_yet() { - run_test(|| { - assert_noop!(submit_finality_proof(1), Error::::NotInitialized); - }); - } - - #[test] - fn succesfully_imports_header_with_valid_finality() { - run_test(|| { - initialize_substrate_bridge(); - assert_ok!( - submit_finality_proof(1), - PostDispatchInfo { - actual_weight: None, - pays_fee: frame_support::weights::Pays::Yes, - }, - ); - - let header = test_header(1); - assert_eq!(>::get(), header.hash()); - assert!(>::contains_key(header.hash())); - }) - } - - #[test] - fn rejects_justification_that_skips_authority_set_transition() { - run_test(|| { - initialize_substrate_bridge(); - - let header = test_header(1); - - let params = - JustificationGeneratorParams:: { set_id: 2, ..Default::default() }; - let justification = make_justification_for_header(params); - - assert_err!( - Pallet::::submit_finality_proof( - Origin::signed(1), - Box::new(header), - justification, - ), - >::InvalidJustification - ); - }) - } - - #[test] - fn does_not_import_header_with_invalid_finality_proof() { - run_test(|| { - initialize_substrate_bridge(); - - let header = test_header(1); - let mut justification = make_default_justification(&header); - justification.round = 42; - - assert_err!( - Pallet::::submit_finality_proof( - Origin::signed(1), - Box::new(header), - justification, - ), - >::InvalidJustification - ); - }) - } - - #[test] - fn disallows_invalid_authority_set() { - run_test(|| { - let genesis = test_header(0); - - let invalid_authority_list = vec![(ALICE.into(), u64::MAX), (BOB.into(), u64::MAX)]; - let init_data = InitializationData { - header: Box::new(genesis), - authority_list: 
invalid_authority_list, - set_id: 1, - is_halted: false, - }; - - assert_ok!(Pallet::::initialize(Origin::root(), init_data)); - - let header = test_header(1); - let justification = make_default_justification(&header); - - assert_err!( - Pallet::::submit_finality_proof( - Origin::signed(1), - Box::new(header), - justification, - ), - >::InvalidAuthoritySet - ); - }) - } - - #[test] - fn importing_header_ensures_that_chain_is_extended() { - run_test(|| { - initialize_substrate_bridge(); - - assert_ok!(submit_finality_proof(4)); - assert_err!(submit_finality_proof(3), Error::::OldHeader); - assert_ok!(submit_finality_proof(5)); - }) - } - - #[test] - fn importing_header_enacts_new_authority_set() { - run_test(|| { - initialize_substrate_bridge(); - - let next_set_id = 2; - let next_authorities = vec![(ALICE.into(), 1), (BOB.into(), 1)]; - - // Need to update the header digest to indicate that our header signals an authority set - // change. The change will be enacted when we import our header. - let mut header = test_header(2); - header.digest = change_log(0); - - // Create a valid justification for the header - let justification = make_default_justification(&header); - - // Let's import our test header - assert_ok!( - Pallet::::submit_finality_proof( - Origin::signed(1), - Box::new(header.clone()), - justification - ), - PostDispatchInfo { - actual_weight: None, - pays_fee: frame_support::weights::Pays::No, - }, - ); - - // Make sure that our header is the best finalized - assert_eq!(>::get(), header.hash()); - assert!(>::contains_key(header.hash())); - - // Make sure that the authority set actually changed upon importing our header - assert_eq!( - >::get(), - bp_header_chain::AuthoritySet::new(next_authorities, next_set_id), - ); - }) - } - - #[test] - fn importing_header_rejects_header_with_scheduled_change_delay() { - run_test(|| { - initialize_substrate_bridge(); - - // Need to update the header digest to indicate that our header signals an authority set - // change. However, the change doesn't happen until the next block. - let mut header = test_header(2); - header.digest = change_log(1); - - // Create a valid justification for the header - let justification = make_default_justification(&header); - - // Should not be allowed to import this header - assert_err!( - Pallet::::submit_finality_proof( - Origin::signed(1), - Box::new(header), - justification - ), - >::UnsupportedScheduledChange - ); - }) - } - - #[test] - fn importing_header_rejects_header_with_forced_changes() { - run_test(|| { - initialize_substrate_bridge(); - - // Need to update the header digest to indicate that it signals a forced authority set - // change. 
- let mut header = test_header(2); - header.digest = forced_change_log(0); - - // Create a valid justification for the header - let justification = make_default_justification(&header); - - // Should not be allowed to import this header - assert_err!( - Pallet::::submit_finality_proof( - Origin::signed(1), - Box::new(header), - justification - ), - >::UnsupportedScheduledChange - ); - }) - } - - #[test] - fn parse_finalized_storage_proof_rejects_proof_on_unknown_header() { - run_test(|| { - assert_noop!( - Pallet::::parse_finalized_storage_proof( - Default::default(), - sp_trie::StorageProof::new(vec![]), - |_| (), - ), - Error::::UnknownHeader, - ); - }); - } - - #[test] - fn parse_finalized_storage_accepts_valid_proof() { - run_test(|| { - let (state_root, storage_proof) = bp_runtime::craft_valid_storage_proof(); - - let mut header = test_header(2); - header.set_state_root(state_root); - - let hash = header.hash(); - >::put(hash); - >::insert(hash, header); - - assert_ok!( - Pallet::::parse_finalized_storage_proof(hash, storage_proof, |_| (),), - (), - ); - }); - } - - #[test] - fn rate_limiter_disallows_imports_once_limit_is_hit_in_single_block() { - run_test(|| { - initialize_substrate_bridge(); - - assert_ok!(submit_finality_proof(1)); - assert_ok!(submit_finality_proof(2)); - assert_err!(submit_finality_proof(3), >::TooManyRequests); - }) - } - - #[test] - fn rate_limiter_invalid_requests_do_not_count_towards_request_count() { - run_test(|| { - let submit_invalid_request = || { - let header = test_header(1); - let mut invalid_justification = make_default_justification(&header); - invalid_justification.round = 42; - - Pallet::::submit_finality_proof( - Origin::signed(1), - Box::new(header), - invalid_justification, - ) - }; - - initialize_substrate_bridge(); - - for _ in 0..::MaxRequests::get() + 1 { - // Notice that the error here *isn't* `TooManyRequests` - assert_err!(submit_invalid_request(), >::InvalidJustification); - } - - // Can still submit `MaxRequests` requests afterwards - assert_ok!(submit_finality_proof(1)); - assert_ok!(submit_finality_proof(2)); - assert_err!(submit_finality_proof(3), >::TooManyRequests); - }) - } - - #[test] - fn rate_limiter_allows_request_after_new_block_has_started() { - run_test(|| { - initialize_substrate_bridge(); - assert_ok!(submit_finality_proof(1)); - assert_ok!(submit_finality_proof(2)); - - next_block(); - assert_ok!(submit_finality_proof(3)); - }) - } - - #[test] - fn rate_limiter_disallows_imports_once_limit_is_hit_across_different_blocks() { - run_test(|| { - initialize_substrate_bridge(); - assert_ok!(submit_finality_proof(1)); - assert_ok!(submit_finality_proof(2)); - - next_block(); - assert_ok!(submit_finality_proof(3)); - assert_err!(submit_finality_proof(4), >::TooManyRequests); - }) - } - - #[test] - fn rate_limiter_allows_max_requests_after_long_time_with_no_activity() { - run_test(|| { - initialize_substrate_bridge(); - assert_ok!(submit_finality_proof(1)); - assert_ok!(submit_finality_proof(2)); - - next_block(); - next_block(); - - next_block(); - assert_ok!(submit_finality_proof(5)); - assert_ok!(submit_finality_proof(7)); - }) - } - - #[test] - fn should_prune_headers_over_headers_to_keep_parameter() { - run_test(|| { - initialize_substrate_bridge(); - assert_ok!(submit_finality_proof(1)); - let first_header = Pallet::::best_finalized(); - next_block(); - - assert_ok!(submit_finality_proof(2)); - next_block(); - assert_ok!(submit_finality_proof(3)); - next_block(); - assert_ok!(submit_finality_proof(4)); - next_block(); - 
assert_ok!(submit_finality_proof(5)); - next_block(); - - assert_ok!(submit_finality_proof(6)); - - assert!( - !Pallet::::is_known_header(first_header.hash()), - "First header should be pruned." - ); - }) - } - - #[test] - fn storage_keys_computed_properly() { - assert_eq!( - IsHalted::::storage_value_final_key().to_vec(), - bp_header_chain::storage_keys::is_halted_key("Grandpa").0, - ); - - assert_eq!( - BestFinalized::::storage_value_final_key().to_vec(), - bp_header_chain::storage_keys::best_finalized_hash_key("Grandpa").0, - ); - } -} diff --git a/polkadot/bridges/modules/grandpa/src/mock.rs b/polkadot/bridges/modules/grandpa/src/mock.rs deleted file mode 100644 index bfc749d5230..00000000000 --- a/polkadot/bridges/modules/grandpa/src/mock.rs +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -// From construct_runtime macro -#![allow(clippy::from_over_into)] - -use bp_runtime::Chain; -use frame_support::{construct_runtime, parameter_types, weights::Weight}; -use sp_core::sr25519::Signature; -use sp_runtime::{ - testing::{Header, H256}, - traits::{BlakeTwo256, IdentityLookup}, - Perbill, -}; - -pub type AccountId = u64; -pub type TestHeader = crate::BridgedHeader; -pub type TestNumber = crate::BridgedBlockNumber; - -type Block = frame_system::mocking::MockBlock; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; - -use crate as grandpa; - -construct_runtime! { - pub enum TestRuntime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Grandpa: grandpa::{Pallet}, - } -} - -parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); -} - -impl frame_system::Config for TestRuntime { - type Origin = Origin; - type Index = u64; - type Call = Call; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type DbWeight = (); - type BlockWeights = (); - type BlockLength = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; -} - -parameter_types! 
{ - pub const MaxRequests: u32 = 2; - pub const HeadersToKeep: u32 = 5; - pub const SessionLength: u64 = 5; - pub const NumValidators: u32 = 5; -} - -impl grandpa::Config for TestRuntime { - type BridgedChain = TestBridgedChain; - type MaxRequests = MaxRequests; - type HeadersToKeep = HeadersToKeep; - type WeightInfo = (); -} - -#[derive(Debug)] -pub struct TestBridgedChain; - -impl Chain for TestBridgedChain { - type BlockNumber = ::BlockNumber; - type Hash = ::Hash; - type Hasher = ::Hashing; - type Header = ::Header; - - type AccountId = AccountId; - type Balance = u64; - type Index = u64; - type Signature = Signature; - - fn max_extrinsic_size() -> u32 { - unreachable!() - } - fn max_extrinsic_weight() -> Weight { - unreachable!() - } -} - -pub fn run_test(test: impl FnOnce() -> T) -> T { - sp_io::TestExternalities::new(Default::default()).execute_with(test) -} - -pub fn test_header(num: TestNumber) -> TestHeader { - // We wrap the call to avoid explicit type annotations in our tests - bp_test_utils::test_header(num) -} diff --git a/polkadot/bridges/modules/grandpa/src/weights.rs b/polkadot/bridges/modules/grandpa/src/weights.rs deleted file mode 100644 index 2c4660160a0..00000000000 --- a/polkadot/bridges/modules/grandpa/src/weights.rs +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Autogenerated weights for `pallet_bridge_grandpa` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2021-12-28, STEPS: 50, REPEAT: 20 -//! LOW RANGE: [], HIGH RANGE: [] -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled -//! CHAIN: Some("dev"), DB CACHE: 128 - -// Executed Command: -// target/release/millau-bridge-node -// benchmark -// --chain=dev -// --steps=50 -// --repeat=20 -// --pallet=pallet_bridge_grandpa -// --extrinsic=* -// --execution=wasm -// --wasm-execution=Compiled -// --heap-pages=4096 -// --output=./modules/grandpa/src/weights.rs -// --template=./.maintain/millau-weight-template.hbs - -#![allow(clippy::all)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{ - traits::Get, - weights::{constants::RocksDbWeight, Weight}, -}; -use sp_std::marker::PhantomData; - -/// Weight functions needed for `pallet_bridge_grandpa`. -pub trait WeightInfo { - fn submit_finality_proof(p: u32, v: u32) -> Weight; -} - -/// Weights for `pallet_bridge_grandpa` using the Millau node and recommended hardware. 
-pub struct MillauWeight(PhantomData); -impl WeightInfo for MillauWeight { - fn submit_finality_proof(p: u32, v: u32) -> Weight { - (115_651_000 as Weight) - .saturating_add((61_465_000 as Weight).saturating_mul(p as Weight)) - .saturating_add((3_438_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) - .saturating_add(T::DbWeight::get().writes(6 as Weight)) - } -} - -// For backwards compatibility and tests -impl WeightInfo for () { - fn submit_finality_proof(p: u32, v: u32) -> Weight { - (115_651_000 as Weight) - .saturating_add((61_465_000 as Weight).saturating_mul(p as Weight)) - .saturating_add((3_438_000 as Weight).saturating_mul(v as Weight)) - .saturating_add(RocksDbWeight::get().reads(7 as Weight)) - .saturating_add(RocksDbWeight::get().writes(6 as Weight)) - } -} diff --git a/polkadot/bridges/modules/messages/Cargo.toml b/polkadot/bridges/modules/messages/Cargo.toml deleted file mode 100644 index 804f323f10b..00000000000 --- a/polkadot/bridges/modules/messages/Cargo.toml +++ /dev/null @@ -1,55 +0,0 @@ -[package] -name = "pallet-bridge-messages" -description = "Module that allows bridged chains to exchange messages using lane concept." -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -bitvec = { version = "1", default-features = false, features = ["alloc"] } -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } -log = { version = "0.4.14", default-features = false } -num-traits = { version = "0.2", default-features = false } -scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } -serde = { version = "1.0.101", optional = true, features = ["derive"] } - -# Bridge dependencies - -bp-message-dispatch = { path = "../../primitives/message-dispatch", default-features = false } -bp-messages = { path = "../../primitives/messages", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } - -# Substrate Dependencies - -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } - -[dev-dependencies] -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master" } - -[features] -default = ["std"] -std = [ - "bp-message-dispatch/std", - "bp-messages/std", - "bp-runtime/std", - "codec/std", - "frame-support/std", - "frame-system/std", - "log/std", - "num-traits/std", - "scale-info/std", - "serde", - "sp-core/std", - "sp-runtime/std", - "sp-std/std", -] -runtime-benchmarks = [ - "frame-benchmarking/runtime-benchmarks", -] diff --git a/polkadot/bridges/modules/messages/README.md b/polkadot/bridges/modules/messages/README.md deleted file mode 100644 index 2dc56296842..00000000000 --- 
a/polkadot/bridges/modules/messages/README.md +++ /dev/null @@ -1,424 +0,0 @@ -# Messages Module - -The messages module is used to deliver messages from source chain to target chain. Message is -(almost) opaque to the module and the final goal is to hand message to the message dispatch -mechanism. - -## Contents -- [Overview](#overview) -- [Message Workflow](#message-workflow) -- [Integrating Message Lane Module into Runtime](#integrating-messages-module-into-runtime) -- [Non-Essential Functionality](#non-essential-functionality) -- [Weights of Module Extrinsics](#weights-of-module-extrinsics) - -## Overview - -Message lane is an unidirectional channel, where messages are sent from source chain to the target -chain. At the same time, a single instance of messages module supports both outbound lanes and -inbound lanes. So the chain where the module is deployed (this chain), may act as a source chain for -outbound messages (heading to a bridged chain) and as a target chain for inbound messages (coming -from a bridged chain). - -Messages module supports multiple message lanes. Every message lane is identified with a 4-byte -identifier. Messages sent through the lane are assigned unique (for this lane) increasing integer -value that is known as nonce ("number that can only be used once"). Messages that are sent over the -same lane are guaranteed to be delivered to the target chain in the same order they're sent from -the source chain. In other words, message with nonce `N` will be delivered right before delivering a -message with nonce `N+1`. - -Single message lane may be seen as a transport channel for single application (onchain, offchain or -mixed). At the same time the module itself never dictates any lane or message rules. In the end, it -is the runtime developer who defines what message lane and message mean for this runtime. - -## Message Workflow - -The message "appears" when its submitter calls the `send_message()` function of the module. The -submitter specifies the lane that he's willing to use, the message itself and the fee that he's -willing to pay for the message delivery and dispatch. If a message passes all checks, the nonce is -assigned and the message is stored in the module storage. The message is in an "undelivered" state -now. - -We assume that there are external, offchain actors, called relayers, that are submitting module -related transactions to both target and source chains. The pallet itself has no assumptions about -relayers incentivization scheme, but it has some callbacks for paying rewards. See -[Integrating Messages Module into runtime](#Integrating-Messages-Module-into-runtime) -for details. - -Eventually, some relayer would notice this message in the "undelivered" state and it would decide to -deliver this message. Relayer then crafts `receive_messages_proof()` transaction (aka delivery -transaction) for the messages module instance, deployed at the target chain. Relayer provides -his account id at the source chain, the proof of message (or several messages), the number of -messages in the transaction and their cumulative dispatch weight. Once a transaction is mined, the -message is considered "delivered". - -Once a message is delivered, the relayer may want to confirm delivery back to the source chain. -There are two reasons why he would want to do that. 
The first is that we intentionally limit the number -of "delivered", but not yet "confirmed" messages at inbound lanes -(see [What about other Constants in the Messages Module Configuration Trait](#What-about-other-Constants-in-the-Messages-Module-Configuration-Trait) for explanation). -So at some point, the target chain may stop accepting new messages until relayers confirm some of -these. The second is that if the relayer wants to be rewarded for delivery, he must prove the fact -that he has actually delivered the message. And this proof may only be generated after the delivery -transaction is mined. So the relayer crafts the `receive_messages_delivery_proof()` transaction (aka -confirmation transaction) for the messages module instance, deployed at the source chain. Once -this transaction is mined, the message is considered "confirmed". - -The "confirmed" state is the final state of the message. But there's one last thing related to the -message - the fact that it is now "confirmed" and that the reward has been paid to the relayer (or at least -the callback for this has been called) must be confirmed to the target chain. Otherwise, we may reach -the limit of "unconfirmed" messages at the target chain and it will stop accepting new messages. So -the relayer sometimes includes the nonce of the latest "confirmed" message in the next -`receive_messages_proof()` transaction, proving that some messages have been confirmed. - -## Integrating Messages Module into Runtime - -As has been said above, the messages module supports both outbound and inbound message lanes. -So if we integrate the module into some runtime, it may act as the source chain runtime for -outbound messages and as the target chain runtime for inbound messages. In this section, we'll -sometimes refer to the chain we're currently integrating with as this chain, and to the other chain as the -bridged chain. - -The messages module doesn't simply accept transactions claiming that the bridged chain has -some updated data for us. Instead, the module assumes that the bridged chain is able to -prove that updated data in some way. The proof is abstracted from the module and may be of any kind. -In our Substrate-to-Substrate bridge we're using runtime storage proofs. Other bridges may use -transaction proofs, Substrate header digests or anything else that may be proved. - -**IMPORTANT NOTE**: everything below in this chapter describes details of the messages module -configuration. But if you are interested in a well-proven and relatively easy integration of two -Substrate-based chains, you may want to look at the -[bridge-runtime-common](../../bin/runtime-common/README.md) crate. This crate provides a lot of -integration helpers, which may be used directly from within your runtime. If you later decide -to change something in this scheme, come back here for detailed information. - -### General Information - -The messages module supports instances. Every module instance is supposed to bridge this chain -and some bridged chain. To bridge with another chain, using another instance is suggested (this -isn't forced anywhere in the code, though). - -Message submitters may track message progress by inspecting module events. When a message is accepted, -the `MessageAccepted` event is emitted in the `send_message()` transaction. The event contains both -the message lane identifier and the nonce that has been assigned to the message.
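For illustration, here is a minimal, self-contained sketch of how a submitter-side (offchain) tool could track its messages by that `(lane, nonce)` pair. The types and event shapes below are simplified assumptions rather than the pallet's actual types; delivery notifications correspond to the event described next.

```rust
use std::collections::HashSet;

type LaneId = [u8; 4];
type MessageNonce = u64;

/// Simplified stand-in for the `MessageAccepted` event payload.
struct MessageAccepted {
    lane: LaneId,
    nonce: MessageNonce,
}

/// Submitter-side tracker keyed by the (lane, nonce) pair from `MessageAccepted`.
#[derive(Default)]
struct PendingMessages {
    pending: HashSet<(LaneId, MessageNonce)>,
}

impl PendingMessages {
    fn on_accepted(&mut self, event: &MessageAccepted) {
        self.pending.insert((event.lane, event.nonce));
    }

    /// Called when a later event reports an inclusive range of delivered nonces.
    fn on_delivered(&mut self, lane: LaneId, nonces: std::ops::RangeInclusive<MessageNonce>) {
        for nonce in nonces {
            self.pending.remove(&(lane, nonce));
        }
    }
}

fn main() {
    let mut tracker = PendingMessages::default();
    tracker.on_accepted(&MessageAccepted { lane: *b"test", nonce: 42 });
    tracker.on_delivered(*b"test", 40..=45);
    assert!(tracker.pending.is_empty());
}
```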
When a message is delivered -to the target chain, the `MessagesDelivered` event is emitted from the -`receive_messages_delivery_proof()` transaction. The `MessagesDelivered` contains the message lane -identifier, inclusive range of delivered message nonces and their single-bit dispatch results. - -Please note that the meaning of the 'dispatch result' is determined by the message dispatcher at -the target chain. For example, in case of immediate call dispatcher it will be the `true` if call -has been successfully dispatched and `false` if it has only been delivered. This simple mechanism -built into the messages module allows building basic bridge applications, which only care whether -their messages have been successfully dispatched or not. More sophisticated applications may use -their own dispatch result delivery mechanism to deliver something larger than single bit. - -### How to plug-in Messages Module to Send Messages to the Bridged Chain? - -The `pallet_bridge_messages::Config` trait has 3 main associated types that are used to work with -outbound messages. The `pallet_bridge_messages::Config::TargetHeaderChain` defines how we see the -bridged chain as the target for our outbound messages. It must be able to check that the bridged -chain may accept our message - like that the message has size below maximal possible transaction -size of the chain and so on. And when the relayer sends us a confirmation transaction, this -implementation must be able to parse and verify the proof of messages delivery. Normally, you would -reuse the same (configurable) type on all chains that are sending messages to the same bridged -chain. - -The `pallet_bridge_messages::Config::LaneMessageVerifier` defines a single callback to verify outbound -messages. The simplest callback may just accept all messages. But in this case you'll need to answer -many questions first. Who will pay for the delivery and confirmation transaction? Are we sure that -someone will ever deliver this message to the bridged chain? Are we sure that we don't bloat our -runtime storage by accepting this message? What if the message is improperly encoded or has some -fields set to invalid values? Answering all those (and similar) questions would lead to correct -implementation. - -There's another thing to consider when implementing type for use in -`pallet_bridge_messages::Config::LaneMessageVerifier`. It is whether we treat all message lanes -identically, or they'll have different sets of verification rules? For example, you may reserve -lane#1 for messages coming from some 'wrapped-token' pallet - then you may verify in your -implementation that the origin is associated with this pallet. Lane#2 may be reserved for 'system' -messages and you may charge zero fee for such messages. You may have some rate limiting for messages -sent over the lane#3. Or you may just verify the same rules set for all outbound messages - it is -all up to the `pallet_bridge_messages::Config::LaneMessageVerifier` implementation. - -The last type is the `pallet_bridge_messages::Config::MessageDeliveryAndDispatchPayment`. When all -checks are made and we have decided to accept the message, we're calling the -`pay_delivery_and_dispatch_fee()` callback, passing the corresponding argument of the `send_message` -function. Later, when message delivery is confirmed, we're calling `pay_relayers_rewards()` -callback, passing accounts of relayers and messages that they have delivered. 
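To make the two callbacks concrete, here is a rough, self-contained sketch of the payment flow just described. The trait shape and signatures below are simplified assumptions and differ from the real `MessageDeliveryAndDispatchPayment` trait.

```rust
type AccountId = u64;
type Balance = u128;

/// Simplified stand-in for the payment callbacks described above.
trait DeliveryAndDispatchPayment {
    /// Called from `send_message()`: collect the declared fee from the submitter.
    fn pay_delivery_and_dispatch_fee(submitter: &AccountId, fee: Balance) -> Result<(), &'static str>;
    /// Called from `receive_messages_delivery_proof()`: pay the accumulated rewards.
    fn pay_relayers_rewards(rewards: &[(AccountId, Balance)]);
}

/// Illustrative implementation: move the fee into a shared "relayers fund" on
/// send, and pay relayers from that fund once delivery is confirmed.
struct ExamplePayments;

impl DeliveryAndDispatchPayment for ExamplePayments {
    fn pay_delivery_and_dispatch_fee(submitter: &AccountId, fee: Balance) -> Result<(), &'static str> {
        // In a real runtime this would be a currency transfer that may fail,
        // e.g. when the submitter cannot afford the fee.
        println!("transfer {fee} from account {submitter} to the relayers fund");
        Ok(())
    }

    fn pay_relayers_rewards(rewards: &[(AccountId, Balance)]) {
        for (relayer, amount) in rewards {
            // One transfer from the relayers fund per rewarded relayer.
            println!("transfer {amount} from the relayers fund to relayer {relayer}");
        }
    }
}

fn main() {
    ExamplePayments::pay_delivery_and_dispatch_fee(&1, 1_000).unwrap();
    ExamplePayments::pay_relayers_rewards(&[(2, 600), (3, 400)]);
}
```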
The simplest -implementation of this trait is in the [`instant_payments.rs`](./src/instant_payments.rs) module and -simply calls `Currency::transfer()` when those callbacks are called. So `Currency` units are -transferred between the submitter, the 'relayers fund' and the relayers' accounts. Other implementations may use -more or less sophisticated techniques - the whole relayers incentivization scheme is not a part of -the messages module. - -### I have a Messages Module in my Runtime, but I Want to Reject all Outbound Messages. What shall I do? - -You should be looking at the `bp_messages::source_chain::ForbidOutboundMessages` structure from the -[`bp_messages::source_chain`](../../primitives/messages/src/source_chain.rs) module. It implements -all required traits and will simply reject all transactions related to outbound messages. - -### How to plug-in Messages Module to Receive Messages from the Bridged Chain? - -The `pallet_bridge_messages::Config` trait has 2 main associated types that are used to work with -inbound messages. The `pallet_bridge_messages::Config::SourceHeaderChain` defines how we see the -bridged chain as the source of our inbound messages. When a relayer sends us a delivery transaction, -this implementation must be able to parse and verify the proof of messages wrapped in this -transaction. Normally, you would reuse the same (configurable) type on all chains that are sending -messages to the same bridged chain. - -The `pallet_bridge_messages::Config::MessageDispatch` defines the way delivered messages are -dispatched. Apart from actually dispatching the message, the implementation must return the correct -dispatch weight of the message before dispatch is called. - -### I have a Messages Module in my Runtime, but I Want to Reject all Inbound Messages. What -shall I do? - -You should be looking at the `bp_messages::target_chain::ForbidInboundMessages` structure from -the [`bp_messages::target_chain`](../../primitives/messages/src/target_chain.rs) module. It -implements all required traits and will simply reject all transactions related to inbound messages. - -### What about other Constants in the Messages Module Configuration Trait? - -A message is stored in the source chain storage until its delivery is confirmed. After -that, we may safely remove the message from the storage. Lane messages are removed (pruned) when -someone sends a new message using the same lane. So the message submitter pays for that pruning. To -avoid pruning too many messages in a single transaction, there's the -`pallet_bridge_messages::Config::MaxMessagesToPruneAtOnce` configuration parameter. We will never prune -more than this number of messages in a single transaction. That said, the value should not be too -big, to avoid wasting resources when there are no messages to prune. - -To be able to reward the relayer for delivering messages, we store a map of message nonce ranges => -identifier of the relayer that has delivered this range in the target chain runtime storage. If a -relayer delivers multiple consecutive ranges, they're merged into a single entry. So there may be more -than one entry for the same relayer. Eventually, this whole map must be delivered back to the source -chain to confirm delivery and pay rewards. So to make sure we are able to craft this confirmation -transaction, we need to: (1) keep the size of this map below a certain limit and (2) make sure that -the weight of processing this map is below a certain limit. Both size and processing weight mostly -depend on the number of entries.
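As an illustration of how this map stays compact, here is a small, self-contained sketch (with assumed types and names) of entries being merged when the same relayer delivers consecutive ranges.

```rust
type MessageNonce = u64;
type RelayerId = u64;

/// One entry of the map: an inclusive nonce range delivered by a single relayer.
#[derive(Debug, PartialEq)]
struct UnrewardedRelayerEntry {
    relayer: RelayerId,
    begin: MessageNonce,
    end: MessageNonce,
}

/// Record a newly delivered range, merging it into the previous entry when it
/// is a consecutive range from the same relayer.
fn note_delivered(
    entries: &mut Vec<UnrewardedRelayerEntry>,
    relayer: RelayerId,
    begin: MessageNonce,
    end: MessageNonce,
) {
    match entries.last_mut() {
        Some(last) if last.relayer == relayer && last.end + 1 == begin => last.end = end,
        _ => entries.push(UnrewardedRelayerEntry { relayer, begin, end }),
    }
}

fn main() {
    let mut entries = Vec::new();
    note_delivered(&mut entries, 1, 1, 3);
    note_delivered(&mut entries, 1, 4, 5); // consecutive range, merged into 1..=5
    note_delivered(&mut entries, 2, 6, 6); // different relayer, new entry
    assert_eq!(entries.len(), 2);
    assert_eq!(entries[0], UnrewardedRelayerEntry { relayer: 1, begin: 1, end: 5 });
}
```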
The number of entries is limited by the -`pallet_bridge_messages::Config::MaxUnrewardedRelayerEntriesAtInboundLane` parameter. Processing weight -also depends on the total number of messages that are being confirmed, because every confirmed -message needs to be read. So there's another -`pallet_bridge_messages::Config::MaxUnconfirmedMessagesAtInboundLane` parameter for that. - -When choosing values for these parameters, you must also keep in mind that if the proof in your scheme -is based on finality of headers (and it is the most obvious option for Substrate-based chains with -a finality notion), then choosing too small values for these parameters may cause significant delays -in message delivery. That's because there are many actors involved in this scheme: 1) authorities -that are finalizing headers of the target chain need to finalize a header with a non-empty map; 2) the -headers relayer then needs to submit this header and its finality proof to the source chain; 3) the -messages relayer must then send the confirmation transaction (storage proof of this map) to the source -chain; 4) when the confirmation transaction is mined at some header, source chain authorities -must finalize this header; 5) the headers relay then needs to submit this header and its finality -proof to the target chain; 6) only now may the messages relayer submit new messages from the source -to the target chain and prune the entry from the map. - -The delivery transaction requires the relayer to provide both the number of entries and the total number of -messages in the map. This means that the module never charges an extra cost for delivering a map - -the relayer would need to pay exactly for the number of entries+messages it has delivered. So the -best guess for values of these parameters would be the pair that would occupy `N` percent of the -maximal transaction size and weight of the source chain. The `N` should be large enough to process -large maps, while at the same time keeping a reserve for future source chain upgrades. - -## Non-Essential Functionality - -Apart from the message-related calls, the module exposes a set of auxiliary calls. They fall into two -groups, described in the next two paragraphs. - -There may be a special account in every runtime where the messages module is deployed. This -account, named 'module owner', is like a module-level sudo account - it's able to halt and -resume all module operations without requiring a runtime upgrade. The module may have no module -owner, but we suggest using it at least for the initial deployment. The calls that are related to this -account are: -- `fn set_owner()`: the current module owner may call it to transfer "ownership" to another account; -- `fn halt_operations()`: the module owner (or sudo account) may call this function to stop all - module operations. After this call, all message-related transactions will be rejected until - a further `resume_operations` call. This call may be used when something extraordinary happens with - the bridge; -- `fn resume_operations()`: the module owner may call this function to resume bridge operations. The - module will resume its regular operations after this call. - -Apart from halting and resuming the bridge, the module owner may also tune module configuration -parameters without runtime upgrades. The set of parameters needs to be designed in advance, though. -The module configuration trait has an associated `Parameter` type, which may be e.g. an enum representing -a set of parameters that may be updated by the module owner.
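A minimal, self-contained sketch of what such an owner-tunable parameter set could look like is shown below; all names, variants and the fixed-point representation are illustrative assumptions, and the token conversion rate case is discussed right after.

```rust
/// Illustrative owner-tunable parameters, in the spirit of the associated
/// `Parameter` type described above.
#[derive(Debug, Clone, PartialEq)]
enum BridgeParameter {
    /// Conversion rate between the bridged token and this chain's token,
    /// stored as parts-per-million to avoid floating point in the runtime.
    TokenConversionRatePpm(u64),
    /// Maximal accepted message size, in bytes.
    MaxMessageSize(u32),
}

#[derive(Default)]
struct BridgeConfig {
    token_conversion_rate_ppm: u64,
    max_message_size: u32,
}

impl BridgeConfig {
    /// What the owner-facing update call would do after checking that the
    /// caller is the module owner.
    fn apply(&mut self, parameter: BridgeParameter) {
        match parameter {
            BridgeParameter::TokenConversionRatePpm(rate) => self.token_conversion_rate_ppm = rate,
            BridgeParameter::MaxMessageSize(size) => self.max_message_size = size,
        }
    }
}

fn main() {
    let mut config = BridgeConfig::default();
    config.apply(BridgeParameter::TokenConversionRatePpm(1_250_000));
    assert_eq!(config.token_conversion_rate_ppm, 1_250_000);
}
```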
For example, if your bridge needs to -convert sums between different tokens, you may define a 'conversion rate' parameter and let the -module owner update this parameter when there are significant changes in the rate. The corresponding -module call is `fn update_pallet_parameter()`. - -## Weights of Module Extrinsics - -The main assumptions behind the weight formulas are: -- all possible costs are paid in advance by the message submitter; -- whenever possible, the relayer tries to minimize the cost of its transactions. So e.g. even though the sender - always pays for delivering the outbound lane state proof, the relayer may not include it in the delivery - transaction (unless the messages module on the target chain requires that); -- the weight formula should incentivize the relayer not to submit any redundant data in the extrinsics - arguments; -- the extrinsic shall never execute slower (i.e. have a larger actual weight) than defined by the - formula. - -### Weight of `send_message` call - -#### Related benchmarks - -| Benchmark | Description | -|------------------------------------|---------------------------------------------------------| -| `send_minimal_message_worst_case` | Sends 0-size message with worst possible conditions | -| `send_1_kb_message_worst_case` | Sends 1KB-size message with worst possible conditions | -| `send_16_kb_message_worst_case` | Sends 16KB-size message with worst possible conditions | - -#### Weight formula - -The weight formula is: -``` -Weight = BaseWeight + MessageSizeInKilobytes * MessageKiloByteSendWeight -``` - -Where: - -| Component | How it is computed? | Description | -|-----------------------------|------------------------------------------------------------------------------|----------------------------------------------------------------| -| `BaseWeight` | `send_minimal_message_worst_case` | Weight of sending a minimal (0 bytes) message | -| `MessageKiloByteSendWeight` | `(send_16_kb_message_worst_case - send_1_kb_message_worst_case)/15` | Weight of sending every additional kilobyte of the message | - -### Weight of `receive_messages_proof` call - -#### Related benchmarks - -| Benchmark | Description* | -|---------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------| -| `receive_single_message_proof` | Receives proof of single `EXPECTED_DEFAULT_MESSAGE_LENGTH` message | -| `receive_two_messages_proof` | Receives proof of two identical `EXPECTED_DEFAULT_MESSAGE_LENGTH` messages | -| `receive_single_message_proof_with_outbound_lane_state` | Receives proof of single `EXPECTED_DEFAULT_MESSAGE_LENGTH` message and proof of outbound lane state at the source chain | -| `receive_single_message_proof_1_kb` | Receives proof of single message. The proof has size of approximately 1KB** | -| `receive_single_message_proof_16_kb` | Receives proof of single message. The proof has size of approximately 16KB** | - -*\* - In all benchmarks all received messages are dispatched and their dispatch cost is near to zero* - -*\*\* - Trie leafs are assumed to have minimal values. The proof is derived from the minimal proof -by including more trie nodes. That's because, according to our additional benchmarks, increasing the proof -by including more nodes has a slightly larger impact on performance than increasing the values stored in leafs*.
- -#### Weight formula - -The weight formula is: -``` -Weight = BaseWeight + OutboundStateDeliveryWeight - + MessagesCount * MessageDeliveryWeight - + MessagesDispatchWeight - + Max(0, ActualProofSize - ExpectedProofSize) * ProofByteDeliveryWeight -``` - -Where: - -| Component | How it is computed? | Description | -|-------------------------------|------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `BaseWeight` | `2*receive_single_message_proof - receive_two_messages_proof` | Weight of receiving and parsing minimal proof | -| `OutboundStateDeliveryWeight` | `receive_single_message_proof_with_outbound_lane_state - receive_single_message_proof` | Additional weight when proof includes outbound lane state | -| `MessageDeliveryWeight` | `receive_two_messages_proof - receive_single_message_proof` | Weight of of parsing and dispatching (without actual dispatch cost) of every message | -| `MessagesCount` | | Provided by relayer | -| `MessagesDispatchWeight` | | Provided by relayer | -| `ActualProofSize` | | Provided by relayer | -| `ExpectedProofSize` | `EXPECTED_DEFAULT_MESSAGE_LENGTH * MessagesCount + EXTRA_STORAGE_PROOF_SIZE` | Size of proof that we are expecting. This only includes `EXTRA_STORAGE_PROOF_SIZE` once, because we assume that intermediate nodes likely to be included in the proof only once. This may be wrong, but since weight of processing proof with many nodes is almost equal to processing proof with large leafs, additional cost will be covered because we're charging for extra proof bytes anyway | -| `ProofByteDeliveryWeight` | `(receive_single_message_proof_16_kb - receive_single_message_proof_1_kb) / (15 * 1024)` | Weight of processing every additional proof byte over `ExpectedProofSize` limit | - -#### Why for every message sent using `send_message` we will be able to craft `receive_messages_proof` transaction? - -We have following checks in `send_message` transaction on the source chain: -- message size should be less than or equal to `2/3` of maximal extrinsic size on the target chain; -- message dispatch weight should be less than or equal to the `1/2` of maximal extrinsic dispatch - weight on the target chain. - -Delivery transaction is an encoded delivery call and signed extensions. So we have `1/3` of maximal -extrinsic size reserved for: -- storage proof, excluding the message itself. Currently, on our test chains, the overhead is always - within `EXTRA_STORAGE_PROOF_SIZE` limits (1024 bytes); -- signed extras and other call arguments (`relayer_id: SourceChain::AccountId`, `messages_count: - u32`, `dispatch_weight: u64`). - -On Millau chain, maximal extrinsic size is `0.75 * 2MB`, so `1/3` is `512KB` (`524_288` bytes). This -should be enough to cover these extra arguments and signed extensions. - -Let's exclude message dispatch cost from single message delivery transaction weight formula: -``` -Weight = BaseWeight + OutboundStateDeliveryWeight + MessageDeliveryWeight - + Max(0, ActualProofSize - ExpectedProofSize) * ProofByteDeliveryWeight -``` - -So we have `1/2` of maximal extrinsic weight to cover these components. 
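To make this budget argument concrete, here is a rough, self-contained sketch of the kind of check implied here: a single-message delivery (without dispatch cost) must fit into half of the maximal extrinsic weight. All constants are made-up placeholders, and the real checks are the `ensure_*` helpers mentioned below.

```rust
type Weight = u64;

// Made-up placeholder limits; real values come from the target chain runtime.
const MAX_EXTRINSIC_WEIGHT: Weight = 500_000_000_000;
const MAX_EXTRINSIC_SIZE: u32 = 1_572_864; // 0.75 * 2 MiB, as on Millau
const EXPECTED_DEFAULT_MESSAGE_LENGTH: u32 = 128;

// Benchmark-derived components (placeholders).
const BASE_WEIGHT: Weight = 1_000_000_000;
const OUTBOUND_STATE_DELIVERY_WEIGHT: Weight = 500_000_000;
const MESSAGE_DELIVERY_WEIGHT: Weight = 500_000_000;
const PROOF_BYTE_DELIVERY_WEIGHT: Weight = 50_000;

/// Weight of delivering a single message, excluding its dispatch cost.
fn single_message_delivery_weight(actual_proof_size: u32) -> Weight {
    let extra_bytes = actual_proof_size.saturating_sub(EXPECTED_DEFAULT_MESSAGE_LENGTH) as Weight;
    BASE_WEIGHT
        + OUTBOUND_STATE_DELIVERY_WEIGHT
        + MESSAGE_DELIVERY_WEIGHT
        + extra_bytes * PROOF_BYTE_DELIVERY_WEIGHT
}

fn main() {
    // Worst case: the message occupies 2/3 of the maximal extrinsic size, so the
    // proof may be (almost) that large as well.
    let max_message_size = MAX_EXTRINSIC_SIZE * 2 / 3;
    let weight = single_message_delivery_weight(max_message_size);
    assert!(
        weight <= MAX_EXTRINSIC_WEIGHT / 2,
        "single message delivery does not fit into half of the extrinsic weight",
    );
}
```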
`BaseWeight`, -`OutboundStateDeliveryWeight` and `MessageDeliveryWeight` are determined using benchmarks and are -hardcoded into runtime. Adequate relayer would only include required trie nodes into the proof. So -if message size would be maximal (`2/3` of `MaximalExtrinsicSize`), then the extra proof size would -be `MaximalExtrinsicSize / 3 * 2 - EXPECTED_DEFAULT_MESSAGE_LENGTH`. - -Both conditions are verified by `pallet_bridge_messages::ensure_weights_are_correct` and -`pallet_bridge_messages::ensure_able_to_receive_messages` functions, which must be called from every -runtime's tests. - -#### Post-dispatch weight refunds of the `receive_messages_proof` call - -Weight formula of the `receive_messages_proof` call assumes that the dispatch fee of every message is -paid at the target chain (where call is executed), that every message will be dispatched and that -dispatch weight of the message will be exactly the weight that is returned from the -`MessageDispatch::dispatch_weight` method call. This isn't true for all messages, so the call returns -actual weight used to dispatch messages. - -This actual weight is the weight, returned by the weight formula, minus: -- the weight of undispatched messages, if we have failed to dispatch because of different issues; -- the unspent dispatch weight if the declared weight of some messages is less than their actual post-dispatch weight; -- the pay-dispatch-fee weight for every message that had dispatch fee paid at the source chain. - -The last component is computed as a difference between two benchmarks results - the `receive_single_message_proof` -benchmark (that assumes that the fee is paid during dispatch) and the `receive_single_prepaid_message_proof` -(that assumes that the dispatch fee is already paid). - -### Weight of `receive_messages_delivery_proof` call - -#### Related benchmarks - -| Benchmark | Description | -|-------------------------------------------------------------|------------------------------------------------------------------------------------------| -| `receive_delivery_proof_for_single_message` | Receives proof of single message delivery | -| `receive_delivery_proof_for_two_messages_by_single_relayer` | Receives proof of two messages delivery. Both messages are delivered by the same relayer | -| `receive_delivery_proof_for_two_messages_by_two_relayers` | Receives proof of two messages delivery. Messages are delivered by different relayers | - -#### Weight formula - -The weight formula is: -``` -Weight = BaseWeight + MessagesCount * MessageConfirmationWeight - + RelayersCount * RelayerRewardWeight - + Max(0, ActualProofSize - ExpectedProofSize) * ProofByteDeliveryWeight - + MessagesCount * (DbReadWeight + DbWriteWeight) -``` - -Where: - -| Component | How it is computed? 
| Description | -|---------------------------|-----------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `BaseWeight` | `2*receive_delivery_proof_for_single_message - receive_delivery_proof_for_two_messages_by_single_relayer` | Weight of receiving and parsing minimal delivery proof | -| `MessageDeliveryWeight` | `receive_delivery_proof_for_two_messages_by_single_relayer - receive_delivery_proof_for_single_message` | Weight of confirming every additional message | -| `MessagesCount` | | Provided by relayer | -| `RelayerRewardWeight` | `receive_delivery_proof_for_two_messages_by_two_relayers - receive_delivery_proof_for_two_messages_by_single_relayer` | Weight of rewarding every additional relayer | -| `RelayersCount` | | Provided by relayer | -| `ActualProofSize` | | Provided by relayer | -| `ExpectedProofSize` | `EXTRA_STORAGE_PROOF_SIZE` | Size of proof that we are expecting | -| `ProofByteDeliveryWeight` | `(receive_single_message_proof_16_kb - receive_single_message_proof_1_kb) / (15 * 1024)` | Weight of processing every additional proof byte over `ExpectedProofSize` limit. We're using the same formula, as for message delivery, because proof mechanism is assumed to be the same in both cases | - -#### Post-dispatch weight refunds of the `receive_messages_delivery_proof` call - -Weight formula of the `receive_messages_delivery_proof` call assumes that all messages in the proof -are actually delivered (so there are no already confirmed messages) and every messages is processed -by the `OnDeliveryConfirmed` callback. This means that for every message, we're adding single db read -weight and single db write weight. If, by some reason, messages are not processed by the -`OnDeliveryConfirmed` callback, or their processing is faster than that additional weight, the -difference is refunded to the submitter. - -#### Why we're always able to craft `receive_messages_delivery_proof` transaction? - -There can be at most `::MaxUnconfirmedMessagesAtInboundLane` -messages and at most -`::MaxUnrewardedRelayerEntriesAtInboundLane` unrewarded -relayers in the single delivery confirmation transaction. - -We're checking that this transaction may be crafted in the -`pallet_bridge_messages::ensure_able_to_receive_confirmation` function, which must be called from every -runtime' tests. diff --git a/polkadot/bridges/modules/messages/src/benchmarking.rs b/polkadot/bridges/modules/messages/src/benchmarking.rs deleted file mode 100644 index 828a9ee653e..00000000000 --- a/polkadot/bridges/modules/messages/src/benchmarking.rs +++ /dev/null @@ -1,668 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Messages pallet benchmarking. - -use crate::{ - inbound_lane::InboundLaneStorage, inbound_lane_storage, outbound_lane, - outbound_lane::ReceivalConfirmationResult, weights_ext::EXPECTED_DEFAULT_MESSAGE_LENGTH, Call, -}; - -use bp_messages::{ - source_chain::TargetHeaderChain, target_chain::SourceHeaderChain, DeliveredMessages, - InboundLaneData, LaneId, MessageData, MessageNonce, OutboundLaneData, UnrewardedRelayer, - UnrewardedRelayersState, -}; -use bp_runtime::messages::DispatchFeePayment; -use frame_benchmarking::{account, benchmarks_instance_pallet}; -use frame_support::{traits::Get, weights::Weight}; -use frame_system::RawOrigin; -use sp_std::{collections::vec_deque::VecDeque, ops::RangeInclusive, prelude::*}; - -const SEED: u32 = 0; - -/// Pallet we're benchmarking here. -pub struct Pallet, I: 'static>(crate::Pallet); - -/// Proof size requirements. -#[derive(Clone, Copy, Debug)] -pub enum ProofSize { - /// The proof is expected to be minimal. If value size may be changed, then it is expected to - /// have given size. - Minimal(u32), - /// The proof is expected to have at least given size and grow by increasing number of trie - /// nodes included in the proof. - HasExtraNodes(u32), - /// The proof is expected to have at least given size and grow by increasing value that is - /// stored in the trie. - HasLargeLeaf(u32), -} - -/// Benchmark-specific message parameters. -#[derive(Debug)] -pub struct MessageParams { - /// Size of the message payload. - pub size: u32, - /// Message sender account. - pub sender_account: ThisAccountId, -} - -/// Benchmark-specific message proof parameters. -#[derive(Debug)] -pub struct MessageProofParams { - /// Id of the lane. - pub lane: LaneId, - /// Range of messages to include in the proof. - pub message_nonces: RangeInclusive, - /// If `Some`, the proof needs to include this outbound lane data. - pub outbound_lane_data: Option, - /// Proof size requirements. - pub size: ProofSize, - /// Where the fee for dispatching message is paid? - pub dispatch_fee_payment: DispatchFeePayment, -} - -/// Benchmark-specific message delivery proof parameters. -#[derive(Debug)] -pub struct MessageDeliveryProofParams { - /// Id of the lane. - pub lane: LaneId, - /// The proof needs to include this inbound lane data. - pub inbound_lane_data: InboundLaneData, - /// Proof size requirements. - pub size: ProofSize, -} - -/// Trait that must be implemented by runtime. -pub trait Config: crate::Config { - /// Lane id to use in benchmarks. - fn bench_lane_id() -> LaneId { - Default::default() - } - /// Get maximal size of the message payload. - fn maximal_message_size() -> u32; - /// Return id of relayer account at the bridged chain. - fn bridged_relayer_id() -> Self::InboundRelayer; - /// Return balance of given account. - fn account_balance(account: &Self::AccountId) -> Self::OutboundMessageFee; - /// Create given account and give it enough balance for test purposes. - fn endow_account(account: &Self::AccountId); - /// Fee paid by submitter for single message delivery. - fn message_fee() -> Self::OutboundMessageFee { - 100_000_000_000_000.into() - } - /// Prepare message to send over lane. - fn prepare_outbound_message( - params: MessageParams, - ) -> (Self::OutboundPayload, Self::OutboundMessageFee); - /// Prepare messages proof to receive by the module. 
- fn prepare_message_proof( - params: MessageProofParams, - ) -> ( - >::MessagesProof, - Weight, - ); - /// Prepare messages delivery proof to receive by the module. - fn prepare_message_delivery_proof( - params: MessageDeliveryProofParams, - ) -> >::MessagesDeliveryProof; - /// Returns true if message has been dispatched (either successfully or not). - fn is_message_dispatched(nonce: MessageNonce) -> bool; -} - -benchmarks_instance_pallet! { - // - // Benchmarks that are used directly by the runtime. - // - - // Benchmark `send_message` extrinsic with the worst possible conditions: - // * outbound lane already has state, so it needs to be read and decoded; - // * relayers fund account does not exists (in practice it needs to exist in production environment); - // * maximal number of messages is being pruned during the call; - // * message size is minimal for the target chain. - // - // Result of this benchmark is used as a base weight for `send_message` call. Then the 'message weight' - // (estimated using `send_half_maximal_message_worst_case` and `send_maximal_message_worst_case`) is - // added. - send_minimal_message_worst_case { - let lane_id = T::bench_lane_id(); - let relayers_fund_id = crate::relayer_fund_account_id::(); - let sender = account("sender", 0, SEED); - T::endow_account(&sender); - T::endow_account(&relayers_fund_id); - - // 'send' messages that are to be pruned when our message is sent - for _nonce in 1..=T::MaxMessagesToPruneAtOnce::get() { - send_regular_message::(); - } - confirm_message_delivery::(T::MaxMessagesToPruneAtOnce::get()); - - let (payload, fee) = T::prepare_outbound_message(MessageParams { - size: 0, - sender_account: sender.clone(), - }); - }: send_message(RawOrigin::Signed(sender), lane_id, payload, fee) - verify { - assert_eq!( - crate::OutboundLanes::::get(&T::bench_lane_id()).latest_generated_nonce, - T::MaxMessagesToPruneAtOnce::get() + 1, - ); - } - - // Benchmark `send_message` extrinsic with the worst possible conditions: - // * outbound lane already has state, so it needs to be read and decoded; - // * relayers fund account does not exists (in practice it needs to exist in production environment); - // * maximal number of messages is being pruned during the call; - // * message size is 1KB. - // - // With single KB of message size, the weight of the call is increased (roughly) by - // `(send_16_kb_message_worst_case - send_1_kb_message_worst_case) / 15`. 
- send_1_kb_message_worst_case { - let lane_id = T::bench_lane_id(); - let relayers_fund_id = crate::relayer_fund_account_id::(); - let sender = account("sender", 0, SEED); - T::endow_account(&sender); - T::endow_account(&relayers_fund_id); - - // 'send' messages that are to be pruned when our message is sent - for _nonce in 1..=T::MaxMessagesToPruneAtOnce::get() { - send_regular_message::(); - } - confirm_message_delivery::(T::MaxMessagesToPruneAtOnce::get()); - - let size = 1024; - assert!( - T::maximal_message_size() > size, - "This benchmark can only be used with runtime that accepts 1KB messages", - ); - - let (payload, fee) = T::prepare_outbound_message(MessageParams { - size, - sender_account: sender.clone(), - }); - }: send_message(RawOrigin::Signed(sender), lane_id, payload, fee) - verify { - assert_eq!( - crate::OutboundLanes::::get(&T::bench_lane_id()).latest_generated_nonce, - T::MaxMessagesToPruneAtOnce::get() + 1, - ); - } - - // Benchmark `send_message` extrinsic with the worst possible conditions: - // * outbound lane already has state, so it needs to be read and decoded; - // * relayers fund account does not exists (in practice it needs to exist in production environment); - // * maximal number of messages is being pruned during the call; - // * message size is 16KB. - // - // With single KB of message size, the weight of the call is increased (roughly) by - // `(send_16_kb_message_worst_case - send_1_kb_message_worst_case) / 15`. - send_16_kb_message_worst_case { - let lane_id = T::bench_lane_id(); - let relayers_fund_id = crate::relayer_fund_account_id::(); - let sender = account("sender", 0, SEED); - T::endow_account(&sender); - T::endow_account(&relayers_fund_id); - - // 'send' messages that are to be pruned when our message is sent - for _nonce in 1..=T::MaxMessagesToPruneAtOnce::get() { - send_regular_message::(); - } - confirm_message_delivery::(T::MaxMessagesToPruneAtOnce::get()); - - let size = 16 * 1024; - assert!( - T::maximal_message_size() > size, - "This benchmark can only be used with runtime that accepts 16KB messages", - ); - - let (payload, fee) = T::prepare_outbound_message(MessageParams { - size, - sender_account: sender.clone(), - }); - }: send_message(RawOrigin::Signed(sender), lane_id, payload, fee) - verify { - assert_eq!( - crate::OutboundLanes::::get(&T::bench_lane_id()).latest_generated_nonce, - T::MaxMessagesToPruneAtOnce::get() + 1, - ); - } - - // Benchmark `increase_message_fee` with following conditions: - // * message has maximal message; - // * submitter account is killed because its balance is less than ED after payment. - // - // Result of this benchmark is directly used by weight formula of the call. - maximal_increase_message_fee { - let relayers_fund_id = crate::relayer_fund_account_id::(); - let sender = account("sender", 42, SEED); - T::endow_account(&sender); - T::endow_account(&relayers_fund_id); - - let additional_fee = T::account_balance(&sender); - let lane_id = T::bench_lane_id(); - let nonce = 1; - - send_regular_message_with_payload::(vec![42u8; T::maximal_message_size() as _]); - }: increase_message_fee(RawOrigin::Signed(sender.clone()), lane_id, nonce, additional_fee) - verify { - assert_eq!(T::account_balance(&sender), 0.into()); - } - - // Benchmark `increase_message_fee` with following conditions: - // * message size varies from minimal to maximal; - // * submitter account is killed because its balance is less than ED after payment. 
- increase_message_fee { - let i in 0..T::maximal_message_size().try_into().unwrap_or_default(); - - let relayers_fund_id = crate::relayer_fund_account_id::(); - let sender = account("sender", 42, SEED); - T::endow_account(&sender); - T::endow_account(&relayers_fund_id); - - let additional_fee = T::account_balance(&sender); - let lane_id = T::bench_lane_id(); - let nonce = 1; - - send_regular_message_with_payload::(vec![42u8; i as _]); - }: increase_message_fee(RawOrigin::Signed(sender.clone()), lane_id, nonce, additional_fee) - verify { - assert_eq!(T::account_balance(&sender), 0.into()); - } - - // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions: - // * proof does not include outbound lane state proof; - // * inbound lane already has state, so it needs to be read and decoded; - // * message is successfully dispatched; - // * message requires all heavy checks done by dispatcher; - // * message dispatch fee is paid at target (this) chain. - // - // This is base benchmark for all other message delivery benchmarks. - receive_single_message_proof { - let relayer_id_on_source = T::bridged_relayer_id(); - let relayer_id_on_target = account("relayer", 0, SEED); - T::endow_account(&relayer_id_on_target); - - // mark messages 1..=20 as delivered - receive_messages::(20); - - let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { - lane: T::bench_lane_id(), - message_nonces: 21..=21, - outbound_lane_data: None, - size: ProofSize::Minimal(EXPECTED_DEFAULT_MESSAGE_LENGTH), - dispatch_fee_payment: DispatchFeePayment::AtTargetChain, - }); - }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) - verify { - assert_eq!( - crate::InboundLanes::::get(&T::bench_lane_id()).last_delivered_nonce(), - 21, - ); - assert!(T::is_message_dispatched(21)); - } - - // Benchmark `receive_messages_proof` extrinsic with two minimal-weight messages and following conditions: - // * proof does not include outbound lane state proof; - // * inbound lane already has state, so it needs to be read and decoded; - // * message is successfully dispatched; - // * message requires all heavy checks done by dispatcher; - // * message dispatch fee is paid at target (this) chain. - // - // The weight of single message delivery could be approximated as - // `weight(receive_two_messages_proof) - weight(receive_single_message_proof)`. - // This won't be super-accurate if message has non-zero dispatch weight, but estimation should - // be close enough to real weight. 
- receive_two_messages_proof { - let relayer_id_on_source = T::bridged_relayer_id(); - let relayer_id_on_target = account("relayer", 0, SEED); - T::endow_account(&relayer_id_on_target); - - // mark messages 1..=20 as delivered - receive_messages::(20); - - let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { - lane: T::bench_lane_id(), - message_nonces: 21..=22, - outbound_lane_data: None, - size: ProofSize::Minimal(EXPECTED_DEFAULT_MESSAGE_LENGTH), - dispatch_fee_payment: DispatchFeePayment::AtTargetChain, - }); - }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 2, dispatch_weight) - verify { - assert_eq!( - crate::InboundLanes::::get(&T::bench_lane_id()).last_delivered_nonce(), - 22, - ); - assert!(T::is_message_dispatched(22)); - } - - // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions: - // * proof includes outbound lane state proof; - // * inbound lane already has state, so it needs to be read and decoded; - // * message is successfully dispatched; - // * message requires all heavy checks done by dispatcher; - // * message dispatch fee is paid at target (this) chain. - // - // The weight of outbound lane state delivery would be - // `weight(receive_single_message_proof_with_outbound_lane_state) - weight(receive_single_message_proof)`. - // This won't be super-accurate if message has non-zero dispatch weight, but estimation should - // be close enough to real weight. - receive_single_message_proof_with_outbound_lane_state { - let relayer_id_on_source = T::bridged_relayer_id(); - let relayer_id_on_target = account("relayer", 0, SEED); - T::endow_account(&relayer_id_on_target); - - // mark messages 1..=20 as delivered - receive_messages::(20); - - let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { - lane: T::bench_lane_id(), - message_nonces: 21..=21, - outbound_lane_data: Some(OutboundLaneData { - oldest_unpruned_nonce: 21, - latest_received_nonce: 20, - latest_generated_nonce: 21, - }), - size: ProofSize::Minimal(EXPECTED_DEFAULT_MESSAGE_LENGTH), - dispatch_fee_payment: DispatchFeePayment::AtTargetChain, - }); - }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) - verify { - let lane_state = crate::InboundLanes::::get(&T::bench_lane_id()); - assert_eq!(lane_state.last_delivered_nonce(), 21); - assert_eq!(lane_state.last_confirmed_nonce, 20); - assert!(T::is_message_dispatched(21)); - } - - // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions: - // * the proof has many redundand trie nodes with total size of approximately 1KB; - // * proof does not include outbound lane state proof; - // * inbound lane already has state, so it needs to be read and decoded; - // * message is successfully dispatched; - // * message requires all heavy checks done by dispatcher. - // - // With single KB of messages proof, the weight of the call is increased (roughly) by - // `(receive_single_message_proof_16KB - receive_single_message_proof_1_kb) / 15`. 
- receive_single_message_proof_1_kb { - let relayer_id_on_source = T::bridged_relayer_id(); - let relayer_id_on_target = account("relayer", 0, SEED); - T::endow_account(&relayer_id_on_target); - - // mark messages 1..=20 as delivered - receive_messages::(20); - - let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { - lane: T::bench_lane_id(), - message_nonces: 21..=21, - outbound_lane_data: None, - size: ProofSize::HasExtraNodes(1024), - dispatch_fee_payment: DispatchFeePayment::AtTargetChain, - }); - }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) - verify { - assert_eq!( - crate::InboundLanes::::get(&T::bench_lane_id()).last_delivered_nonce(), - 21, - ); - assert!(T::is_message_dispatched(21)); - } - - // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions: - // * the proof has many redundand trie nodes with total size of approximately 16KB; - // * proof does not include outbound lane state proof; - // * inbound lane already has state, so it needs to be read and decoded; - // * message is successfully dispatched; - // * message requires all heavy checks done by dispatcher. - // - // Size of proof grows because it contains extra trie nodes in it. - // - // With single KB of messages proof, the weight of the call is increased (roughly) by - // `(receive_single_message_proof_16KB - receive_single_message_proof) / 15`. - receive_single_message_proof_16_kb { - let relayer_id_on_source = T::bridged_relayer_id(); - let relayer_id_on_target = account("relayer", 0, SEED); - T::endow_account(&relayer_id_on_target); - - // mark messages 1..=20 as delivered - receive_messages::(20); - - let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { - lane: T::bench_lane_id(), - message_nonces: 21..=21, - outbound_lane_data: None, - size: ProofSize::HasExtraNodes(16 * 1024), - dispatch_fee_payment: DispatchFeePayment::AtTargetChain, - }); - }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) - verify { - assert_eq!( - crate::InboundLanes::::get(&T::bench_lane_id()).last_delivered_nonce(), - 21, - ); - assert!(T::is_message_dispatched(21)); - } - - // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions: - // * proof does not include outbound lane state proof; - // * inbound lane already has state, so it needs to be read and decoded; - // * message is successfully dispatched; - // * message requires all heavy checks done by dispatcher; - // * message dispatch fee is paid at source (bridged) chain. - // - // This benchmark is used to compute extra weight spent at target chain when fee is paid there. Then we use - // this information in two places: (1) to reduce weight of delivery tx if sender pays fee at the source chain - // and (2) to refund relayer with this weight if fee has been paid at the source chain. 
- receive_single_prepaid_message_proof { - let relayer_id_on_source = T::bridged_relayer_id(); - let relayer_id_on_target = account("relayer", 0, SEED); - T::endow_account(&relayer_id_on_target); - - // mark messages 1..=20 as delivered - receive_messages::(20); - - let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { - lane: T::bench_lane_id(), - message_nonces: 21..=21, - outbound_lane_data: None, - size: ProofSize::Minimal(EXPECTED_DEFAULT_MESSAGE_LENGTH), - dispatch_fee_payment: DispatchFeePayment::AtSourceChain, - }); - }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) - verify { - assert_eq!( - crate::InboundLanes::::get(&T::bench_lane_id()).last_delivered_nonce(), - 21, - ); - assert!(T::is_message_dispatched(21)); - } - - // Benchmark `receive_messages_delivery_proof` extrinsic with following conditions: - // * single relayer is rewarded for relaying single message; - // * relayer account does not exist (in practice it needs to exist in production environment). - // - // This is base benchmark for all other confirmations delivery benchmarks. - receive_delivery_proof_for_single_message { - let relayers_fund_id = crate::relayer_fund_account_id::(); - let relayer_id: T::AccountId = account("relayer", 0, SEED); - let relayer_balance = T::account_balance(&relayer_id); - T::endow_account(&relayers_fund_id); - - // send message that we're going to confirm - send_regular_message::(); - - let relayers_state = UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 1, - total_messages: 1, - }; - let proof = T::prepare_message_delivery_proof(MessageDeliveryProofParams { - lane: T::bench_lane_id(), - inbound_lane_data: InboundLaneData { - relayers: vec![UnrewardedRelayer { - relayer: relayer_id.clone(), - messages: DeliveredMessages::new(1, true), - }].into_iter().collect(), - last_confirmed_nonce: 0, - }, - size: ProofSize::Minimal(0), - }); - }: receive_messages_delivery_proof(RawOrigin::Signed(relayer_id.clone()), proof, relayers_state) - verify { - assert_eq!( - T::account_balance(&relayer_id), - relayer_balance + T::message_fee(), - ); - } - - // Benchmark `receive_messages_delivery_proof` extrinsic with following conditions: - // * single relayer is rewarded for relaying two messages; - // * relayer account does not exist (in practice it needs to exist in production environment). - // - // Additional weight for paying single-message reward to the same relayer could be computed - // as `weight(receive_delivery_proof_for_two_messages_by_single_relayer) - // - weight(receive_delivery_proof_for_single_message)`. 
- receive_delivery_proof_for_two_messages_by_single_relayer { - let relayers_fund_id = crate::relayer_fund_account_id::(); - let relayer_id: T::AccountId = account("relayer", 0, SEED); - let relayer_balance = T::account_balance(&relayer_id); - T::endow_account(&relayers_fund_id); - - // send message that we're going to confirm - send_regular_message::(); - send_regular_message::(); - - let relayers_state = UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 2, - total_messages: 2, - }; - let mut delivered_messages = DeliveredMessages::new(1, true); - delivered_messages.note_dispatched_message(true); - let proof = T::prepare_message_delivery_proof(MessageDeliveryProofParams { - lane: T::bench_lane_id(), - inbound_lane_data: InboundLaneData { - relayers: vec![UnrewardedRelayer { - relayer: relayer_id.clone(), - messages: delivered_messages, - }].into_iter().collect(), - last_confirmed_nonce: 0, - }, - size: ProofSize::Minimal(0), - }); - }: receive_messages_delivery_proof(RawOrigin::Signed(relayer_id.clone()), proof, relayers_state) - verify { - ensure_relayer_rewarded::(&relayer_id, &relayer_balance); - } - - // Benchmark `receive_messages_delivery_proof` extrinsic with following conditions: - // * two relayers are rewarded for relaying single message each; - // * relayer account does not exist (in practice it needs to exist in production environment). - // - // Additional weight for paying reward to the next relayer could be computed - // as `weight(receive_delivery_proof_for_two_messages_by_two_relayers) - // - weight(receive_delivery_proof_for_two_messages_by_single_relayer)`. - receive_delivery_proof_for_two_messages_by_two_relayers { - let relayers_fund_id = crate::relayer_fund_account_id::(); - let relayer1_id: T::AccountId = account("relayer1", 1, SEED); - let relayer1_balance = T::account_balance(&relayer1_id); - let relayer2_id: T::AccountId = account("relayer2", 2, SEED); - let relayer2_balance = T::account_balance(&relayer2_id); - T::endow_account(&relayers_fund_id); - - // send message that we're going to confirm - send_regular_message::(); - send_regular_message::(); - - let relayers_state = UnrewardedRelayersState { - unrewarded_relayer_entries: 2, - messages_in_oldest_entry: 1, - total_messages: 2, - }; - let proof = T::prepare_message_delivery_proof(MessageDeliveryProofParams { - lane: T::bench_lane_id(), - inbound_lane_data: InboundLaneData { - relayers: vec![ - UnrewardedRelayer { - relayer: relayer1_id.clone(), - messages: DeliveredMessages::new(1, true), - }, - UnrewardedRelayer { - relayer: relayer2_id.clone(), - messages: DeliveredMessages::new(2, true), - }, - ].into_iter().collect(), - last_confirmed_nonce: 0, - }, - size: ProofSize::Minimal(0), - }); - }: receive_messages_delivery_proof(RawOrigin::Signed(relayer1_id.clone()), proof, relayers_state) - verify { - ensure_relayer_rewarded::(&relayer1_id, &relayer1_balance); - ensure_relayer_rewarded::(&relayer2_id, &relayer2_balance); - } -} - -fn send_regular_message, I: 'static>() { - let mut outbound_lane = outbound_lane::(T::bench_lane_id()); - outbound_lane.send_message(MessageData { payload: vec![], fee: T::message_fee() }); -} - -fn send_regular_message_with_payload, I: 'static>(payload: Vec) { - let mut outbound_lane = outbound_lane::(T::bench_lane_id()); - outbound_lane.send_message(MessageData { payload, fee: T::message_fee() }); -} - -fn confirm_message_delivery, I: 'static>(nonce: MessageNonce) { - let mut outbound_lane = outbound_lane::(T::bench_lane_id()); - let 
latest_received_nonce = outbound_lane.data().latest_received_nonce; - let mut relayers = VecDeque::with_capacity((nonce - latest_received_nonce) as usize); - for nonce in latest_received_nonce + 1..=nonce { - relayers.push_back(UnrewardedRelayer { - relayer: (), - messages: DeliveredMessages::new(nonce, true), - }); - } - assert!(matches!( - outbound_lane.confirm_delivery(nonce - latest_received_nonce, nonce, &relayers), - ReceivalConfirmationResult::ConfirmedMessages(_), - )); -} - -fn receive_messages, I: 'static>(nonce: MessageNonce) { - let mut inbound_lane_storage = inbound_lane_storage::(T::bench_lane_id()); - inbound_lane_storage.set_data(InboundLaneData { - relayers: vec![UnrewardedRelayer { - relayer: T::bridged_relayer_id(), - messages: DeliveredMessages::new(nonce, true), - }] - .into_iter() - .collect(), - last_confirmed_nonce: 0, - }); -} - -fn ensure_relayer_rewarded, I: 'static>( - relayer_id: &T::AccountId, - old_balance: &T::OutboundMessageFee, -) { - let new_balance = T::account_balance(relayer_id); - assert!( - new_balance > *old_balance, - "Relayer haven't received reward for relaying message: old balance = {:?}, new balance = {:?}", - old_balance, - new_balance, - ); -} diff --git a/polkadot/bridges/modules/messages/src/inbound_lane.rs b/polkadot/bridges/modules/messages/src/inbound_lane.rs deleted file mode 100644 index 00875bb878a..00000000000 --- a/polkadot/bridges/modules/messages/src/inbound_lane.rs +++ /dev/null @@ -1,504 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Everything about incoming messages receival. - -use bp_messages::{ - target_chain::{DispatchMessage, DispatchMessageData, MessageDispatch}, - DeliveredMessages, InboundLaneData, LaneId, MessageKey, MessageNonce, OutboundLaneData, - UnrewardedRelayer, -}; -use bp_runtime::messages::MessageDispatchResult; -use frame_support::RuntimeDebug; -use sp_std::prelude::PartialEq; - -/// Inbound lane storage. -pub trait InboundLaneStorage { - /// Delivery and dispatch fee type on source chain. - type MessageFee; - /// Id of relayer on source chain. - type Relayer: Clone + PartialEq; - - /// Lane id. - fn id(&self) -> LaneId; - /// Return maximal number of unrewarded relayer entries in inbound lane. - fn max_unrewarded_relayer_entries(&self) -> MessageNonce; - /// Return maximal number of unconfirmed messages in inbound lane. - fn max_unconfirmed_messages(&self) -> MessageNonce; - /// Get lane data from the storage. - fn data(&self) -> InboundLaneData; - /// Update lane data in the storage. - fn set_data(&mut self, data: InboundLaneData); -} - -/// Result of single message receival. -#[derive(RuntimeDebug, PartialEq, Eq)] -pub enum ReceivalResult { - /// Message has been received and dispatched. 
Note that we don't care whether dispatch has - /// been successful or not - in both case message falls into this category. - /// - /// The message dispatch result is also returned. - Dispatched(MessageDispatchResult), - /// Message has invalid nonce and lane has rejected to accept this message. - InvalidNonce, - /// There are too many unrewarded relayer entries at the lane. - TooManyUnrewardedRelayers, - /// There are too many unconfirmed messages at the lane. - TooManyUnconfirmedMessages, -} - -/// Inbound messages lane. -pub struct InboundLane { - storage: S, -} - -impl InboundLane { - /// Create new inbound lane backed by given storage. - pub fn new(storage: S) -> Self { - InboundLane { storage } - } - - /// Receive state of the corresponding outbound lane. - pub fn receive_state_update( - &mut self, - outbound_lane_data: OutboundLaneData, - ) -> Option { - let mut data = self.storage.data(); - let last_delivered_nonce = data.last_delivered_nonce(); - - if outbound_lane_data.latest_received_nonce > last_delivered_nonce { - // this is something that should never happen if proofs are correct - return None - } - if outbound_lane_data.latest_received_nonce <= data.last_confirmed_nonce { - return None - } - - let new_confirmed_nonce = outbound_lane_data.latest_received_nonce; - data.last_confirmed_nonce = new_confirmed_nonce; - // Firstly, remove all of the records where higher nonce <= new confirmed nonce - while data - .relayers - .front() - .map(|entry| entry.messages.end <= new_confirmed_nonce) - .unwrap_or(false) - { - data.relayers.pop_front(); - } - // Secondly, update the next record with lower nonce equal to new confirmed nonce if needed. - // Note: There will be max. 1 record to update as we don't allow messages from relayers to - // overlap. - match data.relayers.front_mut() { - Some(entry) if entry.messages.begin < new_confirmed_nonce => { - entry.messages.dispatch_results = entry - .messages - .dispatch_results - .split_off((new_confirmed_nonce + 1 - entry.messages.begin) as _); - entry.messages.begin = new_confirmed_nonce + 1; - }, - _ => {}, - } - - self.storage.set_data(data); - Some(outbound_lane_data.latest_received_nonce) - } - - /// Receive new message. 
- pub fn receive_message, AccountId>( - &mut self, - relayer_at_bridged_chain: &S::Relayer, - relayer_at_this_chain: &AccountId, - nonce: MessageNonce, - message_data: DispatchMessageData, - ) -> ReceivalResult { - let mut data = self.storage.data(); - let is_correct_message = nonce == data.last_delivered_nonce() + 1; - if !is_correct_message { - return ReceivalResult::InvalidNonce - } - - // if there are more unrewarded relayer entries than we may accept, reject this message - if data.relayers.len() as MessageNonce >= self.storage.max_unrewarded_relayer_entries() { - return ReceivalResult::TooManyUnrewardedRelayers - } - - // if there are more unconfirmed messages than we may accept, reject this message - let unconfirmed_messages_count = nonce.saturating_sub(data.last_confirmed_nonce); - if unconfirmed_messages_count > self.storage.max_unconfirmed_messages() { - return ReceivalResult::TooManyUnconfirmedMessages - } - - // then, dispatch message - let dispatch_result = P::dispatch( - relayer_at_this_chain, - DispatchMessage { - key: MessageKey { lane_id: self.storage.id(), nonce }, - data: message_data, - }, - ); - - // now let's update inbound lane storage - let push_new = match data.relayers.back_mut() { - Some(entry) if entry.relayer == *relayer_at_bridged_chain => { - entry.messages.note_dispatched_message(dispatch_result.dispatch_result); - false - }, - _ => true, - }; - if push_new { - data.relayers.push_back(UnrewardedRelayer { - relayer: (*relayer_at_bridged_chain).clone(), - messages: DeliveredMessages::new(nonce, dispatch_result.dispatch_result), - }); - } - self.storage.set_data(data); - - ReceivalResult::Dispatched(dispatch_result) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - inbound_lane, - mock::{ - dispatch_result, message_data, run_test, unrewarded_relayer, TestMessageDispatch, - TestRuntime, REGULAR_PAYLOAD, TEST_LANE_ID, TEST_RELAYER_A, TEST_RELAYER_B, - TEST_RELAYER_C, - }, - RuntimeInboundLaneStorage, - }; - - fn receive_regular_message( - lane: &mut InboundLane>, - nonce: MessageNonce, - ) { - assert_eq!( - lane.receive_message::( - &TEST_RELAYER_A, - &TEST_RELAYER_A, - nonce, - message_data(REGULAR_PAYLOAD).into() - ), - ReceivalResult::Dispatched(dispatch_result(0)) - ); - } - - #[test] - fn receive_status_update_ignores_status_from_the_future() { - run_test(|| { - let mut lane = inbound_lane::(TEST_LANE_ID); - receive_regular_message(&mut lane, 1); - assert_eq!( - lane.receive_state_update(OutboundLaneData { - latest_received_nonce: 10, - ..Default::default() - }), - None, - ); - - assert_eq!(lane.storage.data().last_confirmed_nonce, 0); - }); - } - - #[test] - fn receive_status_update_ignores_obsolete_status() { - run_test(|| { - let mut lane = inbound_lane::(TEST_LANE_ID); - receive_regular_message(&mut lane, 1); - receive_regular_message(&mut lane, 2); - receive_regular_message(&mut lane, 3); - assert_eq!( - lane.receive_state_update(OutboundLaneData { - latest_received_nonce: 3, - ..Default::default() - }), - Some(3), - ); - assert_eq!(lane.storage.data().last_confirmed_nonce, 3); - - assert_eq!( - lane.receive_state_update(OutboundLaneData { - latest_received_nonce: 3, - ..Default::default() - }), - None, - ); - assert_eq!(lane.storage.data().last_confirmed_nonce, 3); - }); - } - - #[test] - fn receive_status_update_works() { - run_test(|| { - let mut lane = inbound_lane::(TEST_LANE_ID); - receive_regular_message(&mut lane, 1); - receive_regular_message(&mut lane, 2); - receive_regular_message(&mut lane, 3); - 
assert_eq!(lane.storage.data().last_confirmed_nonce, 0); - assert_eq!( - lane.storage.data().relayers, - vec![unrewarded_relayer(1, 3, TEST_RELAYER_A)] - ); - - assert_eq!( - lane.receive_state_update(OutboundLaneData { - latest_received_nonce: 2, - ..Default::default() - }), - Some(2), - ); - assert_eq!(lane.storage.data().last_confirmed_nonce, 2); - assert_eq!( - lane.storage.data().relayers, - vec![unrewarded_relayer(3, 3, TEST_RELAYER_A)] - ); - - assert_eq!( - lane.receive_state_update(OutboundLaneData { - latest_received_nonce: 3, - ..Default::default() - }), - Some(3), - ); - assert_eq!(lane.storage.data().last_confirmed_nonce, 3); - assert_eq!(lane.storage.data().relayers, vec![]); - }); - } - - #[test] - fn receive_status_update_works_with_batches_from_relayers() { - run_test(|| { - let mut lane = inbound_lane::(TEST_LANE_ID); - let mut seed_storage_data = lane.storage.data(); - // Prepare data - seed_storage_data.last_confirmed_nonce = 0; - seed_storage_data.relayers.push_back(unrewarded_relayer(1, 1, TEST_RELAYER_A)); - // Simulate messages batch (2, 3, 4) from relayer #2 - seed_storage_data.relayers.push_back(unrewarded_relayer(2, 4, TEST_RELAYER_B)); - seed_storage_data.relayers.push_back(unrewarded_relayer(5, 5, TEST_RELAYER_C)); - lane.storage.set_data(seed_storage_data); - // Check - assert_eq!( - lane.receive_state_update(OutboundLaneData { - latest_received_nonce: 3, - ..Default::default() - }), - Some(3), - ); - assert_eq!(lane.storage.data().last_confirmed_nonce, 3); - assert_eq!( - lane.storage.data().relayers, - vec![ - unrewarded_relayer(4, 4, TEST_RELAYER_B), - unrewarded_relayer(5, 5, TEST_RELAYER_C) - ] - ); - }); - } - - #[test] - fn fails_to_receive_message_with_incorrect_nonce() { - run_test(|| { - let mut lane = inbound_lane::(TEST_LANE_ID); - assert_eq!( - lane.receive_message::( - &TEST_RELAYER_A, - &TEST_RELAYER_A, - 10, - message_data(REGULAR_PAYLOAD).into() - ), - ReceivalResult::InvalidNonce - ); - assert_eq!(lane.storage.data().last_delivered_nonce(), 0); - }); - } - - #[test] - fn fails_to_receive_messages_above_unrewarded_relayer_entries_limit_per_lane() { - run_test(|| { - let mut lane = inbound_lane::(TEST_LANE_ID); - let max_nonce = - ::MaxUnrewardedRelayerEntriesAtInboundLane::get(); - for current_nonce in 1..max_nonce + 1 { - assert_eq!( - lane.receive_message::( - &(TEST_RELAYER_A + current_nonce), - &(TEST_RELAYER_A + current_nonce), - current_nonce, - message_data(REGULAR_PAYLOAD).into() - ), - ReceivalResult::Dispatched(dispatch_result(0)) - ); - } - // Fails to dispatch new message from different than latest relayer. - assert_eq!( - lane.receive_message::( - &(TEST_RELAYER_A + max_nonce + 1), - &(TEST_RELAYER_A + max_nonce + 1), - max_nonce + 1, - message_data(REGULAR_PAYLOAD).into() - ), - ReceivalResult::TooManyUnrewardedRelayers, - ); - // Fails to dispatch new messages from latest relayer. Prevents griefing attacks. 
- assert_eq!( - lane.receive_message::( - &(TEST_RELAYER_A + max_nonce), - &(TEST_RELAYER_A + max_nonce), - max_nonce + 1, - message_data(REGULAR_PAYLOAD).into() - ), - ReceivalResult::TooManyUnrewardedRelayers, - ); - }); - } - - #[test] - fn fails_to_receive_messages_above_unconfirmed_messages_limit_per_lane() { - run_test(|| { - let mut lane = inbound_lane::(TEST_LANE_ID); - let max_nonce = - ::MaxUnconfirmedMessagesAtInboundLane::get(); - for current_nonce in 1..=max_nonce { - assert_eq!( - lane.receive_message::( - &TEST_RELAYER_A, - &TEST_RELAYER_A, - current_nonce, - message_data(REGULAR_PAYLOAD).into() - ), - ReceivalResult::Dispatched(dispatch_result(0)) - ); - } - // Fails to dispatch new message from different than latest relayer. - assert_eq!( - lane.receive_message::( - &TEST_RELAYER_B, - &TEST_RELAYER_B, - max_nonce + 1, - message_data(REGULAR_PAYLOAD).into() - ), - ReceivalResult::TooManyUnconfirmedMessages, - ); - // Fails to dispatch new messages from latest relayer. - assert_eq!( - lane.receive_message::( - &TEST_RELAYER_A, - &TEST_RELAYER_A, - max_nonce + 1, - message_data(REGULAR_PAYLOAD).into() - ), - ReceivalResult::TooManyUnconfirmedMessages, - ); - }); - } - - #[test] - fn correctly_receives_following_messages_from_two_relayers_alternately() { - run_test(|| { - let mut lane = inbound_lane::(TEST_LANE_ID); - assert_eq!( - lane.receive_message::( - &TEST_RELAYER_A, - &TEST_RELAYER_A, - 1, - message_data(REGULAR_PAYLOAD).into() - ), - ReceivalResult::Dispatched(dispatch_result(0)) - ); - assert_eq!( - lane.receive_message::( - &TEST_RELAYER_B, - &TEST_RELAYER_B, - 2, - message_data(REGULAR_PAYLOAD).into() - ), - ReceivalResult::Dispatched(dispatch_result(0)) - ); - assert_eq!( - lane.receive_message::( - &TEST_RELAYER_A, - &TEST_RELAYER_A, - 3, - message_data(REGULAR_PAYLOAD).into() - ), - ReceivalResult::Dispatched(dispatch_result(0)) - ); - assert_eq!( - lane.storage.data().relayers, - vec![ - unrewarded_relayer(1, 1, TEST_RELAYER_A), - unrewarded_relayer(2, 2, TEST_RELAYER_B), - unrewarded_relayer(3, 3, TEST_RELAYER_A) - ] - ); - }); - } - - #[test] - fn rejects_same_message_from_two_different_relayers() { - run_test(|| { - let mut lane = inbound_lane::(TEST_LANE_ID); - assert_eq!( - lane.receive_message::( - &TEST_RELAYER_A, - &TEST_RELAYER_A, - 1, - message_data(REGULAR_PAYLOAD).into() - ), - ReceivalResult::Dispatched(dispatch_result(0)) - ); - assert_eq!( - lane.receive_message::( - &TEST_RELAYER_B, - &TEST_RELAYER_B, - 1, - message_data(REGULAR_PAYLOAD).into() - ), - ReceivalResult::InvalidNonce, - ); - }); - } - - #[test] - fn correct_message_is_processed_instantly() { - run_test(|| { - let mut lane = inbound_lane::(TEST_LANE_ID); - receive_regular_message(&mut lane, 1); - assert_eq!(lane.storage.data().last_delivered_nonce(), 1); - }); - } - - #[test] - fn unspent_weight_is_returned_by_receive_message() { - run_test(|| { - let mut lane = inbound_lane::(TEST_LANE_ID); - let mut payload = REGULAR_PAYLOAD; - payload.dispatch_result.unspent_weight = 1; - assert_eq!( - lane.receive_message::( - &TEST_RELAYER_A, - &TEST_RELAYER_A, - 1, - message_data(payload).into() - ), - ReceivalResult::Dispatched(dispatch_result(1)) - ); - }); - } -} diff --git a/polkadot/bridges/modules/messages/src/instant_payments.rs b/polkadot/bridges/modules/messages/src/instant_payments.rs deleted file mode 100644 index 2a620a95222..00000000000 --- a/polkadot/bridges/modules/messages/src/instant_payments.rs +++ /dev/null @@ -1,350 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) 
Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Implementation of `MessageDeliveryAndDispatchPayment` trait on top of `Currency` trait. -//! -//! The payment is first transferred to a special `relayers-fund` account and only transferred -//! to the actual relayer in case confirmation is received. - -use crate::OutboundMessages; - -use bp_messages::{ - source_chain::{MessageDeliveryAndDispatchPayment, RelayersRewards, SenderOrigin}, - LaneId, MessageKey, MessageNonce, UnrewardedRelayer, -}; -use codec::Encode; -use frame_support::traits::{Currency as CurrencyT, ExistenceRequirement, Get}; -use num_traits::{SaturatingAdd, Zero}; -use sp_runtime::traits::Saturating; -use sp_std::{collections::vec_deque::VecDeque, fmt::Debug, ops::RangeInclusive}; - -/// Error that occurs when message fee is non-zero, but payer is not defined. -const NON_ZERO_MESSAGE_FEE_CANT_BE_PAID_BY_NONE: &str = - "Non-zero message fee can't be paid by "; - -/// Instant message payments made in given currency. -/// -/// The balance is initially reserved in a special `relayers-fund` account, and transferred -/// to the relayer when message delivery is confirmed. -/// -/// Additionally, confirmation transaction submitter (`confirmation_relayer`) is reimbursed -/// with the confirmation rewards (part of message fee, reserved to pay for delivery confirmation). -/// -/// NOTE The `relayers-fund` account must always exist i.e. be over Existential Deposit (ED; the -/// pallet enforces that) to make sure that even if the message cost is below ED it is still paid -/// to the relayer account. -/// NOTE It's within relayer's interest to keep their balance above ED as well, to make sure they -/// can receive the payment. 
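// The payment flow described above, reduced to a toy ledger: the fee moves
// from the submitter to the shared `relayers-fund` account when the message
// is accepted, and from the fund to the relayer once delivery is confirmed.
// A minimal sketch; `Ledger`, the account names and the amounts are
// illustrative and stand in for the `Currency` trait used by the pallet.
use std::collections::HashMap;

struct Ledger {
    balances: HashMap<&'static str, u64>,
}

impl Ledger {
    fn transfer(&mut self, from: &'static str, to: &'static str, amount: u64) -> Result<(), &'static str> {
        let from_balance = self.balances.get(from).copied().unwrap_or(0);
        if from_balance < amount {
            return Err("not enough funds to pay the message fee");
        }
        *self.balances.entry(from).or_insert(0) -= amount;
        *self.balances.entry(to).or_insert(0) += amount;
        Ok(())
    }
}

fn main() {
    let mut ledger = Ledger {
        balances: HashMap::from([("submitter", 100), ("relayers-fund", 10), ("relayer", 0)]),
    };
    // 1) the message is accepted: the declared fee is parked on the shared fund account
    ledger.transfer("submitter", "relayers-fund", 30).unwrap();
    // 2) the delivery confirmation arrives: the fee is released to the relayer
    ledger.transfer("relayers-fund", "relayer", 30).unwrap();
    assert_eq!(ledger.balances["relayer"], 30);
    // the fund account keeps its pre-existing (existential) balance
    assert_eq!(ledger.balances["relayers-fund"], 10);
}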
-pub struct InstantCurrencyPayments { - _phantom: sp_std::marker::PhantomData<(T, I, Currency, GetConfirmationFee)>, -} - -impl - MessageDeliveryAndDispatchPayment - for InstantCurrencyPayments -where - T: frame_system::Config + crate::Config, - I: 'static, - T::Origin: SenderOrigin, - Currency: CurrencyT, - Currency::Balance: From, - GetConfirmationFee: Get, -{ - type Error = &'static str; - - fn pay_delivery_and_dispatch_fee( - submitter: &T::Origin, - fee: &Currency::Balance, - relayer_fund_account: &T::AccountId, - ) -> Result<(), Self::Error> { - let submitter_account = match submitter.linked_account() { - Some(submitter_account) => submitter_account, - None if !fee.is_zero() => { - // if we'll accept some message that has declared that the `fee` has been paid but - // it isn't actually paid, then it'll lead to problems with delivery confirmation - // payments (see `pay_relayer_rewards` && `confirmation_relayer` in particular) - return Err(NON_ZERO_MESSAGE_FEE_CANT_BE_PAID_BY_NONE) - }, - None => { - // message lane verifier has accepted the message before, so this message - // is unpaid **by design** - // => let's just do nothing - return Ok(()) - }, - }; - - if !frame_system::Pallet::::account_exists(relayer_fund_account) { - return Err("The relayer fund account must exist for the message lanes pallet to work correctly."); - } - - Currency::transfer( - &submitter_account, - relayer_fund_account, - *fee, - // it's fine for the submitter to go below Existential Deposit and die. - ExistenceRequirement::AllowDeath, - ) - .map_err(Into::into) - } - - fn pay_relayers_rewards( - lane_id: LaneId, - messages_relayers: VecDeque>, - confirmation_relayer: &T::AccountId, - received_range: &RangeInclusive, - relayer_fund_account: &T::AccountId, - ) { - let relayers_rewards = - cal_relayers_rewards::(lane_id, messages_relayers, received_range); - if !relayers_rewards.is_empty() { - pay_relayers_rewards::( - confirmation_relayer, - relayers_rewards, - relayer_fund_account, - GetConfirmationFee::get(), - ); - } - } -} - -/// Calculate the relayers rewards -pub(crate) fn cal_relayers_rewards( - lane_id: LaneId, - messages_relayers: VecDeque>, - received_range: &RangeInclusive, -) -> RelayersRewards -where - T: frame_system::Config + crate::Config, - I: 'static, -{ - // remember to reward relayers that have delivered messages - // this loop is bounded by `T::MaxUnrewardedRelayerEntriesAtInboundLane` on the bridged chain - let mut relayers_rewards: RelayersRewards<_, T::OutboundMessageFee> = RelayersRewards::new(); - for entry in messages_relayers { - let nonce_begin = sp_std::cmp::max(entry.messages.begin, *received_range.start()); - let nonce_end = sp_std::cmp::min(entry.messages.end, *received_range.end()); - - // loop won't proceed if current entry is ahead of received range (begin > end). - // this loop is bound by `T::MaxUnconfirmedMessagesAtInboundLane` on the bridged chain - let mut relayer_reward = relayers_rewards.entry(entry.relayer).or_default(); - for nonce in nonce_begin..nonce_end + 1 { - let message_data = OutboundMessages::::get(MessageKey { lane_id, nonce }) - .expect("message was just confirmed; we never prune unconfirmed messages; qed"); - relayer_reward.reward = relayer_reward.reward.saturating_add(&message_data.fee); - relayer_reward.messages += 1; - } - } - relayers_rewards -} - -/// Pay rewards to given relayers, optionally rewarding confirmation relayer. 
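// The reward split implemented below (a per-message confirmation fee is
// deducted from every delivering relayer and credited to whoever submitted
// the delivery confirmation) in a self-contained form. A sketch only:
// `split_rewards` and the account names are illustrative; the figures mirror
// the unit tests further down in this file.
// Each entry is (relayer, total reward owed, number of delivered messages).
fn split_rewards(
    rewards: &[(&'static str, u64, u64)],
    confirmation_relayer: &'static str,
    confirmation_fee: u64,
) -> Vec<(&'static str, u64)> {
    let mut confirmation_relayer_reward = 0u64;
    let mut payouts = Vec::new();
    for &(relayer, reward, messages) in rewards {
        if relayer == confirmation_relayer {
            // own reward is paid out in full, together with the collected confirmation fees
            confirmation_relayer_reward += reward;
            continue;
        }
        // deduct the confirmation fee for every delivered message, but never more than earned
        let confirmation_reward = (confirmation_fee * messages).min(reward);
        confirmation_relayer_reward += confirmation_reward;
        payouts.push((relayer, reward - confirmation_reward));
    }
    payouts.push((confirmation_relayer, confirmation_relayer_reward));
    payouts
}

fn main() {
    // relayer 1 earned 100 for 2 messages, relayer 2 earned 100 for 3 messages
    // and also submitted the confirmation; the confirmation fee is 10 per message
    let payouts = split_rewards(&[("relayer-1", 100, 2), ("relayer-2", 100, 3)], "relayer-2", 10);
    assert_eq!(payouts, vec![("relayer-1", 80), ("relayer-2", 120)]);
}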
-fn pay_relayers_rewards( - confirmation_relayer: &AccountId, - relayers_rewards: RelayersRewards, - relayer_fund_account: &AccountId, - confirmation_fee: Currency::Balance, -) where - AccountId: Debug + Encode + PartialEq, - Currency: CurrencyT, - Currency::Balance: From, -{ - // reward every relayer except `confirmation_relayer` - let mut confirmation_relayer_reward = Currency::Balance::zero(); - for (relayer, reward) in relayers_rewards { - let mut relayer_reward = reward.reward; - - if relayer != *confirmation_relayer { - // If delivery confirmation is submitted by other relayer, let's deduct confirmation fee - // from relayer reward. - // - // If confirmation fee has been increased (or if it was the only component of message - // fee), then messages relayer may receive zero reward. - let mut confirmation_reward = confirmation_fee.saturating_mul(reward.messages.into()); - if confirmation_reward > relayer_reward { - confirmation_reward = relayer_reward; - } - relayer_reward = relayer_reward.saturating_sub(confirmation_reward); - confirmation_relayer_reward = - confirmation_relayer_reward.saturating_add(confirmation_reward); - } else { - // If delivery confirmation is submitted by this relayer, let's add confirmation fee - // from other relayers to this relayer reward. - confirmation_relayer_reward = confirmation_relayer_reward.saturating_add(reward.reward); - continue - } - - pay_relayer_reward::(relayer_fund_account, &relayer, relayer_reward); - } - - // finally - pay reward to confirmation relayer - pay_relayer_reward::( - relayer_fund_account, - confirmation_relayer, - confirmation_relayer_reward, - ); -} - -/// Transfer funds from relayers fund account to given relayer. -fn pay_relayer_reward( - relayer_fund_account: &AccountId, - relayer_account: &AccountId, - reward: Currency::Balance, -) where - AccountId: Debug, - Currency: CurrencyT, -{ - if reward.is_zero() { - return - } - - let pay_result = Currency::transfer( - relayer_fund_account, - relayer_account, - reward, - // the relayer fund account must stay above ED (needs to be pre-funded) - ExistenceRequirement::KeepAlive, - ); - - match pay_result { - Ok(_) => log::trace!( - target: "runtime::bridge-messages", - "Rewarded relayer {:?} with {:?}", - relayer_account, - reward, - ), - Err(error) => log::trace!( - target: "runtime::bridge-messages", - "Failed to pay relayer {:?} reward {:?}: {:?}", - relayer_account, - reward, - error, - ), - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::{ - run_test, AccountId as TestAccountId, Balance as TestBalance, Origin, TestRuntime, - }; - use bp_messages::source_chain::RelayerRewards; - - type Balances = pallet_balances::Pallet; - - const RELAYER_1: TestAccountId = 1; - const RELAYER_2: TestAccountId = 2; - const RELAYER_3: TestAccountId = 3; - const RELAYERS_FUND_ACCOUNT: TestAccountId = crate::mock::ENDOWED_ACCOUNT; - - fn relayers_rewards() -> RelayersRewards { - vec![ - (RELAYER_1, RelayerRewards { reward: 100, messages: 2 }), - (RELAYER_2, RelayerRewards { reward: 100, messages: 3 }), - ] - .into_iter() - .collect() - } - - #[test] - fn pay_delivery_and_dispatch_fee_fails_on_non_zero_fee_and_unknown_payer() { - frame_support::parameter_types! 
{ - const GetConfirmationFee: TestBalance = 0; - }; - - run_test(|| { - let result = InstantCurrencyPayments::< - TestRuntime, - (), - Balances, - GetConfirmationFee, - >::pay_delivery_and_dispatch_fee( - &Origin::root(), - &100, - &RELAYERS_FUND_ACCOUNT, - ); - assert_eq!(result, Err(NON_ZERO_MESSAGE_FEE_CANT_BE_PAID_BY_NONE)); - }); - } - - #[test] - fn pay_delivery_and_dispatch_succeeds_on_zero_fee_and_unknown_payer() { - frame_support::parameter_types! { - const GetConfirmationFee: TestBalance = 0; - }; - - run_test(|| { - let result = InstantCurrencyPayments::< - TestRuntime, - (), - Balances, - GetConfirmationFee, - >::pay_delivery_and_dispatch_fee( - &Origin::root(), - &0, - &RELAYERS_FUND_ACCOUNT, - ); - assert!(result.is_ok()); - }); - } - - #[test] - fn confirmation_relayer_is_rewarded_if_it_has_also_delivered_messages() { - run_test(|| { - pay_relayers_rewards::( - &RELAYER_2, - relayers_rewards(), - &RELAYERS_FUND_ACCOUNT, - 10, - ); - - assert_eq!(Balances::free_balance(&RELAYER_1), 80); - assert_eq!(Balances::free_balance(&RELAYER_2), 120); - }); - } - - #[test] - fn confirmation_relayer_is_rewarded_if_it_has_not_delivered_any_delivered_messages() { - run_test(|| { - pay_relayers_rewards::( - &RELAYER_3, - relayers_rewards(), - &RELAYERS_FUND_ACCOUNT, - 10, - ); - - assert_eq!(Balances::free_balance(&RELAYER_1), 80); - assert_eq!(Balances::free_balance(&RELAYER_2), 70); - assert_eq!(Balances::free_balance(&RELAYER_3), 50); - }); - } - - #[test] - fn only_confirmation_relayer_is_rewarded_if_confirmation_fee_has_significantly_increased() { - run_test(|| { - pay_relayers_rewards::( - &RELAYER_3, - relayers_rewards(), - &RELAYERS_FUND_ACCOUNT, - 1000, - ); - - assert_eq!(Balances::free_balance(&RELAYER_1), 0); - assert_eq!(Balances::free_balance(&RELAYER_2), 0); - assert_eq!(Balances::free_balance(&RELAYER_3), 200); - }); - } -} diff --git a/polkadot/bridges/modules/messages/src/lib.rs b/polkadot/bridges/modules/messages/src/lib.rs deleted file mode 100644 index c13e515cebe..00000000000 --- a/polkadot/bridges/modules/messages/src/lib.rs +++ /dev/null @@ -1,2310 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Runtime module that allows sending and receiving messages using lane concept: -//! -//! 1) the message is sent using `send_message()` call; -//! 2) every outbound message is assigned nonce; -//! 3) the messages are stored in the storage; -//! 4) external component (relay) delivers messages to bridged chain; -//! 5) messages are processed in order (ordered by assigned nonce); -//! 6) relay may send proof-of-delivery back to this chain. -//! -//! Once message is sent, its progress can be tracked by looking at module events. -//! The assigned nonce is reported using `MessageAccepted` event. When message is -//! 
delivered to the the bridged chain, it is reported using `MessagesDelivered` event. -//! -//! **IMPORTANT NOTE**: after generating weights (custom `WeighInfo` implementation) for -//! your runtime (where this module is plugged to), please add test for these weights. -//! The test should call the `ensure_weights_are_correct` function from this module. -//! If this test fails with your weights, then either weights are computed incorrectly, -//! or some benchmarks assumptions are broken for your runtime. - -#![cfg_attr(not(feature = "std"), no_std)] -// Generated by `decl_event!` -#![allow(clippy::unused_unit)] - -pub use crate::weights_ext::{ - ensure_able_to_receive_confirmation, ensure_able_to_receive_message, - ensure_weights_are_correct, WeightInfoExt, EXPECTED_DEFAULT_MESSAGE_LENGTH, -}; - -use crate::{ - inbound_lane::{InboundLane, InboundLaneStorage, ReceivalResult}, - outbound_lane::{OutboundLane, OutboundLaneStorage, ReceivalConfirmationResult}, - weights::WeightInfo, -}; - -use bp_messages::{ - source_chain::{ - LaneMessageVerifier, MessageDeliveryAndDispatchPayment, OnDeliveryConfirmed, - OnMessageAccepted, SendMessageArtifacts, TargetHeaderChain, - }, - target_chain::{ - DispatchMessage, MessageDispatch, ProvedLaneMessages, ProvedMessages, SourceHeaderChain, - }, - total_unrewarded_messages, DeliveredMessages, InboundLaneData, LaneId, MessageData, MessageKey, - MessageNonce, OperatingMode, OutboundLaneData, Parameter as MessagesParameter, - UnrewardedRelayersState, -}; -use bp_runtime::{ChainId, Size}; -use codec::{Decode, Encode}; -use frame_support::{ - fail, - traits::Get, - weights::{Pays, PostDispatchInfo}, -}; -use frame_system::RawOrigin; -use num_traits::{SaturatingAdd, Zero}; -use sp_core::H256; -use sp_runtime::traits::{BadOrigin, Convert}; -use sp_std::{cell::RefCell, cmp::PartialOrd, marker::PhantomData, prelude::*}; - -mod inbound_lane; -mod outbound_lane; -mod weights_ext; - -pub mod instant_payments; -pub mod weights; - -#[cfg(feature = "runtime-benchmarks")] -pub mod benchmarking; - -#[cfg(test)] -mod mock; - -pub use pallet::*; - -#[frame_support::pallet] -pub mod pallet { - use super::*; - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - - #[pallet::config] - pub trait Config: frame_system::Config { - // General types - - /// The overarching event type. - type Event: From> + IsType<::Event>; - /// Benchmarks results from runtime we're plugged into. - type WeightInfo: WeightInfoExt; - - /// Gets the chain id value from the instance. - #[pallet::constant] - type BridgedChainId: Get; - /// Pallet parameter that is opaque to the pallet itself, but may be used by the runtime - /// for integrating the pallet. - /// - /// All pallet parameters may only be updated either by the root, or by the pallet owner. - type Parameter: MessagesParameter; - - /// Maximal number of messages that may be pruned during maintenance. Maintenance occurs - /// whenever new message is sent. The reason is that if you want to use lane, you should - /// be ready to pay for its maintenance. - type MaxMessagesToPruneAtOnce: Get; - /// Maximal number of unrewarded relayer entries at inbound lane. Unrewarded means that the - /// relayer has delivered messages, but either confirmations haven't been delivered back to - /// the source chain, or we haven't received reward confirmations yet. - /// - /// This constant limits maximal number of entries in the `InboundLaneData::relayers`. 
Keep - /// in mind that the same relayer account may take several (non-consecutive) entries in this - /// set. - type MaxUnrewardedRelayerEntriesAtInboundLane: Get; - /// Maximal number of unconfirmed messages at inbound lane. Unconfirmed means that the - /// message has been delivered, but either confirmations haven't been delivered back to the - /// source chain, or we haven't received reward confirmations for these messages yet. - /// - /// This constant limits difference between last message from last entry of the - /// `InboundLaneData::relayers` and first message at the first entry. - /// - /// There is no point of making this parameter lesser than - /// MaxUnrewardedRelayerEntriesAtInboundLane, because then maximal number of relayer entries - /// will be limited by maximal number of messages. - /// - /// This value also represents maximal number of messages in single delivery transaction. - /// Transaction that is declaring more messages than this value, will be rejected. Even if - /// these messages are from different lanes. - type MaxUnconfirmedMessagesAtInboundLane: Get; - - /// Payload type of outbound messages. This payload is dispatched on the bridged chain. - type OutboundPayload: Parameter + Size; - /// Message fee type of outbound messages. This fee is paid on this chain. - type OutboundMessageFee: Default - + From - + PartialOrd - + Parameter - + SaturatingAdd - + Zero - + Copy; - - /// Payload type of inbound messages. This payload is dispatched on this chain. - type InboundPayload: Decode; - /// Message fee type of inbound messages. This fee is paid on the bridged chain. - type InboundMessageFee: Decode; - /// Identifier of relayer that deliver messages to this chain. Relayer reward is paid on the - /// bridged chain. - type InboundRelayer: Parameter; - - /// A type which can be turned into an AccountId from a 256-bit hash. - /// - /// Used when deriving the shared relayer fund account. - type AccountIdConverter: sp_runtime::traits::Convert; - - // Types that are used by outbound_lane (on source chain). - - /// Target header chain. - type TargetHeaderChain: TargetHeaderChain; - /// Message payload verifier. - type LaneMessageVerifier: LaneMessageVerifier< - Self::Origin, - Self::AccountId, - Self::OutboundPayload, - Self::OutboundMessageFee, - >; - /// Message delivery payment. - type MessageDeliveryAndDispatchPayment: MessageDeliveryAndDispatchPayment< - Self::Origin, - Self::AccountId, - Self::OutboundMessageFee, - >; - /// Handler for accepted messages. - type OnMessageAccepted: OnMessageAccepted; - /// Handler for delivered messages. - type OnDeliveryConfirmed: OnDeliveryConfirmed; - - // Types that are used by inbound_lane (on target chain). - - /// Source header chain, as it is represented on target chain. - type SourceHeaderChain: SourceHeaderChain; - /// Message dispatch. - type MessageDispatch: MessageDispatch< - Self::AccountId, - Self::InboundMessageFee, - DispatchPayload = Self::InboundPayload, - >; - } - - /// Shortcut to messages proof type for Config. - type MessagesProofOf = <>::SourceHeaderChain as SourceHeaderChain< - >::InboundMessageFee, - >>::MessagesProof; - /// Shortcut to messages delivery proof type for Config. 
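// How the two limits documented above bound an inbound lane can be checked
// with plain numbers. A stand-alone sketch: `may_accept` and the limit values
// (16 relayer entries, 128 unconfirmed messages) are illustrative; the real
// checks live in `InboundLane::receive_message`.
fn may_accept(
    nonce: u64,
    last_confirmed_nonce: u64,
    unrewarded_relayer_entries: u64,
    max_unrewarded_relayer_entries: u64,
    max_unconfirmed_messages: u64,
) -> Result<(), &'static str> {
    if unrewarded_relayer_entries >= max_unrewarded_relayer_entries {
        return Err("too many unrewarded relayer entries");
    }
    if nonce.saturating_sub(last_confirmed_nonce) > max_unconfirmed_messages {
        return Err("too many unconfirmed messages");
    }
    Ok(())
}

fn main() {
    // with room for 16 relayer entries and 128 unconfirmed messages...
    assert!(may_accept(100, 0, 15, 16, 128).is_ok());
    // ...message 129 is rejected while nothing has been confirmed yet
    assert_eq!(may_accept(129, 0, 1, 16, 128), Err("too many unconfirmed messages"));
    // ...and a 17th relayer entry is rejected even though the message count is fine
    assert_eq!(may_accept(17, 0, 16, 16, 128), Err("too many unrewarded relayer entries"));
}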
- type MessagesDeliveryProofOf = - <>::TargetHeaderChain as TargetHeaderChain< - >::OutboundPayload, - ::AccountId, - >>::MessagesDeliveryProof; - - #[pallet::pallet] - #[pallet::generate_store(pub(super) trait Store)] - #[pallet::without_storage_info] - pub struct Pallet(PhantomData<(T, I)>); - - #[pallet::call] - impl, I: 'static> Pallet { - /// Change `PalletOwner`. - /// - /// May only be called either by root, or by `PalletOwner`. - #[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] - pub fn set_owner(origin: OriginFor, new_owner: Option) -> DispatchResult { - ensure_owner_or_root::(origin)?; - match new_owner { - Some(new_owner) => { - PalletOwner::::put(&new_owner); - log::info!(target: "runtime::bridge-messages", "Setting pallet Owner to: {:?}", new_owner); - }, - None => { - PalletOwner::::kill(); - log::info!(target: "runtime::bridge-messages", "Removed Owner of pallet."); - }, - } - Ok(()) - } - - /// Halt or resume all/some pallet operations. - /// - /// May only be called either by root, or by `PalletOwner`. - #[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] - pub fn set_operating_mode( - origin: OriginFor, - operating_mode: OperatingMode, - ) -> DispatchResult { - ensure_owner_or_root::(origin)?; - PalletOperatingMode::::put(operating_mode); - log::info!( - target: "runtime::bridge-messages", - "Setting messages pallet operating mode to {:?}.", - operating_mode, - ); - Ok(()) - } - - /// Update pallet parameter. - /// - /// May only be called either by root, or by `PalletOwner`. - /// - /// The weight is: single read for permissions check + 2 writes for parameter value and - /// event. - #[pallet::weight((T::DbWeight::get().reads_writes(1, 2), DispatchClass::Operational))] - pub fn update_pallet_parameter( - origin: OriginFor, - parameter: T::Parameter, - ) -> DispatchResult { - ensure_owner_or_root::(origin)?; - parameter.save(); - Self::deposit_event(Event::ParameterUpdated { parameter }); - Ok(()) - } - - /// Send message over lane. - #[pallet::weight(T::WeightInfo::send_message_weight(payload, T::DbWeight::get()))] - pub fn send_message( - origin: OriginFor, - lane_id: LaneId, - payload: T::OutboundPayload, - delivery_and_dispatch_fee: T::OutboundMessageFee, - ) -> DispatchResultWithPostInfo { - crate::send_message::(origin, lane_id, payload, delivery_and_dispatch_fee).map( - |sent_message| PostDispatchInfo { - actual_weight: Some(sent_message.weight), - pays_fee: Pays::Yes, - }, - ) - } - - /// Pay additional fee for the message. 
- #[pallet::weight(T::WeightInfo::maximal_increase_message_fee())] - pub fn increase_message_fee( - origin: OriginFor, - lane_id: LaneId, - nonce: MessageNonce, - additional_fee: T::OutboundMessageFee, - ) -> DispatchResultWithPostInfo { - ensure_not_halted::()?; - // if someone tries to pay for already-delivered message, we're rejecting this intention - // (otherwise this additional fee will be locked forever in relayers fund) - // - // if someone tries to pay for not-yet-sent message, we're rejecting this intention, or - // we're risking to have mess in the storage - let lane = outbound_lane::(lane_id); - ensure!( - nonce > lane.data().latest_received_nonce, - Error::::MessageIsAlreadyDelivered - ); - ensure!( - nonce <= lane.data().latest_generated_nonce, - Error::::MessageIsNotYetSent - ); - - // withdraw additional fee from submitter - T::MessageDeliveryAndDispatchPayment::pay_delivery_and_dispatch_fee( - &origin, - &additional_fee, - &relayer_fund_account_id::(), - ) - .map_err(|err| { - log::trace!( - target: "runtime::bridge-messages", - "Submitter can't pay additional fee {:?} for the message {:?}/{:?} to {:?}: {:?}", - additional_fee, - lane_id, - nonce, - relayer_fund_account_id::(), - err, - ); - - Error::::FailedToWithdrawMessageFee - })?; - - // and finally update fee in the storage - let message_key = MessageKey { lane_id, nonce }; - let message_size = OutboundMessages::::mutate(message_key, |message_data| { - // saturating_add is fine here - overflow here means that someone controls all - // chain funds, which shouldn't ever happen + `pay_delivery_and_dispatch_fee` - // above will fail before we reach here - let message_data = message_data.as_mut().expect( - "the message is sent and not yet delivered; so it is in the storage; qed", - ); - message_data.fee = message_data.fee.saturating_add(&additional_fee); - message_data.payload.len() - }); - - // compute actual dispatch weight that depends on the stored message size - let actual_weight = sp_std::cmp::min( - T::WeightInfo::maximal_increase_message_fee(), - T::WeightInfo::increase_message_fee(message_size as _), - ); - - Ok(PostDispatchInfo { actual_weight: Some(actual_weight), pays_fee: Pays::Yes }) - } - - /// Receive messages proof from bridged chain. - /// - /// The weight of the call assumes that the transaction always brings outbound lane - /// state update. Because of that, the submitter (relayer) has no benefit of not including - /// this data in the transaction, so reward confirmations lags should be minimal. - #[pallet::weight(T::WeightInfo::receive_messages_proof_weight(proof, *messages_count, *dispatch_weight))] - pub fn receive_messages_proof( - origin: OriginFor, - relayer_id_at_bridged_chain: T::InboundRelayer, - proof: MessagesProofOf, - messages_count: u32, - dispatch_weight: Weight, - ) -> DispatchResultWithPostInfo { - ensure_not_halted::()?; - let relayer_id_at_this_chain = ensure_signed(origin)?; - - // reject transactions that are declaring too many messages - ensure!( - MessageNonce::from(messages_count) <= T::MaxUnconfirmedMessagesAtInboundLane::get(), - Error::::TooManyMessagesInTheProof - ); - - // why do we need to know the weight of this (`receive_messages_proof`) call? Because - // we may want to return some funds for not-dispatching (or partially dispatching) some - // messages to the call origin (relayer). And this is done by returning actual weight - // from the call. But we only know dispatch weight of every messages. 
So to refund - // relayer because we have not dispatched Message, we need to: - // - // ActualWeight = DeclaredWeight - Message.DispatchWeight - // - // The DeclaredWeight is exactly what's computed here. Unfortunately it is impossible - // to get pre-computed value (and it has been already computed by the executive). - let declared_weight = T::WeightInfo::receive_messages_proof_weight( - &proof, - messages_count, - dispatch_weight, - ); - let mut actual_weight = declared_weight; - - // verify messages proof && convert proof into messages - let messages = verify_and_decode_messages_proof::< - T::SourceHeaderChain, - T::InboundMessageFee, - T::InboundPayload, - >(proof, messages_count) - .map_err(|err| { - log::trace!( - target: "runtime::bridge-messages", - "Rejecting invalid messages proof: {:?}", - err, - ); - - Error::::InvalidMessagesProof - })?; - - // dispatch messages and (optionally) update lane(s) state(s) - let mut total_messages = 0; - let mut valid_messages = 0; - let mut dispatch_weight_left = dispatch_weight; - for (lane_id, lane_data) in messages { - let mut lane = inbound_lane::(lane_id); - - if let Some(lane_state) = lane_data.lane_state { - let updated_latest_confirmed_nonce = lane.receive_state_update(lane_state); - if let Some(updated_latest_confirmed_nonce) = updated_latest_confirmed_nonce { - log::trace!( - target: "runtime::bridge-messages", - "Received lane {:?} state update: latest_confirmed_nonce={}", - lane_id, - updated_latest_confirmed_nonce, - ); - } - } - - for message in lane_data.messages { - debug_assert_eq!(message.key.lane_id, lane_id); - - // ensure that relayer has declared enough weight for dispatching next message - // on this lane. We can't dispatch lane messages out-of-order, so if declared - // weight is not enough, let's move to next lane - let dispatch_weight = T::MessageDispatch::dispatch_weight(&message); - if dispatch_weight > dispatch_weight_left { - log::trace!( - target: "runtime::bridge-messages", - "Cannot dispatch any more messages on lane {:?}. Weight: declared={}, left={}", - lane_id, - dispatch_weight, - dispatch_weight_left, - ); - break - } - total_messages += 1; - - let receival_result = lane.receive_message::( - &relayer_id_at_bridged_chain, - &relayer_id_at_this_chain, - message.key.nonce, - message.data, - ); - - // note that we're returning unspent weight to relayer even if message has been - // rejected by the lane. This allows relayers to submit spam transactions with - // e.g. the same set of already delivered messages over and over again, without - // losing funds for messages dispatch. But keep in mind that relayer pays base - // delivery transaction cost anyway. And base cost covers everything except - // dispatch, so we have a balance here. - let (unspent_weight, refund_pay_dispatch_fee) = match receival_result { - ReceivalResult::Dispatched(dispatch_result) => { - valid_messages += 1; - ( - dispatch_result.unspent_weight, - !dispatch_result.dispatch_fee_paid_during_dispatch, - ) - }, - ReceivalResult::InvalidNonce | - ReceivalResult::TooManyUnrewardedRelayers | - ReceivalResult::TooManyUnconfirmedMessages => (dispatch_weight, true), - }; - - let unspent_weight = sp_std::cmp::min(unspent_weight, dispatch_weight); - dispatch_weight_left -= dispatch_weight - unspent_weight; - actual_weight = actual_weight.saturating_sub(unspent_weight).saturating_sub( - // delivery call weight formula assumes that the fee is paid at - // this (target) chain. 
If the message is prepaid at the source - // chain, let's refund relayer with this extra cost. - if refund_pay_dispatch_fee { - T::WeightInfo::pay_inbound_dispatch_fee_overhead() - } else { - 0 - }, - ); - } - } - - log::trace!( - target: "runtime::bridge-messages", - "Received messages: total={}, valid={}. Weight used: {}/{}", - total_messages, - valid_messages, - actual_weight, - declared_weight, - ); - - Ok(PostDispatchInfo { actual_weight: Some(actual_weight), pays_fee: Pays::Yes }) - } - - /// Receive messages delivery proof from bridged chain. - #[pallet::weight(T::WeightInfo::receive_messages_delivery_proof_weight( - proof, - relayers_state, - T::DbWeight::get(), - ))] - pub fn receive_messages_delivery_proof( - origin: OriginFor, - proof: MessagesDeliveryProofOf, - relayers_state: UnrewardedRelayersState, - ) -> DispatchResultWithPostInfo { - ensure_not_halted::()?; - - // why do we need to know the weight of this (`receive_messages_delivery_proof`) call? - // Because we may want to return some funds for messages that are not processed by the - // delivery callback, or if their actual processing weight is less than accounted by - // weight formula. So to refund relayer, we need to: - // - // ActualWeight = DeclaredWeight - UnspentCallbackWeight - // - // The DeclaredWeight is exactly what's computed here. Unfortunately it is impossible - // to get pre-computed value (and it has been already computed by the executive). - let single_message_callback_overhead = - T::WeightInfo::single_message_callback_overhead(T::DbWeight::get()); - let declared_weight = T::WeightInfo::receive_messages_delivery_proof_weight( - &proof, - &relayers_state, - T::DbWeight::get(), - ); - let mut actual_weight = declared_weight; - - let confirmation_relayer = ensure_signed(origin)?; - let (lane_id, lane_data) = T::TargetHeaderChain::verify_messages_delivery_proof(proof) - .map_err(|err| { - log::trace!( - target: "runtime::bridge-messages", - "Rejecting invalid messages delivery proof: {:?}", - err, - ); - - Error::::InvalidMessagesDeliveryProof - })?; - - // verify that the relayer has declared correct `lane_data::relayers` state - // (we only care about total number of entries and messages, because this affects call - // weight) - ensure!( - total_unrewarded_messages(&lane_data.relayers).unwrap_or(MessageNonce::MAX) == - relayers_state.total_messages && - lane_data.relayers.len() as MessageNonce == - relayers_state.unrewarded_relayer_entries, - Error::::InvalidUnrewardedRelayersState - ); - - // mark messages as delivered - let mut lane = outbound_lane::(lane_id); - let last_delivered_nonce = lane_data.last_delivered_nonce(); - let confirmed_messages = match lane.confirm_delivery( - relayers_state.total_messages, - last_delivered_nonce, - &lane_data.relayers, - ) { - ReceivalConfirmationResult::ConfirmedMessages(confirmed_messages) => - Some(confirmed_messages), - ReceivalConfirmationResult::NoNewConfirmations => None, - ReceivalConfirmationResult::TryingToConfirmMoreMessagesThanExpected( - to_confirm_messages_count, - ) => { - log::trace!( - target: "runtime::bridge-messages", - "Messages delivery proof contains too many messages to confirm: {} vs declared {}", - to_confirm_messages_count, - relayers_state.total_messages, - ); - - fail!(Error::::TryingToConfirmMoreMessagesThanExpected); - }, - error => { - log::trace!( - target: "runtime::bridge-messages", - "Messages delivery proof contains invalid unrewarded relayers vec: {:?}", - error, - ); - - fail!(Error::::InvalidUnrewardedRelayers); - }, - }; - - 
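// The refund rule spelled out in the comments above reduces to saturating
// arithmetic: ActualWeight = DeclaredWeight - sum(unspent weight per message).
// A stand-alone sketch with plain integers instead of the runtime `Weight`
// type; the numbers are illustrative.
fn actual_weight(declared_weight: u64, unspent_per_message: &[u64]) -> u64 {
    unspent_per_message
        .iter()
        .fold(declared_weight, |weight, unspent| weight.saturating_sub(*unspent))
}

fn main() {
    // three messages were declared with 1_000_000 weight in total;
    // they left 0, 200_000 and 50_000 weight unspent respectively
    assert_eq!(actual_weight(1_000_000, &[0, 200_000, 50_000]), 750_000);
    // the refund never underflows the declared weight
    assert_eq!(actual_weight(100, &[200]), 0);
}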
if let Some(confirmed_messages) = confirmed_messages { - // handle messages delivery confirmation - let preliminary_callback_overhead = - relayers_state.total_messages.saturating_mul(single_message_callback_overhead); - let actual_callback_weight = - T::OnDeliveryConfirmed::on_messages_delivered(&lane_id, &confirmed_messages); - match preliminary_callback_overhead.checked_sub(actual_callback_weight) { - Some(difference) if difference == 0 => (), - Some(difference) => { - log::trace!( - target: "runtime::bridge-messages", - "T::OnDeliveryConfirmed callback has spent less weight than expected. Refunding: \ - {} - {} = {}", - preliminary_callback_overhead, - actual_callback_weight, - difference, - ); - actual_weight = actual_weight.saturating_sub(difference); - }, - None => { - debug_assert!( - false, - "T::OnDeliveryConfirmed callback consumed too much weight." - ); - log::error!( - target: "runtime::bridge-messages", - "T::OnDeliveryConfirmed callback has spent more weight that it is allowed to: \ - {} vs {}", - preliminary_callback_overhead, - actual_callback_weight, - ); - }, - } - - // emit 'delivered' event - let received_range = confirmed_messages.begin..=confirmed_messages.end; - Self::deposit_event(Event::MessagesDelivered { - lane_id, - messages: confirmed_messages, - }); - - // if some new messages have been confirmed, reward relayers - let relayer_fund_account = - relayer_fund_account_id::(); - >::MessageDeliveryAndDispatchPayment::pay_relayers_rewards( - lane_id, - lane_data.relayers, - &confirmation_relayer, - &received_range, - &relayer_fund_account, - ); - } - - log::trace!( - target: "runtime::bridge-messages", - "Received messages delivery proof up to (and including) {} at lane {:?}", - last_delivered_nonce, - lane_id, - ); - - Ok(PostDispatchInfo { actual_weight: Some(actual_weight), pays_fee: Pays::Yes }) - } - } - - #[pallet::event] - #[pallet::generate_deposit(pub(super) fn deposit_event)] - pub enum Event, I: 'static = ()> { - /// Pallet parameter has been updated. - ParameterUpdated { parameter: T::Parameter }, - /// Message has been accepted and is waiting to be delivered. - MessageAccepted { lane_id: LaneId, nonce: MessageNonce }, - /// Messages in the inclusive range have been delivered to the bridged chain. - MessagesDelivered { lane_id: LaneId, messages: DeliveredMessages }, - } - - #[pallet::error] - pub enum Error { - /// All pallet operations are halted. - Halted, - /// Message has been treated as invalid by chain verifier. - MessageRejectedByChainVerifier, - /// Message has been treated as invalid by lane verifier. - MessageRejectedByLaneVerifier, - /// Submitter has failed to pay fee for delivering and dispatching messages. - FailedToWithdrawMessageFee, - /// The transaction brings too many messages. - TooManyMessagesInTheProof, - /// Invalid messages has been submitted. - InvalidMessagesProof, - /// Invalid messages delivery proof has been submitted. - InvalidMessagesDeliveryProof, - /// The bridged chain has invalid `UnrewardedRelayers` in its storage (fatal for the lane). - InvalidUnrewardedRelayers, - /// The relayer has declared invalid unrewarded relayers state in the - /// `receive_messages_delivery_proof` call. - InvalidUnrewardedRelayersState, - /// The message someone is trying to work with (i.e. increase fee) is already-delivered. - MessageIsAlreadyDelivered, - /// The message someone is trying to work with (i.e. increase fee) is not yet sent. 
- MessageIsNotYetSent, - /// The number of actually confirmed messages is going to be larger than the number of - /// messages in the proof. This may mean that this or bridged chain storage is corrupted. - TryingToConfirmMoreMessagesThanExpected, - } - - /// Optional pallet owner. - /// - /// Pallet owner has a right to halt all pallet operations and then resume it. If it is - /// `None`, then there are no direct ways to halt/resume pallet operations, but other - /// runtime methods may still be used to do that (i.e. democracy::referendum to update halt - /// flag directly or call the `halt_operations`). - #[pallet::storage] - #[pallet::getter(fn module_owner)] - pub type PalletOwner, I: 'static = ()> = StorageValue<_, T::AccountId>; - - /// The current operating mode of the pallet. - /// - /// Depending on the mode either all, some, or no transactions will be allowed. - #[pallet::storage] - #[pallet::getter(fn operating_mode)] - pub type PalletOperatingMode, I: 'static = ()> = - StorageValue<_, OperatingMode, ValueQuery>; - - /// Map of lane id => inbound lane data. - #[pallet::storage] - pub type InboundLanes, I: 'static = ()> = - StorageMap<_, Blake2_128Concat, LaneId, InboundLaneData, ValueQuery>; - - /// Map of lane id => outbound lane data. - #[pallet::storage] - pub type OutboundLanes, I: 'static = ()> = - StorageMap<_, Blake2_128Concat, LaneId, OutboundLaneData, ValueQuery>; - - /// All queued outbound messages. - #[pallet::storage] - pub type OutboundMessages, I: 'static = ()> = - StorageMap<_, Blake2_128Concat, MessageKey, MessageData>; - - #[pallet::genesis_config] - pub struct GenesisConfig, I: 'static = ()> { - /// Initial pallet operating mode. - pub operating_mode: OperatingMode, - /// Initial pallet owner. - pub owner: Option, - /// Dummy marker. - pub phantom: sp_std::marker::PhantomData, - } - - #[cfg(feature = "std")] - impl, I: 'static> Default for GenesisConfig { - fn default() -> Self { - Self { - operating_mode: Default::default(), - owner: Default::default(), - phantom: Default::default(), - } - } - } - - #[pallet::genesis_build] - impl, I: 'static> GenesisBuild for GenesisConfig { - fn build(&self) { - PalletOperatingMode::::put(&self.operating_mode); - if let Some(ref owner) = self.owner { - PalletOwner::::put(owner); - } - } - } - - impl, I: 'static> Pallet { - /// Get stored data of the outbound message with given nonce. - pub fn outbound_message_data( - lane: LaneId, - nonce: MessageNonce, - ) -> Option> { - OutboundMessages::::get(MessageKey { lane_id: lane, nonce }) - } - } -} - -/// AccountId of the shared relayer fund account. -/// -/// This account is passed to `MessageDeliveryAndDispatchPayment` trait, and depending -/// on the implementation it can be used to store relayers rewards. -/// See [`InstantCurrencyPayments`] for a concrete implementation. -pub fn relayer_fund_account_id>( -) -> AccountId { - let encoded_id = bp_runtime::derive_relayer_fund_account_id(bp_runtime::NO_INSTANCE_ID); - AccountIdConverter::convert(encoded_id) -} - -impl - bp_messages::source_chain::MessagesBridge< - T::Origin, - T::AccountId, - T::OutboundMessageFee, - T::OutboundPayload, - > for Pallet -where - T: Config, - I: 'static, -{ - type Error = sp_runtime::DispatchErrorWithPostInfo; - - fn send_message( - sender: T::Origin, - lane: LaneId, - message: T::OutboundPayload, - delivery_and_dispatch_fee: T::OutboundMessageFee, - ) -> Result { - crate::send_message::(sender, lane, message, delivery_and_dispatch_fee) - } -} - -/// Function that actually sends message. 
-fn send_message, I: 'static>( - submitter: T::Origin, - lane_id: LaneId, - payload: T::OutboundPayload, - delivery_and_dispatch_fee: T::OutboundMessageFee, -) -> sp_std::result::Result< - SendMessageArtifacts, - sp_runtime::DispatchErrorWithPostInfo, -> { - ensure_normal_operating_mode::()?; - - // initially, actual (post-dispatch) weight is equal to pre-dispatch weight - let mut actual_weight = T::WeightInfo::send_message_weight(&payload, T::DbWeight::get()); - - // let's first check if message can be delivered to target chain - T::TargetHeaderChain::verify_message(&payload).map_err(|err| { - log::trace!( - target: "runtime::bridge-messages", - "Message to lane {:?} is rejected by target chain: {:?}", - lane_id, - err, - ); - - Error::::MessageRejectedByChainVerifier - })?; - - // now let's enforce any additional lane rules - let mut lane = outbound_lane::(lane_id); - T::LaneMessageVerifier::verify_message( - &submitter, - &delivery_and_dispatch_fee, - &lane_id, - &lane.data(), - &payload, - ) - .map_err(|err| { - log::trace!( - target: "runtime::bridge-messages", - "Message to lane {:?} is rejected by lane verifier: {:?}", - lane_id, - err, - ); - - Error::::MessageRejectedByLaneVerifier - })?; - - // let's withdraw delivery and dispatch fee from submitter - T::MessageDeliveryAndDispatchPayment::pay_delivery_and_dispatch_fee( - &submitter, - &delivery_and_dispatch_fee, - &relayer_fund_account_id::(), - ) - .map_err(|err| { - log::trace!( - target: "runtime::bridge-messages", - "Message to lane {:?} is rejected because submitter is unable to pay fee {:?}: {:?}", - lane_id, - delivery_and_dispatch_fee, - err, - ); - - Error::::FailedToWithdrawMessageFee - })?; - - // finally, save message in outbound storage and emit event - let encoded_payload = payload.encode(); - let encoded_payload_len = encoded_payload.len(); - let nonce = - lane.send_message(MessageData { payload: encoded_payload, fee: delivery_and_dispatch_fee }); - // Guaranteed to be called outside only when the message is accepted. - // We assume that the maximum weight call back used is `single_message_callback_overhead`, so do - // not perform complex db operation in callback. If you want to, put these magic logic in - // outside pallet and control the weight there. - let single_message_callback_overhead = - T::WeightInfo::single_message_callback_overhead(T::DbWeight::get()); - let actual_callback_weight = T::OnMessageAccepted::on_messages_accepted(&lane_id, &nonce); - match single_message_callback_overhead.checked_sub(actual_callback_weight) { - Some(difference) if difference == 0 => (), - Some(difference) => { - log::trace!( - target: "runtime::bridge-messages", - "T::OnMessageAccepted callback has spent less weight than expected. 
Refunding: \ - {} - {} = {}", - single_message_callback_overhead, - actual_callback_weight, - difference, - ); - actual_weight = actual_weight.saturating_sub(difference); - }, - None => { - debug_assert!(false, "T::OnMessageAccepted callback consumed too much weight."); - log::error!( - target: "runtime::bridge-messages", - "T::OnMessageAccepted callback has spent more weight that it is allowed to: \ - {} vs {}", - single_message_callback_overhead, - actual_callback_weight, - ); - }, - } - - // message sender pays for pruning at most `MaxMessagesToPruneAtOnce` messages - // the cost of pruning every message is roughly single db write - // => lets refund sender if less than `MaxMessagesToPruneAtOnce` messages pruned - let max_messages_to_prune = T::MaxMessagesToPruneAtOnce::get(); - let pruned_messages = lane.prune_messages(max_messages_to_prune); - if let Some(extra_messages) = max_messages_to_prune.checked_sub(pruned_messages) { - actual_weight = actual_weight.saturating_sub(T::DbWeight::get().writes(extra_messages)); - } - - log::trace!( - target: "runtime::bridge-messages", - "Accepted message {} to lane {:?}. Message size: {:?}", - nonce, - lane_id, - encoded_payload_len, - ); - - Pallet::::deposit_event(Event::MessageAccepted { lane_id, nonce }); - - Ok(SendMessageArtifacts { nonce, weight: actual_weight }) -} - -/// Ensure that the origin is either root, or `PalletOwner`. -fn ensure_owner_or_root, I: 'static>(origin: T::Origin) -> Result<(), BadOrigin> { - match origin.into() { - Ok(RawOrigin::Root) => Ok(()), - Ok(RawOrigin::Signed(ref signer)) - if Some(signer) == Pallet::::module_owner().as_ref() => - Ok(()), - _ => Err(BadOrigin), - } -} - -/// Ensure that the pallet is in normal operational mode. -fn ensure_normal_operating_mode, I: 'static>() -> Result<(), Error> { - if PalletOperatingMode::::get() != OperatingMode::Normal { - Err(Error::::Halted) - } else { - Ok(()) - } -} - -/// Ensure that the pallet is not halted. -fn ensure_not_halted, I: 'static>() -> Result<(), Error> { - if PalletOperatingMode::::get() == OperatingMode::Halted { - Err(Error::::Halted) - } else { - Ok(()) - } -} - -/// Creates new inbound lane object, backed by runtime storage. -fn inbound_lane, I: 'static>( - lane_id: LaneId, -) -> InboundLane> { - InboundLane::new(inbound_lane_storage::(lane_id)) -} - -/// Creates new runtime inbound lane storage. -fn inbound_lane_storage, I: 'static>( - lane_id: LaneId, -) -> RuntimeInboundLaneStorage { - RuntimeInboundLaneStorage { - lane_id, - cached_data: RefCell::new(None), - _phantom: Default::default(), - } -} - -/// Creates new outbound lane object, backed by runtime storage. -fn outbound_lane, I: 'static>( - lane_id: LaneId, -) -> OutboundLane> { - OutboundLane::new(RuntimeOutboundLaneStorage { lane_id, _phantom: Default::default() }) -} - -/// Runtime inbound lane storage. 
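// The pruning refund applied in `send_message` above is plain arithmetic: the
// sender pre-pays for `MaxMessagesToPruneAtOnce` database writes and gets one
// write refunded for every message that did not actually need pruning. A
// sketch with made-up weight numbers; `refund_unused_pruning` is an
// illustrative name, not pallet code.
fn refund_unused_pruning(
    pre_dispatch_weight: u64,
    db_write_weight: u64,
    max_messages_to_prune: u64,
    pruned_messages: u64,
) -> u64 {
    let unused_writes = max_messages_to_prune.saturating_sub(pruned_messages);
    pre_dispatch_weight.saturating_sub(db_write_weight * unused_writes)
}

fn main() {
    // a pre-dispatch weight of 500_000 assumed up to 10 pruned messages at 20_000 each;
    // only 4 were actually pruned, so 6 * 20_000 weight is handed back
    assert_eq!(refund_unused_pruning(500_000, 20_000, 10, 4), 380_000);
}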
-struct RuntimeInboundLaneStorage, I: 'static = ()> { - lane_id: LaneId, - cached_data: RefCell>>, - _phantom: PhantomData, -} - -impl, I: 'static> InboundLaneStorage for RuntimeInboundLaneStorage { - type MessageFee = T::InboundMessageFee; - type Relayer = T::InboundRelayer; - - fn id(&self) -> LaneId { - self.lane_id - } - - fn max_unrewarded_relayer_entries(&self) -> MessageNonce { - T::MaxUnrewardedRelayerEntriesAtInboundLane::get() - } - - fn max_unconfirmed_messages(&self) -> MessageNonce { - T::MaxUnconfirmedMessagesAtInboundLane::get() - } - - fn data(&self) -> InboundLaneData { - match self.cached_data.clone().into_inner() { - Some(data) => data, - None => { - let data = InboundLanes::::get(&self.lane_id); - *self.cached_data.try_borrow_mut().expect( - "we're in the single-threaded environment;\ - we have no recursive borrows; qed", - ) = Some(data.clone()); - data - }, - } - } - - fn set_data(&mut self, data: InboundLaneData) { - *self.cached_data.try_borrow_mut().expect( - "we're in the single-threaded environment;\ - we have no recursive borrows; qed", - ) = Some(data.clone()); - InboundLanes::::insert(&self.lane_id, data) - } -} - -/// Runtime outbound lane storage. -struct RuntimeOutboundLaneStorage { - lane_id: LaneId, - _phantom: PhantomData<(T, I)>, -} - -impl, I: 'static> OutboundLaneStorage for RuntimeOutboundLaneStorage { - type MessageFee = T::OutboundMessageFee; - - fn id(&self) -> LaneId { - self.lane_id - } - - fn data(&self) -> OutboundLaneData { - OutboundLanes::::get(&self.lane_id) - } - - fn set_data(&mut self, data: OutboundLaneData) { - OutboundLanes::::insert(&self.lane_id, data) - } - - #[cfg(test)] - fn message(&self, nonce: &MessageNonce) -> Option> { - OutboundMessages::::get(MessageKey { lane_id: self.lane_id, nonce: *nonce }) - } - - fn save_message( - &mut self, - nonce: MessageNonce, - mesage_data: MessageData, - ) { - OutboundMessages::::insert(MessageKey { lane_id: self.lane_id, nonce }, mesage_data); - } - - fn remove_message(&mut self, nonce: &MessageNonce) { - OutboundMessages::::remove(MessageKey { lane_id: self.lane_id, nonce: *nonce }); - } -} - -/// Verify messages proof and return proved messages with decoded payload. -fn verify_and_decode_messages_proof, Fee, DispatchPayload: Decode>( - proof: Chain::MessagesProof, - messages_count: u32, -) -> Result>, Chain::Error> { - // `receive_messages_proof` weight formula and `MaxUnconfirmedMessagesAtInboundLane` check - // guarantees that the `message_count` is sane and Vec may be allocated. 
- // (tx with too many messages will either be rejected from the pool, or will fail earlier) - Chain::verify_messages_proof(proof, messages_count).map(|messages_by_lane| { - messages_by_lane - .into_iter() - .map(|(lane, lane_data)| { - ( - lane, - ProvedLaneMessages { - lane_state: lane_data.lane_state, - messages: lane_data.messages.into_iter().map(Into::into).collect(), - }, - ) - }) - .collect() - }) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::{ - message, message_payload, run_test, unrewarded_relayer, Event as TestEvent, Origin, - TestMessageDeliveryAndDispatchPayment, TestMessagesDeliveryProof, TestMessagesParameter, - TestMessagesProof, TestOnDeliveryConfirmed1, TestOnDeliveryConfirmed2, - TestOnMessageAccepted, TestRuntime, TokenConversionRate, PAYLOAD_REJECTED_BY_TARGET_CHAIN, - REGULAR_PAYLOAD, TEST_LANE_ID, TEST_RELAYER_A, TEST_RELAYER_B, - }; - use bp_messages::{UnrewardedRelayer, UnrewardedRelayersState}; - use frame_support::{ - assert_noop, assert_ok, - storage::generator::{StorageMap, StorageValue}, - weights::Weight, - }; - use frame_system::{EventRecord, Pallet as System, Phase}; - use sp_runtime::DispatchError; - - fn get_ready_for_events() { - System::::set_block_number(1); - System::::reset_events(); - } - - fn inbound_unrewarded_relayers_state( - lane: bp_messages::LaneId, - ) -> bp_messages::UnrewardedRelayersState { - let relayers = InboundLanes::::get(&lane).relayers; - bp_messages::UnrewardedRelayersState { - unrewarded_relayer_entries: relayers.len() as _, - messages_in_oldest_entry: relayers - .front() - .map(|entry| 1 + entry.messages.end - entry.messages.begin) - .unwrap_or(0), - total_messages: total_unrewarded_messages(&relayers).unwrap_or(MessageNonce::MAX), - } - } - - fn send_regular_message() -> Weight { - get_ready_for_events(); - - let message_nonce = - outbound_lane::(TEST_LANE_ID).data().latest_generated_nonce + 1; - let weight = Pallet::::send_message( - Origin::signed(1), - TEST_LANE_ID, - REGULAR_PAYLOAD, - REGULAR_PAYLOAD.declared_weight, - ) - .expect("send_message has failed") - .actual_weight - .expect("send_message always returns Some"); - - // check event with assigned nonce - assert_eq!( - System::::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: TestEvent::Messages(Event::MessageAccepted { - lane_id: TEST_LANE_ID, - nonce: message_nonce, - }), - topics: vec![], - }], - ); - - // check that fee has been withdrawn from submitter - assert!(TestMessageDeliveryAndDispatchPayment::is_fee_paid( - 1, - REGULAR_PAYLOAD.declared_weight - )); - - weight - } - - fn receive_messages_delivery_proof() { - System::::set_block_number(1); - System::::reset_events(); - - assert_ok!(Pallet::::receive_messages_delivery_proof( - Origin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - last_confirmed_nonce: 1, - relayers: vec![UnrewardedRelayer { - relayer: 0, - messages: DeliveredMessages::new(1, true), - }] - .into_iter() - .collect(), - }, - ))), - UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - total_messages: 1, - ..Default::default() - }, - )); - - assert_eq!( - System::::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: TestEvent::Messages(Event::MessagesDelivered { - lane_id: TEST_LANE_ID, - messages: DeliveredMessages::new(1, true), - }), - topics: vec![], - }], - ); - } - - #[test] - fn pallet_owner_may_change_owner() { - run_test(|| { - PalletOwner::::put(2); - - assert_ok!(Pallet::::set_owner(Origin::root(), Some(1))); - assert_noop!( - 
Pallet::::set_operating_mode(Origin::signed(2), OperatingMode::Halted), - DispatchError::BadOrigin, - ); - assert_ok!(Pallet::::set_operating_mode( - Origin::root(), - OperatingMode::Halted - )); - - assert_ok!(Pallet::::set_owner(Origin::signed(1), None)); - assert_noop!( - Pallet::::set_operating_mode(Origin::signed(1), OperatingMode::Normal), - DispatchError::BadOrigin, - ); - assert_noop!( - Pallet::::set_operating_mode(Origin::signed(2), OperatingMode::Normal), - DispatchError::BadOrigin, - ); - assert_ok!(Pallet::::set_operating_mode( - Origin::root(), - OperatingMode::Normal - )); - }); - } - - #[test] - fn pallet_may_be_halted_by_root() { - run_test(|| { - assert_ok!(Pallet::::set_operating_mode( - Origin::root(), - OperatingMode::Halted - )); - assert_ok!(Pallet::::set_operating_mode( - Origin::root(), - OperatingMode::Normal - )); - }); - } - - #[test] - fn pallet_may_be_halted_by_owner() { - run_test(|| { - PalletOwner::::put(2); - - assert_ok!(Pallet::::set_operating_mode( - Origin::signed(2), - OperatingMode::Halted - )); - assert_ok!(Pallet::::set_operating_mode( - Origin::signed(2), - OperatingMode::Normal - )); - - assert_noop!( - Pallet::::set_operating_mode(Origin::signed(1), OperatingMode::Halted), - DispatchError::BadOrigin, - ); - assert_noop!( - Pallet::::set_operating_mode(Origin::signed(1), OperatingMode::Normal), - DispatchError::BadOrigin, - ); - - assert_ok!(Pallet::::set_operating_mode( - Origin::signed(2), - OperatingMode::Halted - )); - assert_noop!( - Pallet::::set_operating_mode(Origin::signed(1), OperatingMode::Normal), - DispatchError::BadOrigin, - ); - }); - } - - #[test] - fn pallet_parameter_may_be_updated_by_root() { - run_test(|| { - get_ready_for_events(); - - let parameter = TestMessagesParameter::TokenConversionRate(10.into()); - assert_ok!(Pallet::::update_pallet_parameter( - Origin::root(), - parameter.clone(), - )); - - assert_eq!(TokenConversionRate::get(), 10.into()); - assert_eq!( - System::::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: TestEvent::Messages(Event::ParameterUpdated { parameter }), - topics: vec![], - }], - ); - }); - } - - #[test] - fn pallet_parameter_may_be_updated_by_owner() { - run_test(|| { - PalletOwner::::put(2); - get_ready_for_events(); - - let parameter = TestMessagesParameter::TokenConversionRate(10.into()); - assert_ok!(Pallet::::update_pallet_parameter( - Origin::signed(2), - parameter.clone(), - )); - - assert_eq!(TokenConversionRate::get(), 10.into()); - assert_eq!( - System::::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: TestEvent::Messages(Event::ParameterUpdated { parameter }), - topics: vec![], - }], - ); - }); - } - - #[test] - fn pallet_parameter_cant_be_updated_by_arbitrary_submitter() { - run_test(|| { - assert_noop!( - Pallet::::update_pallet_parameter( - Origin::signed(2), - TestMessagesParameter::TokenConversionRate(10.into()), - ), - DispatchError::BadOrigin, - ); - - PalletOwner::::put(2); - - assert_noop!( - Pallet::::update_pallet_parameter( - Origin::signed(1), - TestMessagesParameter::TokenConversionRate(10.into()), - ), - DispatchError::BadOrigin, - ); - }); - } - - #[test] - fn fixed_u128_works_as_i_think() { - // this test is here just to be sure that conversion rate may be represented with FixedU128 - run_test(|| { - use sp_runtime::{FixedPointNumber, FixedU128}; - - // 1:1 conversion that we use by default for testnets - let rialto_token = 1u64; - let rialto_token_in_millau_tokens = - 
TokenConversionRate::get().saturating_mul_int(rialto_token); - assert_eq!(rialto_token_in_millau_tokens, 1); - - // let's say conversion rate is 1:1.7 - let conversion_rate = FixedU128::saturating_from_rational(170, 100); - let rialto_tokens = 100u64; - let rialto_tokens_in_millau_tokens = conversion_rate.saturating_mul_int(rialto_tokens); - assert_eq!(rialto_tokens_in_millau_tokens, 170); - - // let's say conversion rate is 1:0.25 - let conversion_rate = FixedU128::saturating_from_rational(25, 100); - let rialto_tokens = 100u64; - let rialto_tokens_in_millau_tokens = conversion_rate.saturating_mul_int(rialto_tokens); - assert_eq!(rialto_tokens_in_millau_tokens, 25); - }); - } - - #[test] - fn pallet_rejects_transactions_if_halted() { - run_test(|| { - // send message first to be able to check that delivery_proof fails later - send_regular_message(); - - PalletOperatingMode::::put(OperatingMode::Halted); - - assert_noop!( - Pallet::::send_message( - Origin::signed(1), - TEST_LANE_ID, - REGULAR_PAYLOAD, - REGULAR_PAYLOAD.declared_weight, - ), - Error::::Halted, - ); - - assert_noop!( - Pallet::::increase_message_fee(Origin::signed(1), TEST_LANE_ID, 1, 1,), - Error::::Halted, - ); - - assert_noop!( - Pallet::::receive_messages_proof( - Origin::signed(1), - TEST_RELAYER_A, - Ok(vec![message(2, REGULAR_PAYLOAD)]).into(), - 1, - REGULAR_PAYLOAD.declared_weight, - ), - Error::::Halted, - ); - - assert_noop!( - Pallet::::receive_messages_delivery_proof( - Origin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - last_confirmed_nonce: 1, - relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)] - .into_iter() - .collect(), - }, - ))), - UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 1, - total_messages: 1, - }, - ), - Error::::Halted, - ); - }); - } - - #[test] - fn pallet_rejects_new_messages_in_rejecting_outbound_messages_operating_mode() { - run_test(|| { - // send message first to be able to check that delivery_proof fails later - send_regular_message(); - - PalletOperatingMode::::put(OperatingMode::RejectingOutboundMessages); - - assert_noop!( - Pallet::::send_message( - Origin::signed(1), - TEST_LANE_ID, - REGULAR_PAYLOAD, - REGULAR_PAYLOAD.declared_weight, - ), - Error::::Halted, - ); - - assert_ok!(Pallet::::increase_message_fee( - Origin::signed(1), - TEST_LANE_ID, - 1, - 1, - )); - - assert_ok!(Pallet::::receive_messages_proof( - Origin::signed(1), - TEST_RELAYER_A, - Ok(vec![message(1, REGULAR_PAYLOAD)]).into(), - 1, - REGULAR_PAYLOAD.declared_weight, - )); - - assert_ok!(Pallet::::receive_messages_delivery_proof( - Origin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - last_confirmed_nonce: 1, - relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)] - .into_iter() - .collect(), - }, - ))), - UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 1, - total_messages: 1, - }, - )); - }); - } - - #[test] - fn send_message_works() { - run_test(|| { - send_regular_message(); - }); - } - - #[test] - fn chain_verifier_rejects_invalid_message_in_send_message() { - run_test(|| { - // messages with this payload are rejected by target chain verifier - assert_noop!( - Pallet::::send_message( - Origin::signed(1), - TEST_LANE_ID, - PAYLOAD_REJECTED_BY_TARGET_CHAIN, - PAYLOAD_REJECTED_BY_TARGET_CHAIN.declared_weight - ), - Error::::MessageRejectedByChainVerifier, - ); - }); - } - - #[test] - fn lane_verifier_rejects_invalid_message_in_send_message() { - 
run_test(|| { - // messages with zero fee are rejected by lane verifier - assert_noop!( - Pallet::::send_message( - Origin::signed(1), - TEST_LANE_ID, - REGULAR_PAYLOAD, - 0 - ), - Error::::MessageRejectedByLaneVerifier, - ); - }); - } - - #[test] - fn message_send_fails_if_submitter_cant_pay_message_fee() { - run_test(|| { - TestMessageDeliveryAndDispatchPayment::reject_payments(); - assert_noop!( - Pallet::::send_message( - Origin::signed(1), - TEST_LANE_ID, - REGULAR_PAYLOAD, - REGULAR_PAYLOAD.declared_weight - ), - Error::::FailedToWithdrawMessageFee, - ); - }); - } - - #[test] - fn receive_messages_proof_works() { - run_test(|| { - assert_ok!(Pallet::::receive_messages_proof( - Origin::signed(1), - TEST_RELAYER_A, - Ok(vec![message(1, REGULAR_PAYLOAD)]).into(), - 1, - REGULAR_PAYLOAD.declared_weight, - )); - - assert_eq!(InboundLanes::::get(TEST_LANE_ID).last_delivered_nonce(), 1); - }); - } - - #[test] - fn receive_messages_proof_updates_confirmed_message_nonce() { - run_test(|| { - // say we have received 10 messages && last confirmed message is 8 - InboundLanes::::insert( - TEST_LANE_ID, - InboundLaneData { - last_confirmed_nonce: 8, - relayers: vec![ - unrewarded_relayer(9, 9, TEST_RELAYER_A), - unrewarded_relayer(10, 10, TEST_RELAYER_B), - ] - .into_iter() - .collect(), - }, - ); - assert_eq!( - inbound_unrewarded_relayers_state(TEST_LANE_ID), - UnrewardedRelayersState { - unrewarded_relayer_entries: 2, - messages_in_oldest_entry: 1, - total_messages: 2, - }, - ); - - // message proof includes outbound lane state with latest confirmed message updated to 9 - let mut message_proof: TestMessagesProof = - Ok(vec![message(11, REGULAR_PAYLOAD)]).into(); - message_proof.result.as_mut().unwrap()[0].1.lane_state = - Some(OutboundLaneData { latest_received_nonce: 9, ..Default::default() }); - - assert_ok!(Pallet::::receive_messages_proof( - Origin::signed(1), - TEST_RELAYER_A, - message_proof, - 1, - REGULAR_PAYLOAD.declared_weight, - )); - - assert_eq!( - InboundLanes::::get(TEST_LANE_ID), - InboundLaneData { - last_confirmed_nonce: 9, - relayers: vec![ - unrewarded_relayer(10, 10, TEST_RELAYER_B), - unrewarded_relayer(11, 11, TEST_RELAYER_A) - ] - .into_iter() - .collect(), - }, - ); - assert_eq!( - inbound_unrewarded_relayers_state(TEST_LANE_ID), - UnrewardedRelayersState { - unrewarded_relayer_entries: 2, - messages_in_oldest_entry: 1, - total_messages: 2, - }, - ); - }); - } - - #[test] - fn receive_messages_proof_does_not_accept_message_if_dispatch_weight_is_not_enough() { - run_test(|| { - assert_ok!(Pallet::::receive_messages_proof( - Origin::signed(1), - TEST_RELAYER_A, - Ok(vec![message(1, REGULAR_PAYLOAD)]).into(), - 1, - REGULAR_PAYLOAD.declared_weight - 1, - )); - assert_eq!(InboundLanes::::get(TEST_LANE_ID).last_delivered_nonce(), 0); - }); - } - - #[test] - fn receive_messages_proof_rejects_invalid_proof() { - run_test(|| { - assert_noop!( - Pallet::::receive_messages_proof( - Origin::signed(1), - TEST_RELAYER_A, - Err(()).into(), - 1, - 0, - ), - Error::::InvalidMessagesProof, - ); - }); - } - - #[test] - fn receive_messages_proof_rejects_proof_with_too_many_messages() { - run_test(|| { - assert_noop!( - Pallet::::receive_messages_proof( - Origin::signed(1), - TEST_RELAYER_A, - Ok(vec![message(1, REGULAR_PAYLOAD)]).into(), - u32::MAX, - 0, - ), - Error::::TooManyMessagesInTheProof, - ); - }); - } - - #[test] - fn receive_messages_delivery_proof_works() { - run_test(|| { - send_regular_message(); - receive_messages_delivery_proof(); - - assert_eq!( - 
OutboundLanes::::get(&TEST_LANE_ID).latest_received_nonce, - 1, - ); - }); - } - - #[test] - fn receive_messages_delivery_proof_rewards_relayers() { - run_test(|| { - assert_ok!(Pallet::::send_message( - Origin::signed(1), - TEST_LANE_ID, - REGULAR_PAYLOAD, - 1000, - )); - assert_ok!(Pallet::::send_message( - Origin::signed(1), - TEST_LANE_ID, - REGULAR_PAYLOAD, - 2000, - )); - - // this reports delivery of message 1 => reward is paid to TEST_RELAYER_A - assert_ok!(Pallet::::receive_messages_delivery_proof( - Origin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)] - .into_iter() - .collect(), - ..Default::default() - } - ))), - UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - total_messages: 1, - ..Default::default() - }, - )); - assert!(TestMessageDeliveryAndDispatchPayment::is_reward_paid(TEST_RELAYER_A, 1000)); - assert!(!TestMessageDeliveryAndDispatchPayment::is_reward_paid(TEST_RELAYER_B, 2000)); - - // this reports delivery of both message 1 and message 2 => reward is paid only to - // TEST_RELAYER_B - assert_ok!(Pallet::::receive_messages_delivery_proof( - Origin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - relayers: vec![ - unrewarded_relayer(1, 1, TEST_RELAYER_A), - unrewarded_relayer(2, 2, TEST_RELAYER_B) - ] - .into_iter() - .collect(), - ..Default::default() - } - ))), - UnrewardedRelayersState { - unrewarded_relayer_entries: 2, - total_messages: 2, - ..Default::default() - }, - )); - assert!(!TestMessageDeliveryAndDispatchPayment::is_reward_paid(TEST_RELAYER_A, 1000)); - assert!(TestMessageDeliveryAndDispatchPayment::is_reward_paid(TEST_RELAYER_B, 2000)); - }); - } - - #[test] - fn receive_messages_delivery_proof_rejects_invalid_proof() { - run_test(|| { - assert_noop!( - Pallet::::receive_messages_delivery_proof( - Origin::signed(1), - TestMessagesDeliveryProof(Err(())), - Default::default(), - ), - Error::::InvalidMessagesDeliveryProof, - ); - }); - } - - #[test] - fn receive_messages_delivery_proof_rejects_proof_if_declared_relayers_state_is_invalid() { - run_test(|| { - // when number of relayers entries is invalid - assert_noop!( - Pallet::::receive_messages_delivery_proof( - Origin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - relayers: vec![ - unrewarded_relayer(1, 1, TEST_RELAYER_A), - unrewarded_relayer(2, 2, TEST_RELAYER_B) - ] - .into_iter() - .collect(), - ..Default::default() - } - ))), - UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - total_messages: 2, - ..Default::default() - }, - ), - Error::::InvalidUnrewardedRelayersState, - ); - - // when number of messages is invalid - assert_noop!( - Pallet::::receive_messages_delivery_proof( - Origin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - relayers: vec![ - unrewarded_relayer(1, 1, TEST_RELAYER_A), - unrewarded_relayer(2, 2, TEST_RELAYER_B) - ] - .into_iter() - .collect(), - ..Default::default() - } - ))), - UnrewardedRelayersState { - unrewarded_relayer_entries: 2, - total_messages: 1, - ..Default::default() - }, - ), - Error::::InvalidUnrewardedRelayersState, - ); - }); - } - - #[test] - fn receive_messages_accepts_single_message_with_invalid_payload() { - run_test(|| { - let mut invalid_message = message(1, REGULAR_PAYLOAD); - invalid_message.data.payload = Vec::new(); - - assert_ok!(Pallet::::receive_messages_proof( - Origin::signed(1), - TEST_RELAYER_A, - 
Ok(vec![invalid_message]).into(), - 1, - 0, // weight may be zero in this case (all messages are improperly encoded) - )); - - assert_eq!(InboundLanes::::get(&TEST_LANE_ID).last_delivered_nonce(), 1,); - }); - } - - #[test] - fn receive_messages_accepts_batch_with_message_with_invalid_payload() { - run_test(|| { - let mut invalid_message = message(2, REGULAR_PAYLOAD); - invalid_message.data.payload = Vec::new(); - - assert_ok!(Pallet::::receive_messages_proof( - Origin::signed(1), - TEST_RELAYER_A, - Ok( - vec![message(1, REGULAR_PAYLOAD), invalid_message, message(3, REGULAR_PAYLOAD),] - ) - .into(), - 3, - REGULAR_PAYLOAD.declared_weight + REGULAR_PAYLOAD.declared_weight, - )); - - assert_eq!(InboundLanes::::get(&TEST_LANE_ID).last_delivered_nonce(), 3,); - }); - } - - #[test] - fn actual_dispatch_weight_does_not_overlow() { - run_test(|| { - let message1 = message(1, message_payload(0, Weight::MAX / 2)); - let message2 = message(2, message_payload(0, Weight::MAX / 2)); - let message3 = message(3, message_payload(0, Weight::MAX / 2)); - - assert_ok!(Pallet::::receive_messages_proof( - Origin::signed(1), - TEST_RELAYER_A, - // this may cause overflow if source chain storage is invalid - Ok(vec![message1, message2, message3]).into(), - 3, - Weight::MAX, - )); - assert_eq!(InboundLanes::::get(TEST_LANE_ID).last_delivered_nonce(), 2); - }); - } - - #[test] - fn increase_message_fee_fails_if_message_is_already_delivered() { - run_test(|| { - send_regular_message(); - receive_messages_delivery_proof(); - - assert_noop!( - Pallet::::increase_message_fee( - Origin::signed(1), - TEST_LANE_ID, - 1, - 100, - ), - Error::::MessageIsAlreadyDelivered, - ); - }); - } - - #[test] - fn increase_message_fee_fails_if_message_is_not_yet_sent() { - run_test(|| { - assert_noop!( - Pallet::::increase_message_fee( - Origin::signed(1), - TEST_LANE_ID, - 1, - 100, - ), - Error::::MessageIsNotYetSent, - ); - }); - } - - #[test] - fn increase_message_fee_fails_if_submitter_cant_pay_additional_fee() { - run_test(|| { - send_regular_message(); - - TestMessageDeliveryAndDispatchPayment::reject_payments(); - - assert_noop!( - Pallet::::increase_message_fee( - Origin::signed(1), - TEST_LANE_ID, - 1, - 100, - ), - Error::::FailedToWithdrawMessageFee, - ); - }); - } - - #[test] - fn increase_message_fee_succeeds() { - run_test(|| { - send_regular_message(); - - assert_ok!(Pallet::::increase_message_fee( - Origin::signed(1), - TEST_LANE_ID, - 1, - 100, - )); - assert!(TestMessageDeliveryAndDispatchPayment::is_fee_paid(1, 100)); - }); - } - - #[test] - fn weight_refund_from_receive_messages_proof_works() { - run_test(|| { - fn submit_with_unspent_weight( - nonce: MessageNonce, - unspent_weight: Weight, - is_prepaid: bool, - ) -> (Weight, Weight) { - let mut payload = REGULAR_PAYLOAD; - payload.dispatch_result.unspent_weight = unspent_weight; - payload.dispatch_result.dispatch_fee_paid_during_dispatch = !is_prepaid; - let proof = Ok(vec![message(nonce, payload)]).into(); - let messages_count = 1; - let pre_dispatch_weight = - ::WeightInfo::receive_messages_proof_weight( - &proof, - messages_count, - REGULAR_PAYLOAD.declared_weight, - ); - let post_dispatch_weight = Pallet::::receive_messages_proof( - Origin::signed(1), - TEST_RELAYER_A, - proof, - messages_count, - REGULAR_PAYLOAD.declared_weight, - ) - .expect("delivery has failed") - .actual_weight - .expect("receive_messages_proof always returns Some"); - - (pre_dispatch_weight, post_dispatch_weight) - } - - // when dispatch is returning `unspent_weight < declared_weight` 
- let (pre, post) = submit_with_unspent_weight(1, 1, false); - assert_eq!(post, pre - 1); - - // when dispatch is returning `unspent_weight = declared_weight` - let (pre, post) = submit_with_unspent_weight(2, REGULAR_PAYLOAD.declared_weight, false); - assert_eq!(post, pre - REGULAR_PAYLOAD.declared_weight); - - // when dispatch is returning `unspent_weight > declared_weight` - let (pre, post) = - submit_with_unspent_weight(3, REGULAR_PAYLOAD.declared_weight + 1, false); - assert_eq!(post, pre - REGULAR_PAYLOAD.declared_weight); - - // when there's no unspent weight - let (pre, post) = submit_with_unspent_weight(4, 0, false); - assert_eq!(post, pre); - - // when dispatch is returning `unspent_weight < declared_weight` AND message is prepaid - let (pre, post) = submit_with_unspent_weight(5, 1, true); - assert_eq!( - post, - pre - 1 - ::WeightInfo::pay_inbound_dispatch_fee_overhead() - ); - }); - } - - #[test] - fn messages_delivered_callbacks_are_called() { - run_test(|| { - send_regular_message(); - send_regular_message(); - send_regular_message(); - - // messages 1+2 are confirmed in 1 tx, message 3 in a separate tx - // dispatch of message 2 has failed - let mut delivered_messages_1_and_2 = DeliveredMessages::new(1, true); - delivered_messages_1_and_2.note_dispatched_message(false); - let messages_1_and_2_proof = Ok(( - TEST_LANE_ID, - InboundLaneData { - last_confirmed_nonce: 0, - relayers: vec![UnrewardedRelayer { - relayer: 0, - messages: delivered_messages_1_and_2.clone(), - }] - .into_iter() - .collect(), - }, - )); - let delivered_message_3 = DeliveredMessages::new(3, true); - let messages_3_proof = Ok(( - TEST_LANE_ID, - InboundLaneData { - last_confirmed_nonce: 0, - relayers: vec![UnrewardedRelayer { - relayer: 0, - messages: delivered_message_3.clone(), - }] - .into_iter() - .collect(), - }, - )); - - // first tx with messages 1+2 - assert_ok!(Pallet::::receive_messages_delivery_proof( - Origin::signed(1), - TestMessagesDeliveryProof(messages_1_and_2_proof), - UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - total_messages: 2, - ..Default::default() - }, - )); - // second tx with message 3 - assert_ok!(Pallet::::receive_messages_delivery_proof( - Origin::signed(1), - TestMessagesDeliveryProof(messages_3_proof), - UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - total_messages: 1, - ..Default::default() - }, - )); - - // ensure that both callbacks have been called twice: for 1+2, then for 3 - TestOnDeliveryConfirmed1::ensure_called(&TEST_LANE_ID, &delivered_messages_1_and_2); - TestOnDeliveryConfirmed1::ensure_called(&TEST_LANE_ID, &delivered_message_3); - TestOnDeliveryConfirmed2::ensure_called(&TEST_LANE_ID, &delivered_messages_1_and_2); - TestOnDeliveryConfirmed2::ensure_called(&TEST_LANE_ID, &delivered_message_3); - }); - } - - fn confirm_3_messages_delivery() -> (Weight, Weight) { - send_regular_message(); - send_regular_message(); - send_regular_message(); - - let proof = TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - last_confirmed_nonce: 0, - relayers: vec![unrewarded_relayer(1, 3, TEST_RELAYER_A)].into_iter().collect(), - }, - ))); - let relayers_state = UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - total_messages: 3, - ..Default::default() - }; - let pre_dispatch_weight = - ::WeightInfo::receive_messages_delivery_proof_weight( - &proof, - &relayers_state, - crate::mock::DbWeight::get(), - ); - let post_dispatch_weight = Pallet::::receive_messages_delivery_proof( - Origin::signed(1), - proof, - relayers_state, - 
) - .expect("confirmation has failed") - .actual_weight - .expect("receive_messages_delivery_proof always returns Some"); - (pre_dispatch_weight, post_dispatch_weight) - } - - #[test] - fn receive_messages_delivery_proof_refunds_zero_weight() { - run_test(|| { - let (pre_dispatch_weight, post_dispatch_weight) = confirm_3_messages_delivery(); - assert_eq!(pre_dispatch_weight, post_dispatch_weight); - }); - } - - #[test] - fn receive_messages_delivery_proof_refunds_non_zero_weight() { - run_test(|| { - TestOnDeliveryConfirmed1::set_consumed_weight_per_message( - crate::mock::DbWeight::get().writes(1), - ); - - let (pre_dispatch_weight, post_dispatch_weight) = confirm_3_messages_delivery(); - assert_eq!( - pre_dispatch_weight.saturating_sub(post_dispatch_weight), - crate::mock::DbWeight::get().reads(1) * 3 - ); - }); - } - - #[test] - #[should_panic] - #[cfg(debug_assertions)] - fn receive_messages_panics_in_debug_mode_if_callback_is_wrong() { - run_test(|| { - TestOnDeliveryConfirmed1::set_consumed_weight_per_message( - crate::mock::DbWeight::get().reads_writes(2, 2), - ); - confirm_3_messages_delivery() - }); - } - - #[test] - fn receive_messages_delivery_proof_rejects_proof_if_trying_to_confirm_more_messages_than_expected( - ) { - run_test(|| { - // send message first to be able to check that delivery_proof fails later - send_regular_message(); - - // 1) InboundLaneData declares that the `last_confirmed_nonce` is 1; - // 2) InboundLaneData has no entries => `InboundLaneData::last_delivered_nonce()` - // returns `last_confirmed_nonce`; - // 3) it means that we're going to confirm delivery of messages 1..=1; - // 4) so the number of declared messages (see `UnrewardedRelayersState`) is `0` and - // numer of actually confirmed messages is `1`. - assert_noop!( - Pallet::::receive_messages_delivery_proof( - Origin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { last_confirmed_nonce: 1, relayers: Default::default() }, - ))), - UnrewardedRelayersState::default(), - ), - Error::::TryingToConfirmMoreMessagesThanExpected, - ); - }); - } - - #[test] - fn increase_message_fee_weight_depends_on_message_size() { - run_test(|| { - let mut small_payload = message_payload(0, 100); - let mut large_payload = message_payload(1, 100); - small_payload.extra = vec![1; 100]; - large_payload.extra = vec![2; 16_384]; - - assert_ok!(Pallet::::send_message( - Origin::signed(1), - TEST_LANE_ID, - small_payload, - 100, - )); - assert_ok!(Pallet::::send_message( - Origin::signed(1), - TEST_LANE_ID, - large_payload, - 100, - )); - - let small_weight = - Pallet::::increase_message_fee(Origin::signed(1), TEST_LANE_ID, 1, 1) - .expect("increase_message_fee has failed") - .actual_weight - .expect("increase_message_fee always returns Some"); - - let large_weight = - Pallet::::increase_message_fee(Origin::signed(1), TEST_LANE_ID, 2, 1) - .expect("increase_message_fee has failed") - .actual_weight - .expect("increase_message_fee always returns Some"); - - assert!( - large_weight > small_weight, - "Actual post-dispatch weigth for larger message {} must be larger than {} for small message", - large_weight, - small_weight, - ); - }); - } - - #[test] - fn weight_is_refunded_for_messages_that_are_not_pruned() { - run_test(|| { - // send first MAX messages - no messages are pruned - let max_messages_to_prune = crate::mock::MaxMessagesToPruneAtOnce::get(); - let when_zero_messages_are_pruned = send_regular_message(); - let mut delivered_messages = DeliveredMessages::new(1, true); - for _ in 
1..max_messages_to_prune { - assert_eq!(send_regular_message(), when_zero_messages_are_pruned); - delivered_messages.note_dispatched_message(true); - } - - // confirm delivery of all sent messages - assert_ok!(Pallet::::receive_messages_delivery_proof( - Origin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - last_confirmed_nonce: 1, - relayers: vec![UnrewardedRelayer { - relayer: 0, - messages: delivered_messages, - }] - .into_iter() - .collect(), - }, - ))), - UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - total_messages: max_messages_to_prune, - ..Default::default() - }, - )); - - // when next message is sent, MAX messages are pruned - let weight_when_max_messages_are_pruned = send_regular_message(); - assert_eq!( - weight_when_max_messages_are_pruned, - when_zero_messages_are_pruned + - crate::mock::DbWeight::get().writes(max_messages_to_prune), - ); - }); - } - - #[test] - fn message_accepted_callbacks_are_called() { - run_test(|| { - send_regular_message(); - TestOnMessageAccepted::ensure_called(&TEST_LANE_ID, &1); - }); - } - - #[test] - #[should_panic] - #[cfg(debug_assertions)] - fn message_accepted_panics_in_debug_mode_if_callback_is_wrong() { - run_test(|| { - TestOnMessageAccepted::set_consumed_weight_per_message( - crate::mock::DbWeight::get().reads_writes(2, 2), - ); - send_regular_message(); - }); - } - - #[test] - fn message_accepted_refunds_non_zero_weight() { - run_test(|| { - TestOnMessageAccepted::set_consumed_weight_per_message( - crate::mock::DbWeight::get().writes(1), - ); - let actual_callback_weight = send_regular_message(); - let pre_dispatch_weight = ::WeightInfo::send_message_weight( - ®ULAR_PAYLOAD, - crate::mock::DbWeight::get(), - ); - let prune_weight = crate::mock::DbWeight::get() - .writes(::MaxMessagesToPruneAtOnce::get()); - - assert_eq!( - pre_dispatch_weight.saturating_sub(actual_callback_weight), - crate::mock::DbWeight::get().reads(1).saturating_add(prune_weight) - ); - }); - } - - #[test] - fn storage_keys_computed_properly() { - assert_eq!( - PalletOperatingMode::::storage_value_final_key().to_vec(), - bp_messages::storage_keys::operating_mode_key("Messages").0, - ); - - assert_eq!( - OutboundMessages::::storage_map_final_key(MessageKey { - lane_id: TEST_LANE_ID, - nonce: 42 - }), - bp_messages::storage_keys::message_key("Messages", &TEST_LANE_ID, 42).0, - ); - - assert_eq!( - OutboundLanes::::storage_map_final_key(TEST_LANE_ID), - bp_messages::storage_keys::outbound_lane_data_key("Messages", &TEST_LANE_ID).0, - ); - - assert_eq!( - InboundLanes::::storage_map_final_key(TEST_LANE_ID), - bp_messages::storage_keys::inbound_lane_data_key("Messages", &TEST_LANE_ID).0, - ); - } -} diff --git a/polkadot/bridges/modules/messages/src/mock.rs b/polkadot/bridges/modules/messages/src/mock.rs deleted file mode 100644 index 75dcce8df04..00000000000 --- a/polkadot/bridges/modules/messages/src/mock.rs +++ /dev/null @@ -1,562 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -// From construct_runtime macro -#![allow(clippy::from_over_into)] - -use crate::{instant_payments::cal_relayers_rewards, Config}; - -use bitvec::prelude::*; -use bp_messages::{ - source_chain::{ - LaneMessageVerifier, MessageDeliveryAndDispatchPayment, OnDeliveryConfirmed, - OnMessageAccepted, SenderOrigin, TargetHeaderChain, - }, - target_chain::{ - DispatchMessage, MessageDispatch, ProvedLaneMessages, ProvedMessages, SourceHeaderChain, - }, - DeliveredMessages, InboundLaneData, LaneId, Message, MessageData, MessageKey, MessageNonce, - OutboundLaneData, Parameter as MessagesParameter, UnrewardedRelayer, -}; -use bp_runtime::{messages::MessageDispatchResult, Size}; -use codec::{Decode, Encode}; -use frame_support::{ - parameter_types, - weights::{RuntimeDbWeight, Weight}, -}; -use scale_info::TypeInfo; -use sp_core::H256; -use sp_runtime::{ - testing::Header as SubstrateHeader, - traits::{BlakeTwo256, IdentityLookup}, - FixedU128, Perbill, -}; -use std::{ - collections::{BTreeMap, VecDeque}, - ops::RangeInclusive, -}; - -pub type AccountId = u64; -pub type Balance = u64; -#[derive(Decode, Encode, Clone, Debug, PartialEq, Eq, TypeInfo)] -pub struct TestPayload { - /// Field that may be used to identify messages. - pub id: u64, - /// Dispatch weight that is declared by the message sender. - pub declared_weight: Weight, - /// Message dispatch result. - /// - /// Note: in correct code `dispatch_result.unspent_weight` will always be <= `declared_weight`, - /// but for test purposes we'll be making it larger than `declared_weight` sometimes. - pub dispatch_result: MessageDispatchResult, - /// Extra bytes that affect payload size. - pub extra: Vec, -} -pub type TestMessageFee = u64; -pub type TestRelayer = u64; - -pub struct AccountIdConverter; - -impl sp_runtime::traits::Convert for AccountIdConverter { - fn convert(hash: H256) -> AccountId { - hash.to_low_u64_ne() - } -} - -type Block = frame_system::mocking::MockBlock; -type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; - -use crate as pallet_bridge_messages; - -frame_support::construct_runtime! { - pub enum TestRuntime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Balances: pallet_balances::{Pallet, Call, Event}, - Messages: pallet_bridge_messages::{Pallet, Call, Event}, - } -} - -parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { read: 1, write: 2 }; -} - -impl frame_system::Config for TestRuntime { - type Origin = Origin; - type Index = u64; - type Call = Call; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type Header = SubstrateHeader; - type Event = Event; - type BlockHashCount = BlockHashCount; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type BlockWeights = (); - type BlockLength = (); - type DbWeight = DbWeight; - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; -} - -parameter_types! { - pub const ExistentialDeposit: u64 = 1; -} - -impl pallet_balances::Config for TestRuntime { - type MaxLocks = (); - type Balance = Balance; - type DustRemoval = (); - type Event = Event; - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = frame_system::Pallet; - type WeightInfo = (); - type MaxReserves = (); - type ReserveIdentifier = (); -} - -parameter_types! { - pub const MaxMessagesToPruneAtOnce: u64 = 10; - pub const MaxUnrewardedRelayerEntriesAtInboundLane: u64 = 16; - pub const MaxUnconfirmedMessagesAtInboundLane: u64 = 32; - pub storage TokenConversionRate: FixedU128 = 1.into(); - pub const TestBridgedChainId: bp_runtime::ChainId = *b"test"; -} - -#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq, TypeInfo)] -pub enum TestMessagesParameter { - TokenConversionRate(FixedU128), -} - -impl MessagesParameter for TestMessagesParameter { - fn save(&self) { - match *self { - TestMessagesParameter::TokenConversionRate(conversion_rate) => - TokenConversionRate::set(&conversion_rate), - } - } -} - -impl Config for TestRuntime { - type Event = Event; - type WeightInfo = (); - type Parameter = TestMessagesParameter; - type MaxMessagesToPruneAtOnce = MaxMessagesToPruneAtOnce; - type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; - type MaxUnconfirmedMessagesAtInboundLane = MaxUnconfirmedMessagesAtInboundLane; - - type OutboundPayload = TestPayload; - type OutboundMessageFee = TestMessageFee; - - type InboundPayload = TestPayload; - type InboundMessageFee = TestMessageFee; - type InboundRelayer = TestRelayer; - - type AccountIdConverter = AccountIdConverter; - - type TargetHeaderChain = TestTargetHeaderChain; - type LaneMessageVerifier = TestLaneMessageVerifier; - type MessageDeliveryAndDispatchPayment = TestMessageDeliveryAndDispatchPayment; - type OnMessageAccepted = TestOnMessageAccepted; - type OnDeliveryConfirmed = (TestOnDeliveryConfirmed1, TestOnDeliveryConfirmed2); - - type SourceHeaderChain = TestSourceHeaderChain; - type MessageDispatch = TestMessageDispatch; - type BridgedChainId = TestBridgedChainId; -} - -impl SenderOrigin for Origin { - fn linked_account(&self) -> Option { - match self.caller { - OriginCaller::system(frame_system::RawOrigin::Signed(ref submitter)) => - Some(submitter.clone()), - _ => None, - } - } -} - -impl Size for TestPayload { - fn size_hint(&self) -> u32 { - 16 + self.extra.len() as u32 - } -} - -/// Account that has balance to use in tests. 
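`TestMessagesParameter` above illustrates the `MessagesParameter` pattern: an enum of runtime-updatable bridge parameters whose `save` writes the selected variant into storage (here, a token conversion rate). A simplified standalone model of the same idea is sketched below, with a `thread_local!` standing in for runtime storage; every name in it is illustrative and not taken from the pallet.

```rust
use std::cell::RefCell;

// Stand-in for a runtime storage value (the mock instead declares a
// `pub storage` item inside `parameter_types!`).
thread_local! {
	static TOKEN_CONVERSION_RATE: RefCell<u128> = RefCell::new(1);
}

/// Simplified model of the `MessagesParameter` pattern: `save` persists the
/// chosen variant into storage.
#[derive(Clone, Debug, PartialEq)]
enum Parameter {
	TokenConversionRate(u128),
}

impl Parameter {
	fn save(&self) {
		match *self {
			Parameter::TokenConversionRate(rate) =>
				TOKEN_CONVERSION_RATE.with(|r| *r.borrow_mut() = rate),
		}
	}
}

fn main() {
	// `update_pallet_parameter` calls `save` after the owner/root check passes.
	Parameter::TokenConversionRate(10).save();
	assert_eq!(TOKEN_CONVERSION_RATE.with(|r| *r.borrow()), 10);
}
```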
-pub const ENDOWED_ACCOUNT: AccountId = 0xDEAD; - -/// Account id of test relayer. -pub const TEST_RELAYER_A: AccountId = 100; - -/// Account id of additional test relayer - B. -pub const TEST_RELAYER_B: AccountId = 101; - -/// Account id of additional test relayer - C. -pub const TEST_RELAYER_C: AccountId = 102; - -/// Error that is returned by all test implementations. -pub const TEST_ERROR: &str = "Test error"; - -/// Lane that we're using in tests. -pub const TEST_LANE_ID: LaneId = [0, 0, 0, 1]; - -/// Regular message payload. -pub const REGULAR_PAYLOAD: TestPayload = message_payload(0, 50); - -/// Payload that is rejected by `TestTargetHeaderChain`. -pub const PAYLOAD_REJECTED_BY_TARGET_CHAIN: TestPayload = message_payload(1, 50); - -/// Vec of proved messages, grouped by lane. -pub type MessagesByLaneVec = Vec<(LaneId, ProvedLaneMessages>)>; - -/// Test messages proof. -#[derive(Debug, Encode, Decode, Clone, PartialEq, Eq, TypeInfo)] -pub struct TestMessagesProof { - pub result: Result, -} - -impl Size for TestMessagesProof { - fn size_hint(&self) -> u32 { - 0 - } -} - -impl From>, ()>> for TestMessagesProof { - fn from(result: Result>, ()>) -> Self { - Self { - result: result.map(|messages| { - let mut messages_by_lane: BTreeMap< - LaneId, - ProvedLaneMessages>, - > = BTreeMap::new(); - for message in messages { - messages_by_lane.entry(message.key.lane_id).or_default().messages.push(message); - } - messages_by_lane.into_iter().collect() - }), - } - } -} - -/// Messages delivery proof used in tests. -#[derive(Debug, Encode, Decode, Eq, Clone, PartialEq, TypeInfo)] -pub struct TestMessagesDeliveryProof(pub Result<(LaneId, InboundLaneData), ()>); - -impl Size for TestMessagesDeliveryProof { - fn size_hint(&self) -> u32 { - 0 - } -} - -/// Target header chain that is used in tests. -#[derive(Debug, Default)] -pub struct TestTargetHeaderChain; - -impl TargetHeaderChain for TestTargetHeaderChain { - type Error = &'static str; - - type MessagesDeliveryProof = TestMessagesDeliveryProof; - - fn verify_message(payload: &TestPayload) -> Result<(), Self::Error> { - if *payload == PAYLOAD_REJECTED_BY_TARGET_CHAIN { - Err(TEST_ERROR) - } else { - Ok(()) - } - } - - fn verify_messages_delivery_proof( - proof: Self::MessagesDeliveryProof, - ) -> Result<(LaneId, InboundLaneData), Self::Error> { - proof.0.map_err(|_| TEST_ERROR) - } -} - -/// Lane message verifier that is used in tests. -#[derive(Debug, Default)] -pub struct TestLaneMessageVerifier; - -impl LaneMessageVerifier - for TestLaneMessageVerifier -{ - type Error = &'static str; - - fn verify_message( - _submitter: &Origin, - delivery_and_dispatch_fee: &TestMessageFee, - _lane: &LaneId, - _lane_outbound_data: &OutboundLaneData, - _payload: &TestPayload, - ) -> Result<(), Self::Error> { - if *delivery_and_dispatch_fee != 0 { - Ok(()) - } else { - Err(TEST_ERROR) - } - } -} - -/// Message fee payment system that is used in tests. -#[derive(Debug, Default)] -pub struct TestMessageDeliveryAndDispatchPayment; - -impl TestMessageDeliveryAndDispatchPayment { - /// Reject all payments. - pub fn reject_payments() { - frame_support::storage::unhashed::put(b":reject-message-fee:", &true); - } - - /// Returns true if given fee has been paid by given submitter. 
- pub fn is_fee_paid(submitter: AccountId, fee: TestMessageFee) -> bool { - let raw_origin: Result, _> = Origin::signed(submitter).into(); - frame_support::storage::unhashed::get(b":message-fee:") == Some((raw_origin.unwrap(), fee)) - } - - /// Returns true if given relayer has been rewarded with given balance. The reward-paid flag is - /// cleared after the call. - pub fn is_reward_paid(relayer: AccountId, fee: TestMessageFee) -> bool { - let key = (b":relayer-reward:", relayer, fee).encode(); - frame_support::storage::unhashed::take::(&key).is_some() - } -} - -impl MessageDeliveryAndDispatchPayment - for TestMessageDeliveryAndDispatchPayment -{ - type Error = &'static str; - - fn pay_delivery_and_dispatch_fee( - submitter: &Origin, - fee: &TestMessageFee, - _relayer_fund_account: &AccountId, - ) -> Result<(), Self::Error> { - if frame_support::storage::unhashed::get(b":reject-message-fee:") == Some(true) { - return Err(TEST_ERROR) - } - - let raw_origin: Result, _> = submitter.clone().into(); - frame_support::storage::unhashed::put(b":message-fee:", &(raw_origin.unwrap(), fee)); - Ok(()) - } - - fn pay_relayers_rewards( - lane_id: LaneId, - message_relayers: VecDeque>, - _confirmation_relayer: &AccountId, - received_range: &RangeInclusive, - _relayer_fund_account: &AccountId, - ) { - let relayers_rewards = - cal_relayers_rewards::(lane_id, message_relayers, received_range); - for (relayer, reward) in &relayers_rewards { - let key = (b":relayer-reward:", relayer, reward.reward).encode(); - frame_support::storage::unhashed::put(&key, &true); - } - } -} - -#[derive(Debug)] -pub struct TestOnMessageAccepted; - -impl TestOnMessageAccepted { - /// Verify that the callback has been called when the message is accepted. - pub fn ensure_called(lane: &LaneId, message: &MessageNonce) { - let key = (b"TestOnMessageAccepted", lane, message).encode(); - assert_eq!(frame_support::storage::unhashed::get(&key), Some(true)); - } - - /// Set consumed weight returned by the callback. - pub fn set_consumed_weight_per_message(weight: Weight) { - frame_support::storage::unhashed::put(b"TestOnMessageAccepted_Weight", &weight); - } - - /// Get consumed weight returned by the callback. - pub fn get_consumed_weight_per_message() -> Option { - frame_support::storage::unhashed::get(b"TestOnMessageAccepted_Weight") - } -} - -impl OnMessageAccepted for TestOnMessageAccepted { - fn on_messages_accepted(lane: &LaneId, message: &MessageNonce) -> Weight { - let key = (b"TestOnMessageAccepted", lane, message).encode(); - frame_support::storage::unhashed::put(&key, &true); - Self::get_consumed_weight_per_message() - .unwrap_or_else(|| DbWeight::get().reads_writes(1, 1)) - } -} - -/// First on-messages-delivered callback. -#[derive(Debug)] -pub struct TestOnDeliveryConfirmed1; - -impl TestOnDeliveryConfirmed1 { - /// Verify that the callback has been called with given delivered messages. - pub fn ensure_called(lane: &LaneId, messages: &DeliveredMessages) { - let key = (b"TestOnDeliveryConfirmed1", lane, messages).encode(); - assert_eq!(frame_support::storage::unhashed::get(&key), Some(true)); - } - - /// Set consumed weight returned by the callback. - pub fn set_consumed_weight_per_message(weight: Weight) { - frame_support::storage::unhashed::put(b"TestOnDeliveryConfirmed1_Weight", &weight); - } - - /// Get consumed weight returned by the callback. 
- pub fn get_consumed_weight_per_message() -> Option { - frame_support::storage::unhashed::get(b"TestOnDeliveryConfirmed1_Weight") - } -} - -impl OnDeliveryConfirmed for TestOnDeliveryConfirmed1 { - fn on_messages_delivered(lane: &LaneId, messages: &DeliveredMessages) -> Weight { - let key = (b"TestOnDeliveryConfirmed1", lane, messages).encode(); - frame_support::storage::unhashed::put(&key, &true); - Self::get_consumed_weight_per_message() - .unwrap_or_else(|| DbWeight::get().reads_writes(1, 1)) - .saturating_mul(messages.total_messages()) - } -} - -/// Second on-messages-delivered callback. -#[derive(Debug)] -pub struct TestOnDeliveryConfirmed2; - -impl TestOnDeliveryConfirmed2 { - /// Verify that the callback has been called with given delivered messages. - pub fn ensure_called(lane: &LaneId, messages: &DeliveredMessages) { - let key = (b"TestOnDeliveryConfirmed2", lane, messages).encode(); - assert_eq!(frame_support::storage::unhashed::get(&key), Some(true)); - } -} - -impl OnDeliveryConfirmed for TestOnDeliveryConfirmed2 { - fn on_messages_delivered(lane: &LaneId, messages: &DeliveredMessages) -> Weight { - let key = (b"TestOnDeliveryConfirmed2", lane, messages).encode(); - frame_support::storage::unhashed::put(&key, &true); - 0 - } -} - -/// Source header chain that is used in tests. -#[derive(Debug)] -pub struct TestSourceHeaderChain; - -impl SourceHeaderChain for TestSourceHeaderChain { - type Error = &'static str; - - type MessagesProof = TestMessagesProof; - - fn verify_messages_proof( - proof: Self::MessagesProof, - _messages_count: u32, - ) -> Result>, Self::Error> { - proof.result.map(|proof| proof.into_iter().collect()).map_err(|_| TEST_ERROR) - } -} - -/// Source header chain that is used in tests. -#[derive(Debug)] -pub struct TestMessageDispatch; - -impl MessageDispatch for TestMessageDispatch { - type DispatchPayload = TestPayload; - - fn dispatch_weight(message: &DispatchMessage) -> Weight { - match message.data.payload.as_ref() { - Ok(payload) => payload.declared_weight, - Err(_) => 0, - } - } - - fn dispatch( - _relayer_account: &AccountId, - message: DispatchMessage, - ) -> MessageDispatchResult { - match message.data.payload.as_ref() { - Ok(payload) => payload.dispatch_result.clone(), - Err(_) => dispatch_result(0), - } - } -} - -/// Return test lane message with given nonce and payload. -pub fn message(nonce: MessageNonce, payload: TestPayload) -> Message { - Message { key: MessageKey { lane_id: TEST_LANE_ID, nonce }, data: message_data(payload) } -} - -/// Constructs message payload using given arguments and zero unspent weight. -pub const fn message_payload(id: u64, declared_weight: Weight) -> TestPayload { - TestPayload { id, declared_weight, dispatch_result: dispatch_result(0), extra: Vec::new() } -} - -/// Return message data with valid fee for given payload. -pub fn message_data(payload: TestPayload) -> MessageData { - MessageData { payload: payload.encode(), fee: 1 } -} - -/// Returns message dispatch result with given unspent weight. -pub const fn dispatch_result(unspent_weight: Weight) -> MessageDispatchResult { - MessageDispatchResult { - dispatch_result: true, - unspent_weight, - dispatch_fee_paid_during_dispatch: true, - } -} - -/// Constructs unrewarded relayer entry from nonces range and relayer id. 
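The `pay_relayers_rewards` mock above rewards each relayer only for the slice of its delivered range that falls inside the newly confirmed range. The sketch below is a self-contained model of that range splitting; it counts messages per relayer instead of summing the per-message fees as the pallet does (the tests reward `TEST_RELAYER_A`/`TEST_RELAYER_B` with the fees of their own messages), and every name in it is illustrative.

```rust
use std::collections::BTreeMap;
use std::ops::RangeInclusive;

type AccountId = u64;
type MessageNonce = u64;

/// Simplified stand-in for an unrewarded relayer entry: who delivered which
/// consecutive nonce range (the real entry also carries dispatch results).
struct RelayerEntry {
	relayer: AccountId,
	begin: MessageNonce,
	end: MessageNonce,
}

/// Count, per relayer, how many of the newly confirmed messages
/// (`received_range`) fall into that relayer's delivered range.
fn messages_per_relayer(
	entries: &[RelayerEntry],
	received_range: &RangeInclusive<MessageNonce>,
) -> BTreeMap<AccountId, MessageNonce> {
	let mut result = BTreeMap::new();
	for entry in entries {
		// intersect the relayer's delivered range with the confirmed range
		let begin = entry.begin.max(*received_range.start());
		let end = entry.end.min(*received_range.end());
		if begin <= end {
			*result.entry(entry.relayer).or_insert(0) += end - begin + 1;
		}
	}
	result
}

fn main() {
	// relayer 100 delivered 1..=1, relayer 101 delivered 2..=2; the
	// confirmation covers nonces 1..=2, so each relayer gets one message
	let entries = [
		RelayerEntry { relayer: 100, begin: 1, end: 1 },
		RelayerEntry { relayer: 101, begin: 2, end: 2 },
	];
	let per_relayer = messages_per_relayer(&entries, &(1..=2));
	assert_eq!(per_relayer[&100], 1);
	assert_eq!(per_relayer[&101], 1);
}
```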
-pub fn unrewarded_relayer( - begin: MessageNonce, - end: MessageNonce, - relayer: TestRelayer, -) -> UnrewardedRelayer { - UnrewardedRelayer { - relayer, - messages: DeliveredMessages { - begin, - end, - dispatch_results: if end >= begin { - bitvec![u8, Msb0; 1; (end - begin + 1) as _] - } else { - Default::default() - }, - }, - } -} - -/// Run pallet test. -pub fn run_test(test: impl FnOnce() -> T) -> T { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig:: { balances: vec![(ENDOWED_ACCOUNT, 1_000_000)] } - .assimilate_storage(&mut t) - .unwrap(); - let mut ext = sp_io::TestExternalities::new(t); - ext.execute_with(test) -} diff --git a/polkadot/bridges/modules/messages/src/outbound_lane.rs b/polkadot/bridges/modules/messages/src/outbound_lane.rs deleted file mode 100644 index cfdc81acc31..00000000000 --- a/polkadot/bridges/modules/messages/src/outbound_lane.rs +++ /dev/null @@ -1,462 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Everything about outgoing messages sending. - -use bitvec::prelude::*; -use bp_messages::{ - DeliveredMessages, DispatchResultsBitVec, LaneId, MessageData, MessageNonce, OutboundLaneData, - UnrewardedRelayer, -}; -use frame_support::RuntimeDebug; -use sp_std::collections::vec_deque::VecDeque; - -/// Outbound lane storage. -pub trait OutboundLaneStorage { - /// Delivery and dispatch fee type on source chain. - type MessageFee; - - /// Lane id. - fn id(&self) -> LaneId; - /// Get lane data from the storage. - fn data(&self) -> OutboundLaneData; - /// Update lane data in the storage. - fn set_data(&mut self, data: OutboundLaneData); - /// Returns saved outbound message payload. - #[cfg(test)] - fn message(&self, nonce: &MessageNonce) -> Option>; - /// Save outbound message in the storage. - fn save_message(&mut self, nonce: MessageNonce, message_data: MessageData); - /// Remove outbound message from the storage. - fn remove_message(&mut self, nonce: &MessageNonce); -} - -/// Result of messages receival confirmation. -#[derive(RuntimeDebug, PartialEq, Eq)] -pub enum ReceivalConfirmationResult { - /// New messages have been confirmed by the confirmation transaction. - ConfirmedMessages(DeliveredMessages), - /// Confirmation transaction brings no new confirmation. This may be a result of relayer - /// error or several relayers running. - NoNewConfirmations, - /// Bridged chain is trying to confirm more messages than we have generated. May be a result - /// of invalid bridged chain storage. - FailedToConfirmFutureMessages, - /// The unrewarded relayers vec contains an empty entry. May be a result of invalid bridged - /// chain storage. - EmptyUnrewardedRelayerEntry, - /// The unrewarded relayers vec contains non-consecutive entries. May be a result of invalid - /// bridged chain storage. 
- NonConsecutiveUnrewardedRelayerEntries, - /// The unrewarded relayers vec contains entry with mismatched number of dispatch results. May - /// be a result of invalid bridged chain storage. - InvalidNumberOfDispatchResults, - /// The chain has more messages that need to be confirmed than there is in the proof. - TryingToConfirmMoreMessagesThanExpected(MessageNonce), -} - -/// Outbound messages lane. -pub struct OutboundLane { - storage: S, -} - -impl OutboundLane { - /// Create new inbound lane backed by given storage. - pub fn new(storage: S) -> Self { - OutboundLane { storage } - } - - /// Get this lane data. - pub fn data(&self) -> OutboundLaneData { - self.storage.data() - } - - /// Send message over lane. - /// - /// Returns new message nonce. - pub fn send_message(&mut self, message_data: MessageData) -> MessageNonce { - let mut data = self.storage.data(); - let nonce = data.latest_generated_nonce + 1; - data.latest_generated_nonce = nonce; - - self.storage.save_message(nonce, message_data); - self.storage.set_data(data); - - nonce - } - - /// Confirm messages delivery. - pub fn confirm_delivery( - &mut self, - max_allowed_messages: MessageNonce, - latest_delivered_nonce: MessageNonce, - relayers: &VecDeque>, - ) -> ReceivalConfirmationResult { - let mut data = self.storage.data(); - if latest_delivered_nonce <= data.latest_received_nonce { - return ReceivalConfirmationResult::NoNewConfirmations - } - if latest_delivered_nonce > data.latest_generated_nonce { - return ReceivalConfirmationResult::FailedToConfirmFutureMessages - } - if latest_delivered_nonce - data.latest_received_nonce > max_allowed_messages { - // that the relayer has declared correct number of messages that the proof contains (it - // is checked outside of the function). But it may happen (but only if this/bridged - // chain storage is corrupted, though) that the actual number of confirmed messages if - // larger than declared. This would mean that 'reward loop' will take more time than the - // weight formula accounts, so we can't allow that. - return ReceivalConfirmationResult::TryingToConfirmMoreMessagesThanExpected( - latest_delivered_nonce - data.latest_received_nonce, - ) - } - - let dispatch_results = match extract_dispatch_results( - data.latest_received_nonce, - latest_delivered_nonce, - relayers, - ) { - Ok(dispatch_results) => dispatch_results, - Err(extract_error) => return extract_error, - }; - - let prev_latest_received_nonce = data.latest_received_nonce; - data.latest_received_nonce = latest_delivered_nonce; - self.storage.set_data(data); - - ReceivalConfirmationResult::ConfirmedMessages(DeliveredMessages { - begin: prev_latest_received_nonce + 1, - end: latest_delivered_nonce, - dispatch_results, - }) - } - - /// Prune at most `max_messages_to_prune` already received messages. - /// - /// Returns number of pruned messages. - pub fn prune_messages(&mut self, max_messages_to_prune: MessageNonce) -> MessageNonce { - let mut pruned_messages = 0; - let mut anything_changed = false; - let mut data = self.storage.data(); - while pruned_messages < max_messages_to_prune && - data.oldest_unpruned_nonce <= data.latest_received_nonce - { - self.storage.remove_message(&data.oldest_unpruned_nonce); - - anything_changed = true; - pruned_messages += 1; - data.oldest_unpruned_nonce += 1; - } - - if anything_changed { - self.storage.set_data(data); - } - - pruned_messages - } -} - -/// Extract new dispatch results from the unrewarded relayers vec. 
-/// -/// Returns `Err(_)` if unrewarded relayers vec contains invalid data, meaning that the bridged -/// chain has invalid runtime storage. -fn extract_dispatch_results( - prev_latest_received_nonce: MessageNonce, - latest_received_nonce: MessageNonce, - relayers: &VecDeque>, -) -> Result { - // the only caller of this functions checks that the - // prev_latest_received_nonce..=latest_received_nonce is valid, so we're ready to accept - // messages in this range => with_capacity call must succeed here or we'll be unable to receive - // confirmations at all - let mut received_dispatch_result = - BitVec::with_capacity((latest_received_nonce - prev_latest_received_nonce + 1) as _); - let mut last_entry_end: Option = None; - for entry in relayers { - // unrewarded relayer entry must have at least 1 unconfirmed message - // (guaranteed by the `InboundLane::receive_message()`) - if entry.messages.end < entry.messages.begin { - return Err(ReceivalConfirmationResult::EmptyUnrewardedRelayerEntry) - } - // every entry must confirm range of messages that follows previous entry range - // (guaranteed by the `InboundLane::receive_message()`) - if let Some(last_entry_end) = last_entry_end { - let expected_entry_begin = last_entry_end.checked_add(1); - if expected_entry_begin != Some(entry.messages.begin) { - return Err(ReceivalConfirmationResult::NonConsecutiveUnrewardedRelayerEntries) - } - } - last_entry_end = Some(entry.messages.end); - // entry can't confirm messages larger than `inbound_lane_data.latest_received_nonce()` - // (guaranteed by the `InboundLane::receive_message()`) - if entry.messages.end > latest_received_nonce { - // technically this will be detected in the next loop iteration as - // `InvalidNumberOfDispatchResults` but to guarantee safety of loop operations below - // this is detected now - return Err(ReceivalConfirmationResult::FailedToConfirmFutureMessages) - } - // entry must have single dispatch result for every message - // (guaranteed by the `InboundLane::receive_message()`) - if entry.messages.dispatch_results.len() as MessageNonce != - entry.messages.end - entry.messages.begin + 1 - { - return Err(ReceivalConfirmationResult::InvalidNumberOfDispatchResults) - } - - // now we know that the entry is valid - // => let's check if it brings new confirmations - let new_messages_begin = - sp_std::cmp::max(entry.messages.begin, prev_latest_received_nonce + 1); - let new_messages_end = sp_std::cmp::min(entry.messages.end, latest_received_nonce); - let new_messages_range = new_messages_begin..=new_messages_end; - if new_messages_range.is_empty() { - continue - } - - // now we know that entry brings new confirmations - // => let's extract dispatch results - received_dispatch_result.extend_from_bitslice( - &entry.messages.dispatch_results - [(new_messages_begin - entry.messages.begin) as usize..], - ); - } - - Ok(received_dispatch_result) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - mock::{ - message_data, run_test, unrewarded_relayer, TestRelayer, TestRuntime, REGULAR_PAYLOAD, - TEST_LANE_ID, - }, - outbound_lane, - }; - use sp_std::ops::RangeInclusive; - - fn unrewarded_relayers( - nonces: RangeInclusive, - ) -> VecDeque> { - vec![unrewarded_relayer(*nonces.start(), *nonces.end(), 0)] - .into_iter() - .collect() - } - - fn delivered_messages(nonces: RangeInclusive) -> DeliveredMessages { - DeliveredMessages { - begin: *nonces.start(), - end: *nonces.end(), - dispatch_results: bitvec![u8, Msb0; 1; (nonces.end() - nonces.start() + 1) as _], - } - } - - fn 
assert_3_messages_confirmation_fails( - latest_received_nonce: MessageNonce, - relayers: &VecDeque>, - ) -> ReceivalConfirmationResult { - run_test(|| { - let mut lane = outbound_lane::(TEST_LANE_ID); - lane.send_message(message_data(REGULAR_PAYLOAD)); - lane.send_message(message_data(REGULAR_PAYLOAD)); - lane.send_message(message_data(REGULAR_PAYLOAD)); - assert_eq!(lane.storage.data().latest_generated_nonce, 3); - assert_eq!(lane.storage.data().latest_received_nonce, 0); - let result = lane.confirm_delivery(3, latest_received_nonce, relayers); - assert_eq!(lane.storage.data().latest_generated_nonce, 3); - assert_eq!(lane.storage.data().latest_received_nonce, 0); - result - }) - } - - #[test] - fn send_message_works() { - run_test(|| { - let mut lane = outbound_lane::(TEST_LANE_ID); - assert_eq!(lane.storage.data().latest_generated_nonce, 0); - assert_eq!(lane.send_message(message_data(REGULAR_PAYLOAD)), 1); - assert!(lane.storage.message(&1).is_some()); - assert_eq!(lane.storage.data().latest_generated_nonce, 1); - }); - } - - #[test] - fn confirm_delivery_works() { - run_test(|| { - let mut lane = outbound_lane::(TEST_LANE_ID); - assert_eq!(lane.send_message(message_data(REGULAR_PAYLOAD)), 1); - assert_eq!(lane.send_message(message_data(REGULAR_PAYLOAD)), 2); - assert_eq!(lane.send_message(message_data(REGULAR_PAYLOAD)), 3); - assert_eq!(lane.storage.data().latest_generated_nonce, 3); - assert_eq!(lane.storage.data().latest_received_nonce, 0); - assert_eq!( - lane.confirm_delivery(3, 3, &unrewarded_relayers(1..=3)), - ReceivalConfirmationResult::ConfirmedMessages(delivered_messages(1..=3)), - ); - assert_eq!(lane.storage.data().latest_generated_nonce, 3); - assert_eq!(lane.storage.data().latest_received_nonce, 3); - }); - } - - #[test] - fn confirm_delivery_rejects_nonce_lesser_than_latest_received() { - run_test(|| { - let mut lane = outbound_lane::(TEST_LANE_ID); - lane.send_message(message_data(REGULAR_PAYLOAD)); - lane.send_message(message_data(REGULAR_PAYLOAD)); - lane.send_message(message_data(REGULAR_PAYLOAD)); - assert_eq!(lane.storage.data().latest_generated_nonce, 3); - assert_eq!(lane.storage.data().latest_received_nonce, 0); - assert_eq!( - lane.confirm_delivery(3, 3, &unrewarded_relayers(1..=3)), - ReceivalConfirmationResult::ConfirmedMessages(delivered_messages(1..=3)), - ); - assert_eq!( - lane.confirm_delivery(3, 3, &unrewarded_relayers(1..=3)), - ReceivalConfirmationResult::NoNewConfirmations, - ); - assert_eq!(lane.storage.data().latest_generated_nonce, 3); - assert_eq!(lane.storage.data().latest_received_nonce, 3); - - assert_eq!( - lane.confirm_delivery(1, 2, &unrewarded_relayers(1..=1)), - ReceivalConfirmationResult::NoNewConfirmations, - ); - assert_eq!(lane.storage.data().latest_generated_nonce, 3); - assert_eq!(lane.storage.data().latest_received_nonce, 3); - }); - } - - #[test] - fn confirm_delivery_rejects_nonce_larger_than_last_generated() { - assert_eq!( - assert_3_messages_confirmation_fails(10, &unrewarded_relayers(1..=10),), - ReceivalConfirmationResult::FailedToConfirmFutureMessages, - ); - } - - #[test] - fn confirm_delivery_fails_if_entry_confirms_future_messages() { - assert_eq!( - assert_3_messages_confirmation_fails( - 3, - &unrewarded_relayers(1..=1) - .into_iter() - .chain(unrewarded_relayers(2..=30).into_iter()) - .chain(unrewarded_relayers(3..=3).into_iter()) - .collect(), - ), - ReceivalConfirmationResult::FailedToConfirmFutureMessages, - ); - } - - #[test] - #[allow(clippy::reversed_empty_ranges)] - fn confirm_delivery_fails_if_entry_is_empty() 
{ - assert_eq!( - assert_3_messages_confirmation_fails( - 3, - &unrewarded_relayers(1..=1) - .into_iter() - .chain(unrewarded_relayers(2..=1).into_iter()) - .chain(unrewarded_relayers(2..=3).into_iter()) - .collect(), - ), - ReceivalConfirmationResult::EmptyUnrewardedRelayerEntry, - ); - } - - #[test] - fn confirm_delivery_fails_if_entries_are_non_consecutive() { - assert_eq!( - assert_3_messages_confirmation_fails( - 3, - &unrewarded_relayers(1..=1) - .into_iter() - .chain(unrewarded_relayers(3..=3).into_iter()) - .chain(unrewarded_relayers(2..=2).into_iter()) - .collect(), - ), - ReceivalConfirmationResult::NonConsecutiveUnrewardedRelayerEntries, - ); - } - - #[test] - fn confirm_delivery_fails_if_number_of_dispatch_results_in_entry_is_invalid() { - let mut relayers: VecDeque<_> = unrewarded_relayers(1..=1) - .into_iter() - .chain(unrewarded_relayers(2..=2).into_iter()) - .chain(unrewarded_relayers(3..=3).into_iter()) - .collect(); - relayers[0].messages.dispatch_results.clear(); - assert_eq!( - assert_3_messages_confirmation_fails(3, &relayers), - ReceivalConfirmationResult::InvalidNumberOfDispatchResults, - ); - } - - #[test] - fn prune_messages_works() { - run_test(|| { - let mut lane = outbound_lane::(TEST_LANE_ID); - // when lane is empty, nothing is pruned - assert_eq!(lane.prune_messages(100), 0); - assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1); - // when nothing is confirmed, nothing is pruned - lane.send_message(message_data(REGULAR_PAYLOAD)); - lane.send_message(message_data(REGULAR_PAYLOAD)); - lane.send_message(message_data(REGULAR_PAYLOAD)); - assert_eq!(lane.prune_messages(100), 0); - assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1); - // after confirmation, some messages are received - assert_eq!( - lane.confirm_delivery(2, 2, &unrewarded_relayers(1..=2)), - ReceivalConfirmationResult::ConfirmedMessages(delivered_messages(1..=2)), - ); - assert_eq!(lane.prune_messages(100), 2); - assert_eq!(lane.storage.data().oldest_unpruned_nonce, 3); - // after last message is confirmed, everything is pruned - assert_eq!( - lane.confirm_delivery(1, 3, &unrewarded_relayers(3..=3)), - ReceivalConfirmationResult::ConfirmedMessages(delivered_messages(3..=3)), - ); - assert_eq!(lane.prune_messages(100), 1); - assert_eq!(lane.storage.data().oldest_unpruned_nonce, 4); - }); - } - - #[test] - fn confirm_delivery_detects_when_more_than_expected_messages_are_confirmed() { - run_test(|| { - let mut lane = outbound_lane::(TEST_LANE_ID); - lane.send_message(message_data(REGULAR_PAYLOAD)); - lane.send_message(message_data(REGULAR_PAYLOAD)); - lane.send_message(message_data(REGULAR_PAYLOAD)); - assert_eq!( - lane.confirm_delivery(0, 3, &unrewarded_relayers(1..=3)), - ReceivalConfirmationResult::TryingToConfirmMoreMessagesThanExpected(3), - ); - assert_eq!( - lane.confirm_delivery(2, 3, &unrewarded_relayers(1..=3)), - ReceivalConfirmationResult::TryingToConfirmMoreMessagesThanExpected(3), - ); - assert_eq!( - lane.confirm_delivery(3, 3, &unrewarded_relayers(1..=3)), - ReceivalConfirmationResult::ConfirmedMessages(delivered_messages(1..=3)), - ); - }); - } -} diff --git a/polkadot/bridges/modules/messages/src/weights.rs b/polkadot/bridges/modules/messages/src/weights.rs deleted file mode 100644 index 462f768a08b..00000000000 --- a/polkadot/bridges/modules/messages/src/weights.rs +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Autogenerated weights for `pallet_bridge_messages` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2021-12-28, STEPS: 50, REPEAT: 20 -//! LOW RANGE: [], HIGH RANGE: [] -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled -//! CHAIN: Some("dev"), DB CACHE: 128 - -// Executed Command: -// target/release/millau-bridge-node -// benchmark -// --chain=dev -// --steps=50 -// --repeat=20 -// --pallet=pallet_bridge_messages -// --extrinsic=* -// --execution=wasm -// --wasm-execution=Compiled -// --heap-pages=4096 -// --output=./modules/messages/src/weights.rs -// --template=./.maintain/millau-weight-template.hbs - -#![allow(clippy::all)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{ - traits::Get, - weights::{constants::RocksDbWeight, Weight}, -}; -use sp_std::marker::PhantomData; - -/// Weight functions needed for `pallet_bridge_messages`. -pub trait WeightInfo { - fn send_minimal_message_worst_case() -> Weight; - fn send_1_kb_message_worst_case() -> Weight; - fn send_16_kb_message_worst_case() -> Weight; - fn maximal_increase_message_fee() -> Weight; - fn increase_message_fee(i: u32) -> Weight; - fn receive_single_message_proof() -> Weight; - fn receive_two_messages_proof() -> Weight; - fn receive_single_message_proof_with_outbound_lane_state() -> Weight; - fn receive_single_message_proof_1_kb() -> Weight; - fn receive_single_message_proof_16_kb() -> Weight; - fn receive_single_prepaid_message_proof() -> Weight; - fn receive_delivery_proof_for_single_message() -> Weight; - fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight; - fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight; -} - -/// Weights for `pallet_bridge_messages` using the Millau node and recommended hardware. 
-pub struct MillauWeight(PhantomData); -impl WeightInfo for MillauWeight { - fn send_minimal_message_worst_case() -> Weight { - (117_480_000 as Weight) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) - .saturating_add(T::DbWeight::get().writes(12 as Weight)) - } - fn send_1_kb_message_worst_case() -> Weight { - (128_391_000 as Weight) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) - .saturating_add(T::DbWeight::get().writes(12 as Weight)) - } - fn send_16_kb_message_worst_case() -> Weight { - (149_149_000 as Weight) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) - .saturating_add(T::DbWeight::get().writes(12 as Weight)) - } - fn maximal_increase_message_fee() -> Weight { - (6_015_058_000 as Weight) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn increase_message_fee(i: u32) -> Weight { - (0 as Weight) - .saturating_add((2_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn receive_single_message_proof() -> Weight { - (179_892_000 as Weight) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn receive_two_messages_proof() -> Weight { - (291_793_000 as Weight) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn receive_single_message_proof_with_outbound_lane_state() -> Weight { - (192_191_000 as Weight) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn receive_single_message_proof_1_kb() -> Weight { - (202_104_000 as Weight) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn receive_single_message_proof_16_kb() -> Weight { - (357_144_000 as Weight) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn receive_single_prepaid_message_proof() -> Weight { - (122_648_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - fn receive_delivery_proof_for_single_message() -> Weight { - (107_631_000 as Weight) - .saturating_add(T::DbWeight::get().reads(6 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { - (113_885_000 as Weight) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { - (155_151_000 as Weight) - .saturating_add(T::DbWeight::get().reads(8 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) - } -} - -// For backwards compatibility and tests -impl WeightInfo for () { - fn send_minimal_message_worst_case() -> Weight { - (117_480_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(7 as Weight)) - .saturating_add(RocksDbWeight::get().writes(12 as Weight)) - } - fn send_1_kb_message_worst_case() -> Weight { - (128_391_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(7 as Weight)) - .saturating_add(RocksDbWeight::get().writes(12 as Weight)) - } - fn send_16_kb_message_worst_case() -> Weight { - (149_149_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(7 as 
Weight)) - .saturating_add(RocksDbWeight::get().writes(12 as Weight)) - } - fn maximal_increase_message_fee() -> Weight { - (6_015_058_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - } - fn increase_message_fee(i: u32) -> Weight { - (0 as Weight) - .saturating_add((2_000 as Weight).saturating_mul(i as Weight)) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - } - fn receive_single_message_proof() -> Weight { - (179_892_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - } - fn receive_two_messages_proof() -> Weight { - (291_793_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - } - fn receive_single_message_proof_with_outbound_lane_state() -> Weight { - (192_191_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - } - fn receive_single_message_proof_1_kb() -> Weight { - (202_104_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - } - fn receive_single_message_proof_16_kb() -> Weight { - (357_144_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - } - fn receive_single_prepaid_message_proof() -> Weight { - (122_648_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } - fn receive_delivery_proof_for_single_message() -> Weight { - (107_631_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(6 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - } - fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { - (113_885_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(7 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - } - fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { - (155_151_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(8 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) - } -} diff --git a/polkadot/bridges/modules/messages/src/weights_ext.rs b/polkadot/bridges/modules/messages/src/weights_ext.rs deleted file mode 100644 index 483a22eda1d..00000000000 --- a/polkadot/bridges/modules/messages/src/weights_ext.rs +++ /dev/null @@ -1,397 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Weight-related utilities. 
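The `WeightInfoExt` helpers below turn these raw benchmark results into per-component overheads by subtracting one benchmarked extrinsic from another. A rough standalone illustration of that arithmetic, reusing the Millau figures above as plain constants (the free-standing functions here only mirror the trait helpers that follow and exist purely for illustration):

type Weight = u64;

// Benchmarked worst-case extrinsic weights, copied from the table above.
const RECEIVE_SINGLE_MESSAGE_PROOF: Weight = 179_892_000;
const RECEIVE_TWO_MESSAGES_PROOF: Weight = 291_793_000;

// Per-transaction overhead: two single-message deliveries pay the transaction
// overhead twice, one two-message delivery pays it once, so the difference
// isolates the overhead of a single delivery transaction.
fn receive_messages_proof_overhead() -> Weight {
    (RECEIVE_SINGLE_MESSAGE_PROOF * 2).saturating_sub(RECEIVE_TWO_MESSAGES_PROOF)
}

// Per-message overhead: the cost of going from one to two messages within a
// single delivery transaction, multiplied by the number of messages.
fn receive_messages_proof_messages_overhead(messages: u64) -> Weight {
    RECEIVE_TWO_MESSAGES_PROOF
        .saturating_sub(RECEIVE_SINGLE_MESSAGE_PROOF)
        .saturating_mul(messages)
}

fn main() {
    println!("per-tx overhead:    {}", receive_messages_proof_overhead());
    println!("2-message overhead: {}", receive_messages_proof_messages_overhead(2));
}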
- -use crate::weights::WeightInfo; - -use bp_messages::{MessageNonce, UnrewardedRelayersState}; -use bp_runtime::{PreComputedSize, Size}; -use frame_support::weights::{RuntimeDbWeight, Weight}; - -/// Size of the message being delivered in benchmarks. -pub const EXPECTED_DEFAULT_MESSAGE_LENGTH: u32 = 128; - -/// We assume that size of signed extensions on all our chains and size of all 'small' arguments of -/// calls we're checking here would fit 1KB. -const SIGNED_EXTENSIONS_SIZE: u32 = 1024; - -/// Number of extra bytes (excluding size of storage value itself) of storage proof, built at -/// Rialto chain. This mostly depends on number of entries (and their density) in the storage trie. -/// Some reserve is reserved to account future chain growth. -pub const EXTRA_STORAGE_PROOF_SIZE: u32 = 1024; - -/// Ensure that weights from `WeightInfoExt` implementation are looking correct. -pub fn ensure_weights_are_correct( - expected_default_message_delivery_tx_weight: Weight, - expected_additional_byte_delivery_weight: Weight, - expected_messages_delivery_confirmation_tx_weight: Weight, - expected_pay_inbound_dispatch_fee_weight: Weight, - db_weight: RuntimeDbWeight, -) { - // verify `send_message` weight components - assert_ne!(W::send_message_overhead(), 0); - assert_ne!(W::send_message_size_overhead(0), 0); - - // verify `receive_messages_proof` weight components - assert_ne!(W::receive_messages_proof_overhead(), 0); - assert_ne!(W::receive_messages_proof_messages_overhead(1), 0); - assert_ne!(W::receive_messages_proof_outbound_lane_state_overhead(), 0); - assert_ne!(W::storage_proof_size_overhead(1), 0); - - // verify that the hardcoded value covers `receive_messages_proof` weight - let actual_single_regular_message_delivery_tx_weight = W::receive_messages_proof_weight( - &PreComputedSize( - (EXPECTED_DEFAULT_MESSAGE_LENGTH + W::expected_extra_storage_proof_size()) as usize, - ), - 1, - 0, - ); - assert!( - actual_single_regular_message_delivery_tx_weight <= - expected_default_message_delivery_tx_weight, - "Default message delivery transaction weight {} is larger than expected weight {}", - actual_single_regular_message_delivery_tx_weight, - expected_default_message_delivery_tx_weight, - ); - - // verify that hardcoded value covers additional byte length of `receive_messages_proof` weight - let actual_additional_byte_delivery_weight = W::storage_proof_size_overhead(1); - assert!( - actual_additional_byte_delivery_weight <= expected_additional_byte_delivery_weight, - "Single additional byte delivery weight {} is larger than expected weight {}", - actual_additional_byte_delivery_weight, - expected_additional_byte_delivery_weight, - ); - - // verify `receive_messages_delivery_proof` weight components - assert_ne!(W::receive_messages_delivery_proof_overhead(), 0); - assert_ne!(W::receive_messages_delivery_proof_messages_overhead(1), 0); - assert_ne!(W::receive_messages_delivery_proof_relayers_overhead(1), 0); - assert_ne!(W::storage_proof_size_overhead(1), 0); - - // verify that the hardcoded value covers `receive_messages_delivery_proof` weight - let actual_messages_delivery_confirmation_tx_weight = W::receive_messages_delivery_proof_weight( - &PreComputedSize(W::expected_extra_storage_proof_size() as usize), - &UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - total_messages: 1, - ..Default::default() - }, - db_weight, - ); - assert!( - actual_messages_delivery_confirmation_tx_weight <= - expected_messages_delivery_confirmation_tx_weight, - "Messages delivery confirmation transaction 
weight {} is larger than expected weight {}", - actual_messages_delivery_confirmation_tx_weight, - expected_messages_delivery_confirmation_tx_weight, - ); - - // verify pay-dispatch-fee overhead for inbound messages - let actual_pay_inbound_dispatch_fee_weight = W::pay_inbound_dispatch_fee_overhead(); - assert!( - actual_pay_inbound_dispatch_fee_weight <= expected_pay_inbound_dispatch_fee_weight, - "Weight {} of pay-dispatch-fee overhead for inbound messages is larger than expected weight {}", - actual_pay_inbound_dispatch_fee_weight, - expected_pay_inbound_dispatch_fee_weight, - ); -} - -/// Ensure that we're able to receive maximal (by-size and by-weight) message from other chain. -pub fn ensure_able_to_receive_message( - max_extrinsic_size: u32, - max_extrinsic_weight: Weight, - max_incoming_message_proof_size: u32, - max_incoming_message_dispatch_weight: Weight, -) { - // verify that we're able to receive proof of maximal-size message - let max_delivery_transaction_size = - max_incoming_message_proof_size.saturating_add(SIGNED_EXTENSIONS_SIZE); - assert!( - max_delivery_transaction_size <= max_extrinsic_size, - "Size of maximal message delivery transaction {} + {} is larger than maximal possible transaction size {}", - max_incoming_message_proof_size, - SIGNED_EXTENSIONS_SIZE, - max_extrinsic_size, - ); - - // verify that we're able to receive proof of maximal-size message with maximal dispatch weight - let max_delivery_transaction_dispatch_weight = W::receive_messages_proof_weight( - &PreComputedSize( - (max_incoming_message_proof_size + W::expected_extra_storage_proof_size()) as usize, - ), - 1, - max_incoming_message_dispatch_weight, - ); - assert!( - max_delivery_transaction_dispatch_weight <= max_extrinsic_weight, - "Weight of maximal message delivery transaction + {} is larger than maximal possible transaction weight {}", - max_delivery_transaction_dispatch_weight, - max_extrinsic_weight, - ); -} - -/// Ensure that we're able to receive maximal confirmation from other chain. 
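The size half of the check above is plain arithmetic: the worst-case incoming message proof plus the assumed 1 KB of signed extensions must fit into a single extrinsic. A tiny self-contained sketch of that check, with placeholder limits rather than values taken from any particular runtime:

// Placeholder limits; real values come from the runtime's extrinsic limits.
const SIGNED_EXTENSIONS_SIZE: u32 = 1024;

fn assert_message_fits(max_extrinsic_size: u32, max_incoming_message_proof_size: u32) {
    // A delivery transaction is (roughly) the message proof plus signed
    // extensions, so their sum must fit into one extrinsic.
    let max_delivery_transaction_size =
        max_incoming_message_proof_size.saturating_add(SIGNED_EXTENSIONS_SIZE);
    assert!(
        max_delivery_transaction_size <= max_extrinsic_size,
        "maximal delivery transaction ({} + {}) exceeds max extrinsic size {}",
        max_incoming_message_proof_size,
        SIGNED_EXTENSIONS_SIZE,
        max_extrinsic_size,
    );
}

fn main() {
    // E.g. a 4 MiB extrinsic limit comfortably fits a 1 MiB proof plus extensions.
    assert_message_fits(4 * 1024 * 1024, 1024 * 1024);
}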
-pub fn ensure_able_to_receive_confirmation( - max_extrinsic_size: u32, - max_extrinsic_weight: Weight, - max_inbound_lane_data_proof_size_from_peer_chain: u32, - max_unrewarded_relayer_entries_at_peer_inbound_lane: MessageNonce, - max_unconfirmed_messages_at_inbound_lane: MessageNonce, - db_weight: RuntimeDbWeight, -) { - // verify that we're able to receive confirmation of maximal-size - let max_confirmation_transaction_size = - max_inbound_lane_data_proof_size_from_peer_chain.saturating_add(SIGNED_EXTENSIONS_SIZE); - assert!( - max_confirmation_transaction_size <= max_extrinsic_size, - "Size of maximal message delivery confirmation transaction {} + {} is larger than maximal possible transaction size {}", - max_inbound_lane_data_proof_size_from_peer_chain, - SIGNED_EXTENSIONS_SIZE, - max_extrinsic_size, - ); - - // verify that we're able to reward maximal number of relayers that have delivered maximal - // number of messages - let max_confirmation_transaction_dispatch_weight = W::receive_messages_delivery_proof_weight( - &PreComputedSize(max_inbound_lane_data_proof_size_from_peer_chain as usize), - &UnrewardedRelayersState { - unrewarded_relayer_entries: max_unrewarded_relayer_entries_at_peer_inbound_lane, - total_messages: max_unconfirmed_messages_at_inbound_lane, - ..Default::default() - }, - db_weight, - ); - assert!( - max_confirmation_transaction_dispatch_weight <= max_extrinsic_weight, - "Weight of maximal confirmation transaction {} is larger than maximal possible transaction weight {}", - max_confirmation_transaction_dispatch_weight, - max_extrinsic_weight, - ); -} - -/// Extended weight info. -pub trait WeightInfoExt: WeightInfo { - /// Size of proof that is already included in the single message delivery weight. - /// - /// The message submitter (at source chain) has already covered this cost. But there are two - /// factors that may increase proof size: (1) the message size may be larger than predefined - /// and (2) relayer may add extra trie nodes to the proof. So if proof size is larger than - /// this value, we're going to charge relayer for that. - fn expected_extra_storage_proof_size() -> u32; - - // Functions that are directly mapped to extrinsics weights. - - /// Weight of message send extrinsic. - fn send_message_weight(message: &impl Size, db_weight: RuntimeDbWeight) -> Weight { - let transaction_overhead = Self::send_message_overhead(); - let message_size_overhead = Self::send_message_size_overhead(message.size_hint()); - let call_back_overhead = Self::single_message_callback_overhead(db_weight); - - transaction_overhead - .saturating_add(message_size_overhead) - .saturating_add(call_back_overhead) - } - - /// Weight of message delivery extrinsic. 
- fn receive_messages_proof_weight( - proof: &impl Size, - messages_count: u32, - dispatch_weight: Weight, - ) -> Weight { - // basic components of extrinsic weight - let transaction_overhead = Self::receive_messages_proof_overhead(); - let outbound_state_delivery_weight = - Self::receive_messages_proof_outbound_lane_state_overhead(); - let messages_delivery_weight = - Self::receive_messages_proof_messages_overhead(MessageNonce::from(messages_count)); - let messages_dispatch_weight = dispatch_weight; - - // proof size overhead weight - let expected_proof_size = EXPECTED_DEFAULT_MESSAGE_LENGTH - .saturating_mul(messages_count.saturating_sub(1)) - .saturating_add(Self::expected_extra_storage_proof_size()); - let actual_proof_size = proof.size_hint(); - let proof_size_overhead = Self::storage_proof_size_overhead( - actual_proof_size.saturating_sub(expected_proof_size), - ); - - transaction_overhead - .saturating_add(outbound_state_delivery_weight) - .saturating_add(messages_delivery_weight) - .saturating_add(messages_dispatch_weight) - .saturating_add(proof_size_overhead) - } - - /// Weight of confirmation delivery extrinsic. - fn receive_messages_delivery_proof_weight( - proof: &impl Size, - relayers_state: &UnrewardedRelayersState, - db_weight: RuntimeDbWeight, - ) -> Weight { - // basic components of extrinsic weight - let transaction_overhead = Self::receive_messages_delivery_proof_overhead(); - let messages_overhead = - Self::receive_messages_delivery_proof_messages_overhead(relayers_state.total_messages); - let relayers_overhead = Self::receive_messages_delivery_proof_relayers_overhead( - relayers_state.unrewarded_relayer_entries, - ); - - // proof size overhead weight - let expected_proof_size = Self::expected_extra_storage_proof_size(); - let actual_proof_size = proof.size_hint(); - let proof_size_overhead = Self::storage_proof_size_overhead( - actual_proof_size.saturating_sub(expected_proof_size), - ); - - // and cost of calling `OnDeliveryConfirmed::on_messages_delivered()` for every confirmed - // message - let callback_overhead = relayers_state - .total_messages - .saturating_mul(Self::single_message_callback_overhead(db_weight)); - - transaction_overhead - .saturating_add(messages_overhead) - .saturating_add(relayers_overhead) - .saturating_add(proof_size_overhead) - .saturating_add(callback_overhead) - } - - // Functions that are used by extrinsics weights formulas. - - /// Returns weight of message send transaction (`send_message`). - fn send_message_overhead() -> Weight { - Self::send_minimal_message_worst_case() - } - - /// Returns weight that needs to be accounted when message of given size is sent - /// (`send_message`). - fn send_message_size_overhead(message_size: u32) -> Weight { - let message_size_in_kb = (1024u64 + message_size as u64) / 1024; - let single_kb_weight = - (Self::send_16_kb_message_worst_case() - Self::send_1_kb_message_worst_case()) / 15; - message_size_in_kb * single_kb_weight - } - - /// Returns weight overhead of message delivery transaction (`receive_messages_proof`). 
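As a worked example of the `send_message_size_overhead` formula just above, using the Millau `send_1_kb`/`send_16_kb` worst-case figures from the weights table earlier (constants inlined here purely for illustration):

type Weight = u64;

// Millau worst-case benchmark results from the weights table above.
const SEND_1_KB_WORST_CASE: Weight = 128_391_000;
const SEND_16_KB_WORST_CASE: Weight = 149_149_000;

fn send_message_size_overhead(message_size: u32) -> Weight {
    // Round the payload size up to whole kilobytes (a 0-byte message still
    // counts as one kilobyte)...
    let message_size_in_kb = (1024u64 + message_size as u64) / 1024;
    // ...and charge the per-kilobyte weight derived from the 15 KB gap
    // between the 16 KB and 1 KB worst-case benchmarks.
    let single_kb_weight = (SEND_16_KB_WORST_CASE - SEND_1_KB_WORST_CASE) / 15;
    message_size_in_kb * single_kb_weight
}

fn main() {
    // A 2 KB payload is charged for three "kilobyte units" under this rounding.
    println!("{}", send_message_size_overhead(2 * 1024));
}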
- fn receive_messages_proof_overhead() -> Weight { - let weight_of_two_messages_and_two_tx_overheads = - Self::receive_single_message_proof().saturating_mul(2); - let weight_of_two_messages_and_single_tx_overhead = Self::receive_two_messages_proof(); - weight_of_two_messages_and_two_tx_overheads - .saturating_sub(weight_of_two_messages_and_single_tx_overhead) - } - - /// Returns weight that needs to be accounted when receiving given a number of messages with - /// message delivery transaction (`receive_messages_proof`). - fn receive_messages_proof_messages_overhead(messages: MessageNonce) -> Weight { - let weight_of_two_messages_and_single_tx_overhead = Self::receive_two_messages_proof(); - let weight_of_single_message_and_single_tx_overhead = Self::receive_single_message_proof(); - weight_of_two_messages_and_single_tx_overhead - .saturating_sub(weight_of_single_message_and_single_tx_overhead) - .saturating_mul(messages as Weight) - } - - /// Returns weight that needs to be accounted when message delivery transaction - /// (`receive_messages_proof`) is carrying outbound lane state proof. - fn receive_messages_proof_outbound_lane_state_overhead() -> Weight { - let weight_of_single_message_and_lane_state = - Self::receive_single_message_proof_with_outbound_lane_state(); - let weight_of_single_message = Self::receive_single_message_proof(); - weight_of_single_message_and_lane_state.saturating_sub(weight_of_single_message) - } - - /// Returns weight overhead of delivery confirmation transaction - /// (`receive_messages_delivery_proof`). - fn receive_messages_delivery_proof_overhead() -> Weight { - let weight_of_two_messages_and_two_tx_overheads = - Self::receive_delivery_proof_for_single_message().saturating_mul(2); - let weight_of_two_messages_and_single_tx_overhead = - Self::receive_delivery_proof_for_two_messages_by_single_relayer(); - weight_of_two_messages_and_two_tx_overheads - .saturating_sub(weight_of_two_messages_and_single_tx_overhead) - } - - /// Returns weight that needs to be accounted when receiving confirmations for given a number of - /// messages with delivery confirmation transaction (`receive_messages_delivery_proof`). - fn receive_messages_delivery_proof_messages_overhead(messages: MessageNonce) -> Weight { - let weight_of_two_messages = - Self::receive_delivery_proof_for_two_messages_by_single_relayer(); - let weight_of_single_message = Self::receive_delivery_proof_for_single_message(); - weight_of_two_messages - .saturating_sub(weight_of_single_message) - .saturating_mul(messages as Weight) - } - - /// Returns weight that needs to be accounted when receiving confirmations for given a number of - /// relayers entries with delivery confirmation transaction (`receive_messages_delivery_proof`). - fn receive_messages_delivery_proof_relayers_overhead(relayers: MessageNonce) -> Weight { - let weight_of_two_messages_by_two_relayers = - Self::receive_delivery_proof_for_two_messages_by_two_relayers(); - let weight_of_two_messages_by_single_relayer = - Self::receive_delivery_proof_for_two_messages_by_single_relayer(); - weight_of_two_messages_by_two_relayers - .saturating_sub(weight_of_two_messages_by_single_relayer) - .saturating_mul(relayers as Weight) - } - - /// Returns weight that needs to be accounted when storage proof of given size is received - /// (either in `receive_messages_proof` or `receive_messages_delivery_proof`). - /// - /// **IMPORTANT**: this overhead is already included in the 'base' transaction cost - e.g. 
proof - /// size depends on messages count or number of entries in the unrewarded relayers set. So this - /// shouldn't be added to cost of transaction, but instead should act as a minimal cost that the - /// relayer must pay when it relays proof of given size (even if cost based on other parameters - /// is less than that cost). - fn storage_proof_size_overhead(proof_size: u32) -> Weight { - let proof_size_in_bytes = proof_size as Weight; - let byte_weight = (Self::receive_single_message_proof_16_kb() - - Self::receive_single_message_proof_1_kb()) / - (15 * 1024); - proof_size_in_bytes * byte_weight - } - - /// Returns weight of the pay-dispatch-fee operation for inbound messages. - /// - /// This function may return zero if runtime doesn't support pay-dispatch-fee-at-target-chain - /// option. - fn pay_inbound_dispatch_fee_overhead() -> Weight { - Self::receive_single_message_proof() - .saturating_sub(Self::receive_single_prepaid_message_proof()) - } - - /// Returns pre-dispatch weight of single callback call. - /// - /// When benchmarking the weight please take into consideration both the `OnMessageAccepted` and - /// `OnDeliveryConfirmed` callbacks. The method should return the greater of the two, because - /// it's used to estimate the weight in both contexts. - fn single_message_callback_overhead(db_weight: RuntimeDbWeight) -> Weight { - db_weight.reads_writes(1, 1) - } -} - -impl WeightInfoExt for () { - fn expected_extra_storage_proof_size() -> u32 { - EXTRA_STORAGE_PROOF_SIZE - } -} - -impl WeightInfoExt for crate::weights::MillauWeight { - fn expected_extra_storage_proof_size() -> u32 { - EXTRA_STORAGE_PROOF_SIZE - } -} diff --git a/polkadot/bridges/modules/shift-session-manager/Cargo.toml b/polkadot/bridges/modules/shift-session-manager/Cargo.toml deleted file mode 100644 index 30a5618b115..00000000000 --- a/polkadot/bridges/modules/shift-session-manager/Cargo.toml +++ /dev/null @@ -1,35 +0,0 @@ -[package] -name = "pallet-shift-session-manager" -description = "A Substrate Runtime module that selects 2/3 of initial validators for every session" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } -scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } - -[dev-dependencies] -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } - -[features] -default = ["std"] -std = [ - "codec/std", - "frame-support/std", - "frame-system/std", - "pallet-session/std", - "scale-info/std", - "sp-staking/std", - "sp-std/std", -] diff --git a/polkadot/bridges/modules/shift-session-manager/src/lib.rs b/polkadot/bridges/modules/shift-session-manager/src/lib.rs deleted file mode 100644 index 
45db8840abe..00000000000 --- a/polkadot/bridges/modules/shift-session-manager/src/lib.rs +++ /dev/null @@ -1,249 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate session manager that selects 2/3 validators from initial set, -//! starting from session 2. - -#![cfg_attr(not(feature = "std"), no_std)] - -use sp_std::prelude::*; - -pub use pallet::*; - -#[frame_support::pallet] -pub mod pallet { - use super::*; - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - - #[pallet::config] - #[pallet::disable_frame_system_supertrait_check] - pub trait Config: pallet_session::Config {} - - #[pallet::pallet] - #[pallet::generate_store(pub(super) trait Store)] - #[pallet::without_storage_info] - pub struct Pallet(PhantomData); - - #[pallet::hooks] - impl Hooks> for Pallet {} - - #[pallet::call] - impl Pallet {} - - /// Validators of first two sessions. - #[pallet::storage] - pub(super) type InitialValidators = StorageValue<_, Vec>; -} - -impl pallet_session::SessionManager for Pallet { - fn end_session(_: sp_staking::SessionIndex) {} - fn start_session(_: sp_staking::SessionIndex) {} - fn new_session(session_index: sp_staking::SessionIndex) -> Option> { - // we don't want to add even more fields to genesis config => just return None - if session_index == 0 || session_index == 1 { - return None - } - - // the idea that on first call (i.e. when session 1 ends) we're reading current - // set of validators from session module (they are initial validators) and save - // in our 'local storage'. - // then for every session we select (deterministically) 2/3 of these initial - // validators to serve validators of new session - let available_validators = InitialValidators::::get().unwrap_or_else(|| { - let validators = >::validators(); - InitialValidators::::put(validators.clone()); - validators - }); - - Some(Self::select_validators(session_index, &available_validators)) - } -} - -impl Pallet { - /// Select validators for session. - fn select_validators( - session_index: sp_staking::SessionIndex, - available_validators: &[T::ValidatorId], - ) -> Vec { - let available_validators_count = available_validators.len(); - let count = sp_std::cmp::max(1, 2 * available_validators_count / 3); - let offset = session_index as usize % available_validators_count; - let end = offset + count; - let session_validators = match end.overflowing_sub(available_validators_count) { - (wrapped_end, false) if wrapped_end != 0 => available_validators[offset..] 
- .iter() - .chain(available_validators[..wrapped_end].iter()) - .cloned() - .collect(), - _ => available_validators[offset..end].to_vec(), - }; - - session_validators - } -} - -#[cfg(test)] -mod tests { - // From construct_runtime macro - #![allow(clippy::from_over_into)] - - use super::*; - use frame_support::{ - parameter_types, - sp_io::TestExternalities, - sp_runtime::{ - testing::{Header, UintAuthorityId}, - traits::{BlakeTwo256, ConvertInto, IdentityLookup}, - Perbill, RuntimeAppPublic, - }, - traits::GenesisBuild, - weights::Weight, - BasicExternalities, - }; - use sp_core::H256; - - type AccountId = u64; - - type Block = frame_system::mocking::MockBlock; - type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; - - frame_support::construct_runtime! { - pub enum TestRuntime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Session: pallet_session::{Pallet}, - } - } - - parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - } - - impl frame_system::Config for TestRuntime { - type Origin = Origin; - type Index = u64; - type Call = Call; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; - } - - parameter_types! { - pub const Period: u64 = 1; - pub const Offset: u64 = 0; - } - - impl pallet_session::Config for TestRuntime { - type Event = (); - type ValidatorId = ::AccountId; - type ValidatorIdOf = ConvertInto; - type ShouldEndSession = pallet_session::PeriodicSessions; - type NextSessionRotation = pallet_session::PeriodicSessions; - type SessionManager = (); - type SessionHandler = TestSessionHandler; - type Keys = UintAuthorityId; - type WeightInfo = (); - } - - impl Config for TestRuntime {} - - pub struct TestSessionHandler; - impl pallet_session::SessionHandler for TestSessionHandler { - const KEY_TYPE_IDS: &'static [sp_runtime::KeyTypeId] = &[UintAuthorityId::ID]; - - fn on_genesis_session(_validators: &[(AccountId, Ks)]) { - } - - fn on_new_session( - _: bool, - _: &[(AccountId, Ks)], - _: &[(AccountId, Ks)], - ) { - } - - fn on_disabled(_: u32) {} - } - - fn new_test_ext() -> TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - - let keys = vec![ - (1, 1, UintAuthorityId(1)), - (2, 2, UintAuthorityId(2)), - (3, 3, UintAuthorityId(3)), - (4, 4, UintAuthorityId(4)), - (5, 5, UintAuthorityId(5)), - ]; - - BasicExternalities::execute_with_storage(&mut t, || { - for (ref k, ..) 
in &keys { - frame_system::Pallet::::inc_providers(k); - } - }); - - pallet_session::GenesisConfig:: { keys } - .assimilate_storage(&mut t) - .unwrap(); - TestExternalities::new(t) - } - - #[test] - fn shift_session_manager_works() { - new_test_ext().execute_with(|| { - let all_accs = vec![1, 2, 3, 4, 5]; - - // at least 1 validator is selected - assert_eq!(Pallet::::select_validators(0, &[1]), vec![1]); - - // at session#0, shift is also 0 - assert_eq!(Pallet::::select_validators(0, &all_accs), vec![1, 2, 3]); - - // at session#1, shift is also 1 - assert_eq!(Pallet::::select_validators(1, &all_accs), vec![2, 3, 4]); - - // at session#3, we're wrapping - assert_eq!(Pallet::::select_validators(3, &all_accs), vec![4, 5, 1]); - - // at session#5, we're starting from the beginning again - assert_eq!(Pallet::::select_validators(5, &all_accs), vec![1, 2, 3]); - }); - } -} diff --git a/polkadot/bridges/modules/token-swap/Cargo.toml b/polkadot/bridges/modules/token-swap/Cargo.toml deleted file mode 100644 index aad395fb7a3..00000000000 --- a/polkadot/bridges/modules/token-swap/Cargo.toml +++ /dev/null @@ -1,59 +0,0 @@ -[package] -name = "pallet-bridge-token-swap" -description = "An Substrate pallet that allows parties on different chains (bridged using messages pallet) to swap their tokens" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } -log = { version = "0.4.14", default-features = false } -scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } -serde = { version = "1.0", optional = true } - -# Bridge dependencies - -bp-message-dispatch = { path = "../../primitives/message-dispatch", default-features = false } -bp-messages = { path = "../../primitives/messages", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } -bp-token-swap = { path = "../../primitives/token-swap", default-features = false } -pallet-bridge-dispatch = { path = "../dispatch", default-features = false } -pallet-bridge-messages = { path = "../messages", default-features = false } - -# Substrate Dependencies - -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } - -[dev-dependencies] -pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master" } - -[features] -default = ["std"] -std = [ - "codec/std", - "bp-message-dispatch/std", - "bp-messages/std", - "bp-runtime/std", - "bp-token-swap/std", - "frame-support/std", - "frame-system/std", - "log/std", - "pallet-bridge-dispatch/std", - "pallet-bridge-messages/std", - "scale-info/std", - "serde", - "sp-core/std", - "sp-io/std", - "sp-runtime/std", - 
"sp-std/std", -] -runtime-benchmarks = [ - "frame-benchmarking/runtime-benchmarks", -] diff --git a/polkadot/bridges/modules/token-swap/src/benchmarking.rs b/polkadot/bridges/modules/token-swap/src/benchmarking.rs deleted file mode 100644 index 878cb20993a..00000000000 --- a/polkadot/bridges/modules/token-swap/src/benchmarking.rs +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Token-swap pallet benchmarking. - -use crate::{ - swap_account_id, target_account_at_this_chain, BridgedAccountIdOf, BridgedAccountPublicOf, - BridgedAccountSignatureOf, BridgedBalanceOf, Call, Origin, Pallet, ThisChainBalance, - TokenSwapCreationOf, TokenSwapOf, -}; - -use bp_token_swap::{TokenSwap, TokenSwapCreation, TokenSwapState, TokenSwapType}; -use codec::{Decode, Encode}; -use frame_benchmarking::{account, benchmarks_instance_pallet}; -use frame_support::{traits::Currency, Parameter}; -use frame_system::RawOrigin; -use sp_core::H256; -use sp_io::hashing::blake2_256; -use sp_runtime::traits::{Bounded, TrailingZeroInput}; -use sp_std::{boxed::Box, vec::Vec}; - -const SEED: u32 = 0; - -/// Trait that must be implemented by runtime. -pub trait Config: crate::Config { - /// Initialize environment for token swap. - fn initialize_environment(); -} - -benchmarks_instance_pallet! { - where_clause { - where - Origin: Into, - BridgedAccountPublicOf: Decode + Parameter, - BridgedAccountSignatureOf: Decode, - } - - // - // Benchmarks that are used directly by the runtime. - // - - // Benchmark `create_swap` extrinsic. - // - // This benchmark assumes that message is **NOT** actually sent. Instead we're using `send_message_weight` - // from the `WeightInfoExt` trait. - // - // There aren't any factors that affect `create_swap` performance, so everything - // is straightforward here. - create_swap { - T::initialize_environment(); - - let sender = funded_account::("source_account_at_this_chain", 0); - let swap: TokenSwapOf = test_swap::(sender.clone(), true); - let swap_creation: TokenSwapCreationOf = test_swap_creation::(); - }: create_swap( - RawOrigin::Signed(sender.clone()), - swap, - Box::new(swap_creation) - ) - verify { - assert!(crate::PendingSwaps::::contains_key(test_swap_hash::(sender, true))); - } - - // Benchmark `claim_swap` extrinsic with the worst possible conditions: - // - // * swap is locked until some block, so current block number is read. 
- claim_swap { - T::initialize_environment(); - - let sender: T::AccountId = account("source_account_at_this_chain", 0, SEED); - crate::PendingSwaps::::insert( - test_swap_hash::(sender.clone(), false), - TokenSwapState::Confirmed, - ); - - let swap: TokenSwapOf = test_swap::(sender.clone(), false); - let claimer = target_account_at_this_chain::(&swap); - let token_swap_account = swap_account_id::(&swap); - T::ThisCurrency::make_free_balance_be(&token_swap_account, ThisChainBalance::::max_value()); - }: claim_swap(RawOrigin::Signed(claimer), swap) - verify { - assert!(!crate::PendingSwaps::::contains_key(test_swap_hash::(sender, false))); - } - - // Benchmark `cancel_swap` extrinsic with the worst possible conditions: - // - // * swap is locked until some block, so current block number is read. - cancel_swap { - T::initialize_environment(); - - let sender: T::AccountId = account("source_account_at_this_chain", 0, SEED); - crate::PendingSwaps::::insert( - test_swap_hash::(sender.clone(), false), - TokenSwapState::Failed, - ); - - let swap: TokenSwapOf = test_swap::(sender.clone(), false); - let token_swap_account = swap_account_id::(&swap); - T::ThisCurrency::make_free_balance_be(&token_swap_account, ThisChainBalance::::max_value()); - - }: cancel_swap(RawOrigin::Signed(sender.clone()), swap) - verify { - assert!(!crate::PendingSwaps::::contains_key(test_swap_hash::(sender, false))); - } -} - -/// Returns test token swap. -fn test_swap, I: 'static>(sender: T::AccountId, is_create: bool) -> TokenSwapOf { - TokenSwap { - swap_type: TokenSwapType::LockClaimUntilBlock( - if is_create { 10u32.into() } else { 0u32.into() }, - 0.into(), - ), - source_balance_at_this_chain: source_balance_to_swap::(), - source_account_at_this_chain: sender, - target_balance_at_bridged_chain: target_balance_to_swap::(), - target_account_at_bridged_chain: target_account_at_bridged_chain::(), - } -} - -/// Returns test token swap hash. -fn test_swap_hash, I: 'static>(sender: T::AccountId, is_create: bool) -> H256 { - test_swap::(sender, is_create).using_encoded(blake2_256).into() -} - -/// Returns test token swap creation params. -fn test_swap_creation, I: 'static>() -> TokenSwapCreationOf -where - BridgedAccountPublicOf: Decode, - BridgedAccountSignatureOf: Decode, -{ - TokenSwapCreation { - target_public_at_bridged_chain: target_public_at_bridged_chain::(), - swap_delivery_and_dispatch_fee: swap_delivery_and_dispatch_fee::(), - bridged_chain_spec_version: 0, - bridged_currency_transfer: Vec::new(), - bridged_currency_transfer_weight: 0, - bridged_currency_transfer_signature: bridged_currency_transfer_signature::(), - } -} - -/// Account that has some balance. -fn funded_account, I: 'static>(name: &'static str, index: u32) -> T::AccountId { - let account: T::AccountId = account(name, index, SEED); - T::ThisCurrency::make_free_balance_be(&account, ThisChainBalance::::max_value()); - account -} - -/// Currency transfer message fee. -fn swap_delivery_and_dispatch_fee, I: 'static>() -> ThisChainBalance { - ThisChainBalance::::max_value() / 4u32.into() -} - -/// Balance at the source chain that we're going to swap. -fn source_balance_to_swap, I: 'static>() -> ThisChainBalance { - ThisChainBalance::::max_value() / 2u32.into() -} - -/// Balance at the target chain that we're going to swap. -fn target_balance_to_swap, I: 'static>() -> BridgedBalanceOf { - BridgedBalanceOf::::max_value() / 2u32.into() -} - -/// Public key of `target_account_at_bridged_chain`. 
-fn target_public_at_bridged_chain, I: 'static>() -> BridgedAccountPublicOf -where - BridgedAccountPublicOf: Decode, -{ - BridgedAccountPublicOf::::decode(&mut TrailingZeroInput::zeroes()) - .expect("failed to decode `BridgedAccountPublicOf` from zeroes") -} - -/// Signature of `target_account_at_bridged_chain` over message. -fn bridged_currency_transfer_signature, I: 'static>() -> BridgedAccountSignatureOf -where - BridgedAccountSignatureOf: Decode, -{ - BridgedAccountSignatureOf::::decode(&mut TrailingZeroInput::zeroes()) - .expect("failed to decode `BridgedAccountSignatureOf` from zeroes") -} - -/// Account at the bridged chain that is participating in the swap. -fn target_account_at_bridged_chain, I: 'static>() -> BridgedAccountIdOf { - account("target_account_at_bridged_chain", 0, SEED) -} diff --git a/polkadot/bridges/modules/token-swap/src/lib.rs b/polkadot/bridges/modules/token-swap/src/lib.rs deleted file mode 100644 index 8a6d48ef7ca..00000000000 --- a/polkadot/bridges/modules/token-swap/src/lib.rs +++ /dev/null @@ -1,1192 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Runtime module that allows token swap between two parties acting on different chains. -//! -//! The swap is made using message lanes between This (where `pallet-bridge-token-swap` pallet -//! is deployed) and some other Bridged chain. No other assumptions about the Bridged chain are -//! made, so we don't need it to have an instance of the `pallet-bridge-token-swap` pallet deployed. -//! -//! There are four accounts participating in the swap: -//! -//! 1) account of This chain that has signed the `create_swap` transaction and has balance on This -//! chain. We'll be referring to this account as `source_account_at_this_chain`; -//! -//! 2) account of the Bridged chain that is sending the `claim_swap` message from the Bridged to -//! This chain. This account has balance on Bridged chain and is willing to swap these tokens to -//! This chain tokens of the `source_account_at_this_chain`. We'll be referring to this account -//! as `target_account_at_bridged_chain`; -//! -//! 3) account of the Bridged chain that is indirectly controlled by the -//! `source_account_at_this_chain`. We'll be referring this account as -//! `source_account_at_bridged_chain`; -//! -//! 4) account of This chain that is indirectly controlled by the `target_account_at_bridged_chain`. -//! We'll be referring this account as `target_account_at_this_chain`. -//! -//! So the tokens swap is an intention of `source_account_at_this_chain` to swap his -//! `source_balance_at_this_chain` tokens to the `target_balance_at_bridged_chain` tokens owned by -//! `target_account_at_bridged_chain`. The swap process goes as follows: -//! -//! 1) the `source_account_at_this_chain` account submits the `create_swap` transaction on This -//! chain; -//! -//! 
2) the tokens transfer message that would transfer `target_balance_at_bridged_chain` -//! tokens from the `target_account_at_bridged_chain` to the `source_account_at_bridged_chain`, -//! is sent over the bridge; -//! -//! 3) when transfer message is delivered and dispatched, the pallet receives notification; -//! -//! 4) if message has been successfully dispatched, the `target_account_at_bridged_chain` sends the -//! message that would transfer `source_balance_at_this_chain` tokens to his -//! `target_account_at_this_chain` account; -//! -//! 5) if message dispatch has failed, the `source_account_at_this_chain` may submit the -//! `cancel_swap` transaction and return his `source_balance_at_this_chain` back to his account. -//! -//! While swap is pending, the `source_balance_at_this_chain` tokens are owned by the special -//! temporary `swap_account_at_this_chain` account. It is destroyed upon swap completion. - -#![cfg_attr(not(feature = "std"), no_std)] - -use bp_messages::{ - source_chain::{MessagesBridge, OnDeliveryConfirmed}, - DeliveredMessages, LaneId, MessageNonce, -}; -use bp_runtime::{messages::DispatchFeePayment, ChainId}; -use bp_token_swap::{ - RawBridgedTransferCall, TokenSwap, TokenSwapCreation, TokenSwapState, TokenSwapType, -}; -use codec::{Decode, Encode}; -use frame_support::{ - fail, - traits::{Currency, ExistenceRequirement}, - weights::PostDispatchInfo, - RuntimeDebug, -}; -use scale_info::TypeInfo; -use sp_core::H256; -use sp_io::hashing::blake2_256; -use sp_runtime::traits::{Convert, Saturating}; -use sp_std::{boxed::Box, marker::PhantomData}; -use weights::WeightInfo; - -pub use weights_ext::WeightInfoExt; - -#[cfg(test)] -mod mock; - -#[cfg(feature = "runtime-benchmarks")] -pub mod benchmarking; - -pub mod weights; -pub mod weights_ext; - -pub use pallet::*; - -/// Name of the `PendingSwaps` storage map. -pub const PENDING_SWAPS_MAP_NAME: &str = "PendingSwaps"; - -/// Origin for the token swap pallet. -#[derive(PartialEq, Eq, Clone, RuntimeDebug, Encode, Decode, TypeInfo)] -pub enum RawOrigin { - /// The call is originated by the token swap account. - TokenSwap { - /// Id of the account that has started the swap. - source_account_at_this_chain: AccountId, - /// Id of the account that holds the funds during this swap. The message fee is paid from - /// this account funds. - swap_account_at_this_chain: AccountId, - }, - /// Dummy to manage the fact we have instancing. - _Phantom(PhantomData), -} - -// comes from #[pallet::event] -#[allow(clippy::unused_unit)] -#[frame_support::pallet] -pub mod pallet { - use super::*; - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - - #[pallet::config] - pub trait Config: frame_system::Config { - /// The overarching event type. - type Event: From> + IsType<::Event>; - /// Benchmarks results from runtime we're plugged into. - type WeightInfo: WeightInfoExt; - - /// Id of the bridge with the Bridged chain. - type BridgedChainId: Get; - /// The identifier of outbound message lane on This chain used to send token transfer - /// messages to the Bridged chain. - /// - /// It is highly recommended to use dedicated lane for every instance of token swap - /// pallet. Messages delivery confirmation callback is implemented in the way that - /// for every confirmed message, there is (at least) a storage read. Which mean, - /// that if pallet will see unrelated confirmations, it'll just burn storage-read - /// weight, achieving nothing. - type OutboundMessageLaneId: Get; - /// Messages bridge with Bridged chain. 
- type MessagesBridge: MessagesBridge< - Self::Origin, - Self::AccountId, - >::Balance, - MessagePayloadOf, - >; - - /// This chain Currency used in the tokens swap. - type ThisCurrency: Currency; - /// Converter from raw hash (derived from swap) to This chain account. - type FromSwapToThisAccountIdConverter: Convert; - - /// The chain we're bridged to. - type BridgedChain: bp_runtime::Chain; - /// Converter from raw hash (derived from Bridged chain account) to This chain account. - type FromBridgedToThisAccountIdConverter: Convert; - } - - /// Tokens balance at This chain. - pub type ThisChainBalance = <>::ThisCurrency as Currency< - ::AccountId, - >>::Balance; - - /// Type of the Bridged chain. - pub type BridgedChainOf = >::BridgedChain; - /// Tokens balance type at the Bridged chain. - pub type BridgedBalanceOf = bp_runtime::BalanceOf>; - /// Account identifier type at the Bridged chain. - pub type BridgedAccountIdOf = bp_runtime::AccountIdOf>; - /// Account public key type at the Bridged chain. - pub type BridgedAccountPublicOf = bp_runtime::AccountPublicOf>; - /// Account signature type at the Bridged chain. - pub type BridgedAccountSignatureOf = bp_runtime::SignatureOf>; - - /// Bridge message payload used by the pallet. - pub type MessagePayloadOf = bp_message_dispatch::MessagePayload< - ::AccountId, - BridgedAccountPublicOf, - BridgedAccountSignatureOf, - RawBridgedTransferCall, - >; - /// Type of `TokenSwap` used by the pallet. - pub type TokenSwapOf = TokenSwap< - BlockNumberFor, - ThisChainBalance, - ::AccountId, - BridgedBalanceOf, - BridgedAccountIdOf, - >; - /// Type of `TokenSwapCreation` used by the pallet. - pub type TokenSwapCreationOf = TokenSwapCreation< - BridgedAccountPublicOf, - ThisChainBalance, - BridgedAccountSignatureOf, - >; - - #[pallet::pallet] - #[pallet::generate_store(pub(super) trait Store)] - #[pallet::without_storage_info] - pub struct Pallet(PhantomData<(T, I)>); - - #[pallet::hooks] - impl, I: 'static> Hooks> for Pallet {} - - #[pallet::call] - impl, I: 'static> Pallet - where - BridgedAccountPublicOf: Parameter, - Origin: Into, - { - /// Start token swap procedure. - /// - /// The dispatch origin for this call must be exactly the - /// `swap.source_account_at_this_chain` account. - /// - /// Method arguments are: - /// - /// - `swap` - token swap intention; - /// - `swap_creation_params` - additional parameters required to start tokens swap. - /// - /// The `source_account_at_this_chain` MUST have enough balance to cover both token swap and - /// message transfer. Message fee may be estimated using corresponding `OutboundLaneApi` of - /// This runtime. - /// - /// **WARNING**: the submitter of this transaction is responsible for verifying: - /// - /// 1) that the `swap_creation_params.bridged_currency_transfer` represents a valid token - /// transfer call that transfers `swap.target_balance_at_bridged_chain` to his - /// `swap.source_account_at_bridged_chain` account; - /// - /// 2) that either the `swap.source_account_at_bridged_chain` already exists, or the - /// `swap.target_balance_at_bridged_chain` is above existential deposit of the Bridged - /// chain; - /// - /// 3) the `swap_creation_params.target_public_at_bridged_chain` matches the - /// `swap.target_account_at_bridged_chain`; - /// - /// 4) the `bridged_currency_transfer_signature` is valid and generated by the owner of - /// the `swap_creation_params.target_public_at_bridged_chain` account (read more - /// about [`CallOrigin::TargetAccount`]). 
- /// - /// Violating rule#1 will lead to losing your `source_balance_at_this_chain` tokens. - /// Violating other rules will lead to losing message fees for this and other transactions + - /// losing fees for message transfer. - #[allow(clippy::boxed_local)] - #[pallet::weight( - T::WeightInfo::create_swap() - .saturating_add(T::WeightInfo::send_message_weight( - &&swap_creation_params.bridged_currency_transfer[..], - T::DbWeight::get(), - )) - )] - pub fn create_swap( - origin: OriginFor, - swap: TokenSwapOf, - swap_creation_params: Box>, - ) -> DispatchResultWithPostInfo { - let TokenSwapCreation { - target_public_at_bridged_chain, - swap_delivery_and_dispatch_fee, - bridged_chain_spec_version, - bridged_currency_transfer, - bridged_currency_transfer_weight, - bridged_currency_transfer_signature, - } = *swap_creation_params; - - // ensure that the `origin` is the same account that is mentioned in the `swap` - // intention - let origin_account = ensure_signed(origin)?; - ensure!( - origin_account == swap.source_account_at_this_chain, - Error::::MismatchedSwapSourceOrigin, - ); - - // remember weight components - let base_weight = T::WeightInfo::create_swap(); - - // we can't exchange less than existential deposit (the temporary `swap_account` account - // won't be created then) - // - // the same can also happen with the `swap.bridged_balance`, but we can't check it - // here (without additional knowledge of the Bridged chain). So it is the `origin` - // responsibility to check that the swap is valid. - ensure!( - swap.source_balance_at_this_chain >= T::ThisCurrency::minimum_balance(), - Error::::TooLowBalanceOnThisChain, - ); - - // if the swap is replay-protected, then we need to ensure that we have not yet passed - // the specified block yet - match swap.swap_type { - TokenSwapType::TemporaryTargetAccountAtBridgedChain => (), - TokenSwapType::LockClaimUntilBlock(block_number, _) => ensure!( - block_number >= frame_system::Pallet::::block_number(), - Error::::SwapPeriodIsFinished, - ), - } - - let swap_account = swap_account_id::(&swap); - let actual_send_message_weight = frame_support::storage::with_transaction(|| { - // funds are transferred from This account to the temporary Swap account - let transfer_result = T::ThisCurrency::transfer( - &swap.source_account_at_this_chain, - &swap_account, - // saturating_add is ok, or we have the chain where single holder owns all - // tokens - swap.source_balance_at_this_chain - .saturating_add(swap_delivery_and_dispatch_fee), - // if we'll allow account to die, then he'll be unable to `cancel_claim` - // if something won't work - ExistenceRequirement::KeepAlive, - ); - if let Err(err) = transfer_result { - log::error!( - target: "runtime::bridge-token-swap", - "Failed to transfer This chain tokens for the swap {:?} to Swap account ({:?}): {:?}", - swap, - swap_account, - err, - ); - - return sp_runtime::TransactionOutcome::Rollback(Err( - Error::::FailedToTransferToSwapAccount, - )) - } - - // the transfer message is sent over the bridge. The message is supposed to be a - // `Currency::transfer` call on the bridged chain, but no checks are made - it is - // the transaction submitter to ensure it is valid. 
- let send_message_result = T::MessagesBridge::send_message( - RawOrigin::TokenSwap { - source_account_at_this_chain: swap.source_account_at_this_chain.clone(), - swap_account_at_this_chain: swap_account.clone(), - } - .into(), - T::OutboundMessageLaneId::get(), - bp_message_dispatch::MessagePayload { - spec_version: bridged_chain_spec_version, - weight: bridged_currency_transfer_weight, - origin: bp_message_dispatch::CallOrigin::TargetAccount( - swap_account, - target_public_at_bridged_chain, - bridged_currency_transfer_signature, - ), - dispatch_fee_payment: DispatchFeePayment::AtTargetChain, - call: bridged_currency_transfer, - }, - swap_delivery_and_dispatch_fee, - ); - let sent_message = match send_message_result { - Ok(sent_message) => sent_message, - Err(err) => { - log::error!( - target: "runtime::bridge-token-swap", - "Failed to send token transfer message for swap {:?} to the Bridged chain: {:?}", - swap, - err, - ); - - return sp_runtime::TransactionOutcome::Rollback(Err( - Error::::FailedToSendTransferMessage, - )) - }, - }; - - // remember that we have started the swap - let swap_hash = swap.using_encoded(blake2_256).into(); - let insert_swap_result = - PendingSwaps::::try_mutate(swap_hash, |maybe_state| { - if maybe_state.is_some() { - return Err(()) - } - - *maybe_state = Some(TokenSwapState::Started); - Ok(()) - }); - if insert_swap_result.is_err() { - log::error!( - target: "runtime::bridge-token-swap", - "Failed to start token swap {:?}: the swap is already started", - swap, - ); - - return sp_runtime::TransactionOutcome::Rollback(Err( - Error::::SwapAlreadyStarted, - )) - } - - log::trace!( - target: "runtime::bridge-token-swap", - "The swap {:?} (hash {:?}) has been started", - swap, - swap_hash, - ); - - // remember that we're waiting for the transfer message delivery confirmation - PendingMessages::::insert(sent_message.nonce, swap_hash); - - // finally - emit the event - Self::deposit_event(Event::SwapStarted { - swap_hash, - message_nonce: sent_message.nonce - }); - - sp_runtime::TransactionOutcome::Commit(Ok(sent_message.weight)) - })?; - - Ok(PostDispatchInfo { - actual_weight: Some(base_weight.saturating_add(actual_send_message_weight)), - pays_fee: Pays::Yes, - }) - } - - /// Claim previously reserved `source_balance_at_this_chain` by - /// `target_account_at_this_chain`. - /// - /// **WARNING**: the correct way to call this function is to call it over the messages - /// bridge with dispatch origin set to - /// `pallet_bridge_dispatch::CallOrigin::SourceAccount(target_account_at_bridged_chain)`. - /// - /// This should be called only when successful transfer confirmation has been received. 
- #[pallet::weight(T::WeightInfo::claim_swap())] - pub fn claim_swap( - origin: OriginFor, - swap: TokenSwapOf, - ) -> DispatchResultWithPostInfo { - // ensure that the `origin` is controlled by the `swap.target_account_at_bridged_chain` - let origin_account = ensure_signed(origin)?; - let target_account_at_this_chain = target_account_at_this_chain::(&swap); - ensure!(origin_account == target_account_at_this_chain, Error::::InvalidClaimant,); - - // ensure that the swap is confirmed - let swap_hash = swap.using_encoded(blake2_256).into(); - let swap_state = PendingSwaps::::get(swap_hash); - match swap_state { - Some(TokenSwapState::Started) => fail!(Error::::SwapIsPending), - Some(TokenSwapState::Confirmed) => { - let is_claim_allowed = match swap.swap_type { - TokenSwapType::TemporaryTargetAccountAtBridgedChain => true, - TokenSwapType::LockClaimUntilBlock(block_number, _) => - block_number < frame_system::Pallet::::block_number(), - }; - - ensure!(is_claim_allowed, Error::::SwapIsTemporaryLocked); - }, - Some(TokenSwapState::Failed) => fail!(Error::::SwapIsFailed), - None => fail!(Error::::SwapIsInactive), - } - - complete_claim::(swap, swap_hash, origin_account, Event::SwapClaimed { swap_hash }) - } - - /// Return previously reserved `source_balance_at_this_chain` back to the - /// `source_account_at_this_chain`. - /// - /// This should be called only when transfer has failed at Bridged chain and we have - /// received notification about that. - #[pallet::weight(T::WeightInfo::cancel_swap())] - pub fn cancel_swap( - origin: OriginFor, - swap: TokenSwapOf, - ) -> DispatchResultWithPostInfo { - // ensure that the `origin` is the same account that is mentioned in the `swap` - // intention - let origin_account = ensure_signed(origin)?; - ensure!( - origin_account == swap.source_account_at_this_chain, - Error::::MismatchedSwapSourceOrigin, - ); - - // ensure that the swap has failed - let swap_hash = swap.using_encoded(blake2_256).into(); - let swap_state = PendingSwaps::::get(swap_hash); - match swap_state { - Some(TokenSwapState::Started) => fail!(Error::::SwapIsPending), - Some(TokenSwapState::Confirmed) => fail!(Error::::SwapIsConfirmed), - Some(TokenSwapState::Failed) => { - // we allow canceling swap even before lock period is over - the - // `source_account_at_this_chain` has already paid for nothing and it is up to - // him to decide whether he want to try again - }, - None => fail!(Error::::SwapIsInactive), - } - - complete_claim::(swap, swap_hash, origin_account, Event::SwapCanceled { swap_hash }) - } - } - - #[pallet::event] - #[pallet::generate_deposit(pub(super) fn deposit_event)] - pub enum Event, I: 'static = ()> { - /// Tokens swap has been started and message has been sent to the bridged message. - SwapStarted { swap_hash: H256, message_nonce: MessageNonce }, - /// Token swap has been claimed. - SwapClaimed { swap_hash: H256 }, - /// Token swap has been canceled. - SwapCanceled { swap_hash: H256 }, - } - - #[pallet::error] - pub enum Error { - /// The account that has submitted the `start_claim` doesn't match the - /// `TokenSwap::source_account_at_this_chain`. - MismatchedSwapSourceOrigin, - /// The swap balance in This chain tokens is below existential deposit and can't be made. - TooLowBalanceOnThisChain, - /// Transfer from This chain account to temporary Swap account has failed. - FailedToTransferToSwapAccount, - /// Transfer from the temporary Swap account to the derived account of Bridged account has - /// failed. 
- FailedToTransferFromSwapAccount, - /// The message to transfer tokens on Target chain can't be sent. - FailedToSendTransferMessage, - /// The same swap is already started. - SwapAlreadyStarted, - /// Swap outcome is not yet received. - SwapIsPending, - /// Someone is trying to claim swap that has failed. - SwapIsFailed, - /// Claiming swap is not allowed. - /// - /// Now the only possible case when you may get this error, is when you're trying to claim - /// swap with `TokenSwapType::LockClaimUntilBlock` before lock period is over. - SwapIsTemporaryLocked, - /// Swap period is finished and you can not restart it. - /// - /// Now the only possible case when you may get this error, is when you're trying to start - /// swap with `TokenSwapType::LockClaimUntilBlock` after lock period is over. - SwapPeriodIsFinished, - /// Someone is trying to cancel swap that has been confirmed. - SwapIsConfirmed, - /// Someone is trying to claim/cancel swap that is either not started or already - /// claimed/canceled. - SwapIsInactive, - /// The swap claimant is invalid. - InvalidClaimant, - } - - /// Origin for the token swap pallet. - #[pallet::origin] - pub type Origin = RawOrigin<::AccountId, I>; - - /// Pending token swaps states. - #[pallet::storage] - pub type PendingSwaps, I: 'static = ()> = - StorageMap<_, Identity, H256, TokenSwapState>; - - /// Pending transfer messages. - #[pallet::storage] - pub type PendingMessages, I: 'static = ()> = - StorageMap<_, Identity, MessageNonce, H256>; - - impl, I: 'static> OnDeliveryConfirmed for Pallet { - fn on_messages_delivered(lane: &LaneId, delivered_messages: &DeliveredMessages) -> Weight { - // we're only interested in our lane messages - if *lane != T::OutboundMessageLaneId::get() { - return 0 - } - - // so now we're dealing with our lane messages. Ideally we'll have dedicated lane - // and every message from `delivered_messages` is actually our transfer message. - // But it may be some shared lane (which is not recommended). - let mut reads = 0; - let mut writes = 0; - for message_nonce in delivered_messages.begin..=delivered_messages.end { - reads += 1; - if let Some(swap_hash) = PendingMessages::::take(message_nonce) { - writes += 1; - - let token_swap_state = - if delivered_messages.message_dispatch_result(message_nonce) { - TokenSwapState::Confirmed - } else { - TokenSwapState::Failed - }; - - log::trace!( - target: "runtime::bridge-token-swap", - "The dispatch of swap {:?} has been completed with {:?} status", - swap_hash, - token_swap_state, - ); - - PendingSwaps::::insert(swap_hash, token_swap_state); - } - } - - ::DbWeight::get().reads_writes(reads, writes) - } - } - - /// Returns temporary account id used to lock funds during swap on This chain. - pub(crate) fn swap_account_id, I: 'static>( - swap: &TokenSwapOf, - ) -> T::AccountId { - T::FromSwapToThisAccountIdConverter::convert(swap.using_encoded(blake2_256).into()) - } - - /// Expected target account representation on This chain (aka `target_account_at_this_chain`). - pub(crate) fn target_account_at_this_chain, I: 'static>( - swap: &TokenSwapOf, - ) -> T::AccountId { - T::FromBridgedToThisAccountIdConverter::convert(bp_runtime::derive_account_id( - T::BridgedChainId::get(), - bp_runtime::SourceAccount::Account(swap.target_account_at_bridged_chain.clone()), - )) - } - - /// Complete claim with given outcome. 
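// Illustrative sketch, not part of the removed pallet itself: how the two account-derivation
// helpers just above behave when instantiated with the mock runtime from this patch, where
// `AccountId = u64` and `TestAccountConverter` keeps the low 64 bits of an `H256`. The literal
// values and the helper name are assumptions chosen only for the example.
fn derived_accounts_sketch() {
	use codec::Encode;
	use sp_core::H256;

	// `swap_account_id`: hash the SCALE-encoded `TokenSwap` and convert the hash into an account.
	let encoded_swap = (1u64, 2u64).encode(); // stand-in for `swap.encode()`
	let swap_hash = H256::from(sp_io::hashing::blake2_256(&encoded_swap));
	let _swap_account_at_this_chain: u64 = swap_hash.to_low_u64_ne();

	// `target_account_at_this_chain`: derive an id from the bridged chain id and the bridged
	// account, then convert it the same way, so the bridged account gets a deterministic
	// account on This chain.
	let target_account_at_bridged_chain: u64 = 3;
	let derived: H256 = bp_runtime::derive_account_id(
		*b"inst", // mock `BridgedChainId` value
		bp_runtime::SourceAccount::Account(target_account_at_bridged_chain),
	);
	let _target_account_at_this_chain: u64 = derived.to_low_u64_ne();
}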
- pub(crate) fn complete_claim, I: 'static>( - swap: TokenSwapOf, - swap_hash: H256, - destination_account: T::AccountId, - event: Event, - ) -> DispatchResultWithPostInfo { - let swap_account = swap_account_id::(&swap); - frame_support::storage::with_transaction(|| { - // funds are transferred from the temporary Swap account to the destination account - let transfer_result = T::ThisCurrency::transfer( - &swap_account, - &destination_account, - swap.source_balance_at_this_chain, - ExistenceRequirement::AllowDeath, - ); - if let Err(err) = transfer_result { - log::error!( - target: "runtime::bridge-token-swap", - "Failed to transfer This chain tokens for the swap {:?} from the Swap account {:?} to {:?}: {:?}", - swap, - swap_account, - destination_account, - err, - ); - - return sp_runtime::TransactionOutcome::Rollback(Err( - Error::::FailedToTransferFromSwapAccount.into(), - )) - } - - log::trace!( - target: "runtime::bridge-token-swap", - "The swap {:?} (hash {:?}) has been completed with {} status", - swap, - swap_hash, - match event { - Event::SwapClaimed { swap_hash: _ } => "claimed", - Event::SwapCanceled { swap_hash: _ } => "canceled", - _ => "", - }, - ); - - // forget about swap - PendingSwaps::::remove(swap_hash); - - // finally - emit the event - Pallet::::deposit_event(event); - - sp_runtime::TransactionOutcome::Commit(Ok(().into())) - }) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::*; - use frame_support::{assert_noop, assert_ok, storage::generator::StorageMap}; - - const CAN_START_BLOCK_NUMBER: u64 = 10; - const CAN_CLAIM_BLOCK_NUMBER: u64 = CAN_START_BLOCK_NUMBER + 1; - - const BRIDGED_CHAIN_ACCOUNT: BridgedAccountId = 3; - const BRIDGED_CHAIN_SPEC_VERSION: u32 = 4; - const BRIDGED_CHAIN_CALL_WEIGHT: Balance = 5; - - fn bridged_chain_account_public() -> BridgedAccountPublic { - 1.into() - } - - fn bridged_chain_account_signature() -> BridgedAccountSignature { - sp_runtime::testing::TestSignature(2, Vec::new()) - } - - fn test_swap() -> TokenSwapOf { - bp_token_swap::TokenSwap { - swap_type: TokenSwapType::LockClaimUntilBlock(CAN_START_BLOCK_NUMBER, 0.into()), - source_balance_at_this_chain: 100, - source_account_at_this_chain: THIS_CHAIN_ACCOUNT, - target_balance_at_bridged_chain: 200, - target_account_at_bridged_chain: BRIDGED_CHAIN_ACCOUNT, - } - } - - fn test_swap_creation() -> TokenSwapCreationOf { - TokenSwapCreation { - target_public_at_bridged_chain: bridged_chain_account_public(), - swap_delivery_and_dispatch_fee: SWAP_DELIVERY_AND_DISPATCH_FEE, - bridged_chain_spec_version: BRIDGED_CHAIN_SPEC_VERSION, - bridged_currency_transfer: test_transfer(), - bridged_currency_transfer_weight: BRIDGED_CHAIN_CALL_WEIGHT, - bridged_currency_transfer_signature: bridged_chain_account_signature(), - } - } - - fn test_swap_hash() -> H256 { - test_swap().using_encoded(blake2_256).into() - } - - fn test_transfer() -> RawBridgedTransferCall { - vec![OK_TRANSFER_CALL] - } - - fn start_test_swap() { - assert_ok!(Pallet::::create_swap( - mock::Origin::signed(THIS_CHAIN_ACCOUNT), - test_swap(), - Box::new(TokenSwapCreation { - target_public_at_bridged_chain: bridged_chain_account_public(), - swap_delivery_and_dispatch_fee: SWAP_DELIVERY_AND_DISPATCH_FEE, - bridged_chain_spec_version: BRIDGED_CHAIN_SPEC_VERSION, - bridged_currency_transfer: test_transfer(), - bridged_currency_transfer_weight: BRIDGED_CHAIN_CALL_WEIGHT, - bridged_currency_transfer_signature: bridged_chain_account_signature(), - }), - )); - } - - fn receive_test_swap_confirmation(success: bool) { - 
Pallet::::on_messages_delivered( - &OutboundMessageLaneId::get(), - &DeliveredMessages::new(MESSAGE_NONCE, success), - ); - } - - #[test] - fn create_swap_fails_if_origin_is_incorrect() { - run_test(|| { - assert_noop!( - Pallet::::create_swap( - mock::Origin::signed(THIS_CHAIN_ACCOUNT + 1), - test_swap(), - Box::new(test_swap_creation()), - ), - Error::::MismatchedSwapSourceOrigin - ); - }); - } - - #[test] - fn create_swap_fails_if_this_chain_balance_is_below_existential_deposit() { - run_test(|| { - let mut swap = test_swap(); - swap.source_balance_at_this_chain = ExistentialDeposit::get() - 1; - assert_noop!( - Pallet::::create_swap( - mock::Origin::signed(THIS_CHAIN_ACCOUNT), - swap, - Box::new(test_swap_creation()), - ), - Error::::TooLowBalanceOnThisChain - ); - }); - } - - #[test] - fn create_swap_fails_if_currency_transfer_to_swap_account_fails() { - run_test(|| { - let mut swap = test_swap(); - swap.source_balance_at_this_chain = THIS_CHAIN_ACCOUNT_BALANCE + 1; - assert_noop!( - Pallet::::create_swap( - mock::Origin::signed(THIS_CHAIN_ACCOUNT), - swap, - Box::new(test_swap_creation()), - ), - Error::::FailedToTransferToSwapAccount - ); - }); - } - - #[test] - fn create_swap_fails_if_send_message_fails() { - run_test(|| { - let mut transfer = test_transfer(); - transfer[0] = BAD_TRANSFER_CALL; - let mut swap_creation = test_swap_creation(); - swap_creation.bridged_currency_transfer = transfer; - assert_noop!( - Pallet::::create_swap( - mock::Origin::signed(THIS_CHAIN_ACCOUNT), - test_swap(), - Box::new(swap_creation), - ), - Error::::FailedToSendTransferMessage - ); - }); - } - - #[test] - fn create_swap_fails_if_swap_is_active() { - run_test(|| { - assert_ok!(Pallet::::create_swap( - mock::Origin::signed(THIS_CHAIN_ACCOUNT), - test_swap(), - Box::new(test_swap_creation()), - )); - - assert_noop!( - Pallet::::create_swap( - mock::Origin::signed(THIS_CHAIN_ACCOUNT), - test_swap(), - Box::new(test_swap_creation()), - ), - Error::::SwapAlreadyStarted - ); - }); - } - - #[test] - fn create_swap_fails_if_trying_to_start_swap_after_lock_period_is_finished() { - run_test(|| { - frame_system::Pallet::::set_block_number(CAN_START_BLOCK_NUMBER + 1); - assert_noop!( - Pallet::::create_swap( - mock::Origin::signed(THIS_CHAIN_ACCOUNT), - test_swap(), - Box::new(test_swap_creation()), - ), - Error::::SwapPeriodIsFinished - ); - }); - } - - #[test] - fn create_swap_succeeds_if_trying_to_start_swap_at_lock_period_end() { - run_test(|| { - frame_system::Pallet::::set_block_number(CAN_START_BLOCK_NUMBER); - assert_ok!(Pallet::::create_swap( - mock::Origin::signed(THIS_CHAIN_ACCOUNT), - test_swap(), - Box::new(test_swap_creation()), - )); - }); - } - - #[test] - fn create_swap_succeeds() { - run_test(|| { - frame_system::Pallet::::set_block_number(1); - frame_system::Pallet::::reset_events(); - - assert_ok!(Pallet::::create_swap( - mock::Origin::signed(THIS_CHAIN_ACCOUNT), - test_swap(), - Box::new(test_swap_creation()), - )); - - let swap_hash = test_swap_hash(); - assert_eq!(PendingSwaps::::get(swap_hash), Some(TokenSwapState::Started)); - assert_eq!(PendingMessages::::get(MESSAGE_NONCE), Some(swap_hash)); - assert_eq!( - pallet_balances::Pallet::::free_balance(&swap_account_id::< - TestRuntime, - (), - >(&test_swap())), - test_swap().source_balance_at_this_chain + SWAP_DELIVERY_AND_DISPATCH_FEE, - ); - assert!( - frame_system::Pallet::::events().iter().any(|e| e.event == - crate::mock::Event::TokenSwap(crate::Event::SwapStarted { - swap_hash, - message_nonce: MESSAGE_NONCE, - })), - "Missing 
SwapStarted event: {:?}", - frame_system::Pallet::::events(), - ); - }); - } - - #[test] - fn claim_swap_fails_if_origin_is_incorrect() { - run_test(|| { - assert_noop!( - Pallet::::claim_swap( - mock::Origin::signed( - 1 + target_account_at_this_chain::(&test_swap()) - ), - test_swap(), - ), - Error::::InvalidClaimant - ); - }); - } - - #[test] - fn claim_swap_fails_if_swap_is_pending() { - run_test(|| { - PendingSwaps::::insert(test_swap_hash(), TokenSwapState::Started); - - assert_noop!( - Pallet::::claim_swap( - mock::Origin::signed(target_account_at_this_chain::( - &test_swap() - )), - test_swap(), - ), - Error::::SwapIsPending - ); - }); - } - - #[test] - fn claim_swap_fails_if_swap_is_failed() { - run_test(|| { - PendingSwaps::::insert(test_swap_hash(), TokenSwapState::Failed); - - assert_noop!( - Pallet::::claim_swap( - mock::Origin::signed(target_account_at_this_chain::( - &test_swap() - )), - test_swap(), - ), - Error::::SwapIsFailed - ); - }); - } - - #[test] - fn claim_swap_fails_if_swap_is_inactive() { - run_test(|| { - assert_noop!( - Pallet::::claim_swap( - mock::Origin::signed(target_account_at_this_chain::( - &test_swap() - )), - test_swap(), - ), - Error::::SwapIsInactive - ); - }); - } - - #[test] - fn claim_swap_fails_if_currency_transfer_from_swap_account_fails() { - run_test(|| { - frame_system::Pallet::::set_block_number(CAN_CLAIM_BLOCK_NUMBER); - PendingSwaps::::insert(test_swap_hash(), TokenSwapState::Confirmed); - - assert_noop!( - Pallet::::claim_swap( - mock::Origin::signed(target_account_at_this_chain::( - &test_swap() - )), - test_swap(), - ), - Error::::FailedToTransferFromSwapAccount - ); - }); - } - - #[test] - fn claim_swap_fails_before_lock_period_is_completed() { - run_test(|| { - start_test_swap(); - receive_test_swap_confirmation(true); - - frame_system::Pallet::::set_block_number(CAN_CLAIM_BLOCK_NUMBER - 1); - - assert_noop!( - Pallet::::claim_swap( - mock::Origin::signed(target_account_at_this_chain::( - &test_swap() - )), - test_swap(), - ), - Error::::SwapIsTemporaryLocked - ); - }); - } - - #[test] - fn claim_swap_succeeds() { - run_test(|| { - start_test_swap(); - receive_test_swap_confirmation(true); - - frame_system::Pallet::::set_block_number(CAN_CLAIM_BLOCK_NUMBER); - frame_system::Pallet::::reset_events(); - - assert_ok!(Pallet::::claim_swap( - mock::Origin::signed(target_account_at_this_chain::(&test_swap())), - test_swap(), - )); - - let swap_hash = test_swap_hash(); - assert_eq!(PendingSwaps::::get(swap_hash), None); - assert_eq!( - pallet_balances::Pallet::::free_balance(&swap_account_id::< - TestRuntime, - (), - >(&test_swap())), - 0, - ); - assert_eq!( - pallet_balances::Pallet::::free_balance( - &target_account_at_this_chain::(&test_swap()), - ), - test_swap().source_balance_at_this_chain, - ); - assert!( - frame_system::Pallet::::events().iter().any(|e| e.event == - crate::mock::Event::TokenSwap(crate::Event::SwapClaimed { swap_hash })), - "Missing SwapClaimed event: {:?}", - frame_system::Pallet::::events(), - ); - }); - } - - #[test] - fn cancel_swap_fails_if_origin_is_incorrect() { - run_test(|| { - start_test_swap(); - receive_test_swap_confirmation(false); - - assert_noop!( - Pallet::::cancel_swap( - mock::Origin::signed(THIS_CHAIN_ACCOUNT + 1), - test_swap() - ), - Error::::MismatchedSwapSourceOrigin - ); - }); - } - - #[test] - fn cancel_swap_fails_if_swap_is_pending() { - run_test(|| { - start_test_swap(); - - assert_noop!( - Pallet::::cancel_swap( - mock::Origin::signed(THIS_CHAIN_ACCOUNT), - test_swap() - ), - 
Error::::SwapIsPending - ); - }); - } - - #[test] - fn cancel_swap_fails_if_swap_is_confirmed() { - run_test(|| { - start_test_swap(); - receive_test_swap_confirmation(true); - - assert_noop!( - Pallet::::cancel_swap( - mock::Origin::signed(THIS_CHAIN_ACCOUNT), - test_swap() - ), - Error::::SwapIsConfirmed - ); - }); - } - - #[test] - fn cancel_swap_fails_if_swap_is_inactive() { - run_test(|| { - assert_noop!( - Pallet::::cancel_swap( - mock::Origin::signed(THIS_CHAIN_ACCOUNT), - test_swap() - ), - Error::::SwapIsInactive - ); - }); - } - - #[test] - fn cancel_swap_fails_if_currency_transfer_from_swap_account_fails() { - run_test(|| { - start_test_swap(); - receive_test_swap_confirmation(false); - let _ = pallet_balances::Pallet::::slash( - &swap_account_id::(&test_swap()), - test_swap().source_balance_at_this_chain, - ); - - assert_noop!( - Pallet::::cancel_swap( - mock::Origin::signed(THIS_CHAIN_ACCOUNT), - test_swap() - ), - Error::::FailedToTransferFromSwapAccount - ); - }); - } - - #[test] - fn cancel_swap_succeeds() { - run_test(|| { - start_test_swap(); - receive_test_swap_confirmation(false); - - frame_system::Pallet::::set_block_number(1); - frame_system::Pallet::::reset_events(); - - assert_ok!(Pallet::::cancel_swap( - mock::Origin::signed(THIS_CHAIN_ACCOUNT), - test_swap() - )); - - let swap_hash = test_swap_hash(); - assert_eq!(PendingSwaps::::get(swap_hash), None); - assert_eq!( - pallet_balances::Pallet::::free_balance(&swap_account_id::< - TestRuntime, - (), - >(&test_swap())), - 0, - ); - assert_eq!( - pallet_balances::Pallet::::free_balance(&THIS_CHAIN_ACCOUNT), - THIS_CHAIN_ACCOUNT_BALANCE - SWAP_DELIVERY_AND_DISPATCH_FEE, - ); - assert!( - frame_system::Pallet::::events().iter().any(|e| e.event == - crate::mock::Event::TokenSwap(crate::Event::SwapCanceled { swap_hash })), - "Missing SwapCanceled event: {:?}", - frame_system::Pallet::::events(), - ); - }); - } - - #[test] - fn messages_delivery_confirmations_are_accepted() { - run_test(|| { - start_test_swap(); - assert_eq!( - PendingMessages::::get(MESSAGE_NONCE), - Some(test_swap_hash()) - ); - assert_eq!( - PendingSwaps::::get(test_swap_hash()), - Some(TokenSwapState::Started) - ); - - // when unrelated messages are delivered - let mut messages = DeliveredMessages::new(MESSAGE_NONCE - 2, true); - messages.note_dispatched_message(false); - Pallet::::on_messages_delivered( - &OutboundMessageLaneId::get(), - &messages, - ); - assert_eq!( - PendingMessages::::get(MESSAGE_NONCE), - Some(test_swap_hash()) - ); - assert_eq!( - PendingSwaps::::get(test_swap_hash()), - Some(TokenSwapState::Started) - ); - - // when message we're interested in is accompanied by a bunch of other messages - let mut messages = DeliveredMessages::new(MESSAGE_NONCE - 1, false); - messages.note_dispatched_message(true); - messages.note_dispatched_message(false); - Pallet::::on_messages_delivered( - &OutboundMessageLaneId::get(), - &messages, - ); - assert_eq!(PendingMessages::::get(MESSAGE_NONCE), None); - assert_eq!( - PendingSwaps::::get(test_swap_hash()), - Some(TokenSwapState::Confirmed) - ); - }); - } - - #[test] - fn storage_keys_computed_properly() { - assert_eq!( - PendingSwaps::::storage_map_final_key(test_swap_hash()), - bp_token_swap::storage_keys::pending_swaps_key("TokenSwap", test_swap_hash()).0, - ); - } -} diff --git a/polkadot/bridges/modules/token-swap/src/mock.rs b/polkadot/bridges/modules/token-swap/src/mock.rs deleted file mode 100644 index ece7b16acc9..00000000000 --- a/polkadot/bridges/modules/token-swap/src/mock.rs +++ 
/dev/null @@ -1,200 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate as pallet_bridge_token_swap; -use crate::MessagePayloadOf; - -use bp_messages::{ - source_chain::{MessagesBridge, SendMessageArtifacts}, - LaneId, MessageNonce, -}; -use bp_runtime::ChainId; -use frame_support::weights::Weight; -use sp_core::H256; -use sp_runtime::{ - testing::Header as SubstrateHeader, - traits::{BlakeTwo256, IdentityLookup}, - Perbill, -}; - -pub type AccountId = u64; -pub type Balance = u64; -pub type Block = frame_system::mocking::MockBlock; -pub type BridgedAccountId = u64; -pub type BridgedAccountPublic = sp_runtime::testing::UintAuthorityId; -pub type BridgedAccountSignature = sp_runtime::testing::TestSignature; -pub type BridgedBalance = u64; -pub type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; - -pub const OK_TRANSFER_CALL: u8 = 1; -pub const BAD_TRANSFER_CALL: u8 = 2; -pub const MESSAGE_NONCE: MessageNonce = 3; - -pub const THIS_CHAIN_ACCOUNT: AccountId = 1; -pub const THIS_CHAIN_ACCOUNT_BALANCE: Balance = 100_000; - -pub const SWAP_DELIVERY_AND_DISPATCH_FEE: Balance = 1; - -frame_support::construct_runtime! { - pub enum TestRuntime where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Balances: pallet_balances::{Pallet, Call, Event}, - TokenSwap: pallet_bridge_token_swap::{Pallet, Call, Event, Origin}, - } -} - -frame_support::parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); -} - -impl frame_system::Config for TestRuntime { - type Origin = Origin; - type Index = u64; - type Call = Call; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type Header = SubstrateHeader; - type Event = Event; - type BlockHashCount = BlockHashCount; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type BaseCallFilter = frame_support::traits::Everything; - type SystemWeightInfo = (); - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type SS58Prefix = (); - type OnSetCode = (); - type MaxConsumers = frame_support::traits::ConstU32<16>; -} - -frame_support::parameter_types! 
{ - pub const ExistentialDeposit: u64 = 10; - pub const MaxReserves: u32 = 50; -} - -impl pallet_balances::Config for TestRuntime { - type MaxLocks = (); - type Balance = Balance; - type DustRemoval = (); - type Event = Event; - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = frame_system::Pallet; - type WeightInfo = (); - type MaxReserves = MaxReserves; - type ReserveIdentifier = [u8; 8]; -} - -frame_support::parameter_types! { - pub const BridgedChainId: ChainId = *b"inst"; - pub const OutboundMessageLaneId: LaneId = *b"lane"; -} - -impl pallet_bridge_token_swap::Config for TestRuntime { - type Event = Event; - type WeightInfo = (); - - type BridgedChainId = BridgedChainId; - type OutboundMessageLaneId = OutboundMessageLaneId; - type MessagesBridge = TestMessagesBridge; - - type ThisCurrency = pallet_balances::Pallet; - type FromSwapToThisAccountIdConverter = TestAccountConverter; - - type BridgedChain = BridgedChain; - type FromBridgedToThisAccountIdConverter = TestAccountConverter; -} - -pub struct BridgedChain; - -impl bp_runtime::Chain for BridgedChain { - type BlockNumber = u64; - type Hash = H256; - type Hasher = BlakeTwo256; - type Header = sp_runtime::generic::Header; - - type AccountId = BridgedAccountId; - type Balance = BridgedBalance; - type Index = u64; - type Signature = BridgedAccountSignature; - - fn max_extrinsic_size() -> u32 { - unreachable!() - } - fn max_extrinsic_weight() -> Weight { - unreachable!() - } -} - -pub struct TestMessagesBridge; - -impl MessagesBridge> - for TestMessagesBridge -{ - type Error = (); - - fn send_message( - sender: Origin, - lane: LaneId, - message: MessagePayloadOf, - delivery_and_dispatch_fee: Balance, - ) -> Result { - assert_eq!(lane, OutboundMessageLaneId::get()); - assert_eq!(delivery_and_dispatch_fee, SWAP_DELIVERY_AND_DISPATCH_FEE); - match sender.caller { - OriginCaller::TokenSwap(_) => (), - _ => panic!("unexpected origin"), - } - match message.call[0] { - OK_TRANSFER_CALL => Ok(SendMessageArtifacts { nonce: MESSAGE_NONCE, weight: 0 }), - BAD_TRANSFER_CALL => Err(()), - _ => unreachable!(), - } - } -} - -pub struct TestAccountConverter; - -impl sp_runtime::traits::Convert for TestAccountConverter { - fn convert(hash: H256) -> AccountId { - hash.to_low_u64_ne() - } -} - -/// Run pallet test. -pub fn run_test(test: impl FnOnce() -> T) -> T { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig:: { - balances: vec![(THIS_CHAIN_ACCOUNT, THIS_CHAIN_ACCOUNT_BALANCE)], - } - .assimilate_storage(&mut t) - .unwrap(); - let mut ext = sp_io::TestExternalities::new(t); - ext.execute_with(test) -} diff --git a/polkadot/bridges/modules/token-swap/src/weights.rs b/polkadot/bridges/modules/token-swap/src/weights.rs deleted file mode 100644 index 51c5d99de9c..00000000000 --- a/polkadot/bridges/modules/token-swap/src/weights.rs +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Autogenerated weights for `pallet_bridge_token_swap` -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2021-12-28, STEPS: 50, REPEAT: 20 -//! LOW RANGE: [], HIGH RANGE: [] -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled -//! CHAIN: Some("dev"), DB CACHE: 128 - -// Executed Command: -// target/release/millau-bridge-node -// benchmark -// --chain=dev -// --steps=50 -// --repeat=20 -// --pallet=pallet_bridge_token_swap -// --extrinsic=* -// --execution=wasm -// --wasm-execution=Compiled -// --heap-pages=4096 -// --output=./modules/token-swap/src/weights.rs -// --template=./.maintain/millau-weight-template.hbs - -#![allow(clippy::all)] -#![allow(unused_parens)] -#![allow(unused_imports)] - -use frame_support::{ - traits::Get, - weights::{constants::RocksDbWeight, Weight}, -}; -use sp_std::marker::PhantomData; - -/// Weight functions needed for `pallet_bridge_token_swap`. -pub trait WeightInfo { - fn create_swap() -> Weight; - fn claim_swap() -> Weight; - fn cancel_swap() -> Weight; -} - -/// Weights for `pallet_bridge_token_swap` using the Millau node and recommended hardware. -pub struct MillauWeight(PhantomData); -impl WeightInfo for MillauWeight { - fn create_swap() -> Weight { - (90_368_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) - } - fn claim_swap() -> Weight { - (88_397_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } - fn cancel_swap() -> Weight { - (91_253_000 as Weight) - .saturating_add(T::DbWeight::get().reads(3 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) - } -} - -// For backwards compatibility and tests -impl WeightInfo for () { - fn create_swap() -> Weight { - (90_368_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) - } - fn claim_swap() -> Weight { - (88_397_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - } - fn cancel_swap() -> Weight { - (91_253_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(3 as Weight)) - .saturating_add(RocksDbWeight::get().writes(3 as Weight)) - } -} diff --git a/polkadot/bridges/modules/token-swap/src/weights_ext.rs b/polkadot/bridges/modules/token-swap/src/weights_ext.rs deleted file mode 100644 index 2d27c76cbe6..00000000000 --- a/polkadot/bridges/modules/token-swap/src/weights_ext.rs +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Weight-related utilities. - -use crate::weights::WeightInfo; - -use bp_runtime::Size; -use frame_support::weights::{RuntimeDbWeight, Weight}; - -/// Extended weight info. -pub trait WeightInfoExt: WeightInfo { - // Functions that are directly mapped to extrinsics weights. - - /// Weight of message send extrinsic. - fn send_message_weight(message: &impl Size, db_weight: RuntimeDbWeight) -> Weight; -} - -impl WeightInfoExt for () { - fn send_message_weight(message: &impl Size, db_weight: RuntimeDbWeight) -> Weight { - <() as pallet_bridge_messages::WeightInfoExt>::send_message_weight(message, db_weight) - } -} - -impl WeightInfoExt for crate::weights::MillauWeight { - fn send_message_weight(message: &impl Size, db_weight: RuntimeDbWeight) -> Weight { - <() as pallet_bridge_messages::WeightInfoExt>::send_message_weight(message, db_weight) - } -} diff --git a/polkadot/bridges/primitives/chain-kusama/Cargo.toml b/polkadot/bridges/primitives/chain-kusama/Cargo.toml deleted file mode 100644 index a676b565c33..00000000000 --- a/polkadot/bridges/primitives/chain-kusama/Cargo.toml +++ /dev/null @@ -1,37 +0,0 @@ -[package] -name = "bp-kusama" -description = "Primitives of Kusama runtime." -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -smallvec = "1.7" - -# Bridge Dependencies - -bp-messages = { path = "../messages", default-features = false } -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } - -# Substrate Based Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } - -[features] -default = ["std"] -std = [ - "bp-messages/std", - "bp-polkadot-core/std", - "bp-runtime/std", - "frame-support/std", - "sp-api/std", - "sp-runtime/std", - "sp-std/std", - "sp-version/std", -] diff --git a/polkadot/bridges/primitives/chain-kusama/src/lib.rs b/polkadot/bridges/primitives/chain-kusama/src/lib.rs deleted file mode 100644 index a0a5990ca08..00000000000 --- a/polkadot/bridges/primitives/chain-kusama/src/lib.rs +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
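// Illustrative sketch, not part of the removed crates: how the `create_swap` dispatch weight
// seen earlier in this patch is composed, namely the benchmarked base weight plus a
// message-size-dependent part that `WeightInfoExt::send_message_weight` delegates to the
// messages pallet. The base value mirrors the Millau benchmark above; the db weights and the
// function name are assumptions for the example.
fn create_swap_weight_sketch(encoded_transfer: &[u8]) -> frame_support::weights::Weight {
	use frame_support::weights::RuntimeDbWeight;

	// stand-in for `T::WeightInfo::create_swap()` (benchmarked base weight)
	let base_weight: frame_support::weights::Weight = 90_368_000;
	// stand-in for `T::DbWeight::get()`
	let db_weight = RuntimeDbWeight { read: 25_000_000, write: 100_000_000 };
	// same call shape as the `#[pallet::weight(...)]` attribute on `create_swap`: the fallback
	// `()` implementation sizes this part by the encoded transfer call
	let send_message_weight =
		<() as pallet_bridge_messages::WeightInfoExt>::send_message_weight(&encoded_transfer, db_weight);
	base_weight.saturating_add(send_message_weight)
}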
- -#![cfg_attr(not(feature = "std"), no_std)] -// RuntimeApi generated functions -#![allow(clippy::too_many_arguments)] - -use bp_messages::{LaneId, MessageDetails, MessageNonce}; -use frame_support::weights::{ - WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial, -}; -use sp_runtime::FixedU128; -use sp_std::prelude::*; -use sp_version::RuntimeVersion; - -pub use bp_polkadot_core::*; - -/// Kusama Chain -pub type Kusama = PolkadotLike; - -// NOTE: This needs to be kept up to date with the Kusama runtime found in the Polkadot repo. -pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: sp_version::create_runtime_str!("kusama"), - impl_name: sp_version::create_runtime_str!("parity-kusama"), - authoring_version: 2, - spec_version: 9180, - impl_version: 0, - apis: sp_version::create_apis_vec![[]], - transaction_version: 11, - state_version: 0, -}; - -// NOTE: This needs to be kept up to date with the Kusama runtime found in the Polkadot repo. -pub struct WeightToFee; -impl WeightToFeePolynomial for WeightToFee { - type Balance = Balance; - fn polynomial() -> WeightToFeeCoefficients { - const CENTS: Balance = 1_000_000_000_000 / 30_000; - // in Kusama, extrinsic base weight (smallest non-zero weight) is mapped to 1/10 CENT: - let p = CENTS; - let q = 10 * Balance::from(ExtrinsicBaseWeight::get()); - smallvec::smallvec![WeightToFeeCoefficient { - degree: 1, - negative: false, - coeff_frac: Perbill::from_rational(p % q, q), - coeff_integer: p / q, - }] - } -} - -// We use this to get the account on Kusama (target) which is derived from Polkadot's (source) -// account. -pub fn derive_account_from_polkadot_id(id: bp_runtime::SourceAccount) -> AccountId { - let encoded_id = bp_runtime::derive_account_id(bp_runtime::POLKADOT_CHAIN_ID, id); - AccountIdConverter::convert(encoded_id) -} - -/// Per-byte fee for Kusama transactions. -pub const TRANSACTION_BYTE_FEE: Balance = 10 * 1_000_000_000_000 / 30_000 / 1_000; - -/// Existential deposit on Kusama. -pub const EXISTENTIAL_DEPOSIT: Balance = 1_000_000_000_000 / 30_000; - -/// The target length of a session (how often authorities change) on Kusama measured in of number of -/// blocks. -/// -/// Note that since this is a target sessions may change before/after this time depending on network -/// conditions. -pub const SESSION_LENGTH: BlockNumber = time_units::HOURS; - -/// Name of the With-Kusama GRANDPA pallet instance that is deployed at bridged chains. -pub const WITH_KUSAMA_GRANDPA_PALLET_NAME: &str = "BridgeKusamaGrandpa"; -/// Name of the With-Kusama messages pallet instance that is deployed at bridged chains. -pub const WITH_KUSAMA_MESSAGES_PALLET_NAME: &str = "BridgeKusamaMessages"; - -/// Name of the transaction payment pallet at the Kusama runtime. -pub const TRANSACTION_PAYMENT_PALLET_NAME: &str = "TransactionPayment"; - -/// Name of the DOT->KSM conversion rate stored in the Kusama runtime. -pub const POLKADOT_TO_KUSAMA_CONVERSION_RATE_PARAMETER_NAME: &str = - "PolkadotToKusamaConversionRate"; -/// Name of the Polkadot fee multiplier parameter, stored in the Polkadot runtime. -pub const POLKADOT_FEE_MULTIPLIER_PARAMETER_NAME: &str = "PolkadotFeeMultiplier"; - -/// Name of the `KusamaFinalityApi::best_finalized` runtime method. -pub const BEST_FINALIZED_KUSAMA_HEADER_METHOD: &str = "KusamaFinalityApi_best_finalized"; - -/// Name of the `ToKusamaOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime -/// method. 
-pub const TO_KUSAMA_ESTIMATE_MESSAGE_FEE_METHOD: &str = - "ToKusamaOutboundLaneApi_estimate_message_delivery_and_dispatch_fee"; -/// Name of the `ToKusamaOutboundLaneApi::message_details` runtime method. -pub const TO_KUSAMA_MESSAGE_DETAILS_METHOD: &str = "ToKusamaOutboundLaneApi_message_details"; - -sp_api::decl_runtime_apis! { - /// API for querying information about the finalized Kusama headers. - /// - /// This API is implemented by runtimes that are bridging with the Kusama chain, not the - /// Kusama runtime itself. - pub trait KusamaFinalityApi { - /// Returns number and hash of the best finalized header known to the bridge module. - fn best_finalized() -> (BlockNumber, Hash); - } - - /// Outbound message lane API for messages that are sent to Kusama chain. - /// - /// This API is implemented by runtimes that are sending messages to Kusama chain, not the - /// Kusama runtime itself. - pub trait ToKusamaOutboundLaneApi { - /// Estimate message delivery and dispatch fee that needs to be paid by the sender on - /// this chain. - /// - /// Returns `None` if message is too expensive to be sent to Kusama from this chain. - /// - /// Please keep in mind that this method returns the lowest message fee required for message - /// to be accepted to the lane. It may be good idea to pay a bit over this price to account - /// future exchange rate changes and guarantee that relayer would deliver your message - /// to the target chain. - fn estimate_message_delivery_and_dispatch_fee( - lane_id: LaneId, - payload: OutboundPayload, - kusama_to_this_conversion_rate: Option, - ) -> Option; - /// Returns dispatch weight, encoded payload size and delivery+dispatch fee of all - /// messages in given inclusive range. - /// - /// If some (or all) messages are missing from the storage, they'll also will - /// be missing from the resulting vector. The vector is ordered by the nonce. - fn message_details( - lane: LaneId, - begin: MessageNonce, - end: MessageNonce, - ) -> Vec>; - } -} diff --git a/polkadot/bridges/primitives/chain-millau/Cargo.toml b/polkadot/bridges/primitives/chain-millau/Cargo.toml deleted file mode 100644 index 0aaeb5b6bf9..00000000000 --- a/polkadot/bridges/primitives/chain-millau/Cargo.toml +++ /dev/null @@ -1,54 +0,0 @@ -[package] -name = "bp-millau" -description = "Primitives of Millau runtime." 
-version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] - -# Bridge Dependencies - -bp-messages = { path = "../messages", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } -fixed-hash = { version = "0.7.0", default-features = false } -hash256-std-hasher = { version = "0.15.2", default-features = false } -impl-codec = { version = "0.6", default-features = false } -impl-serde = { version = "0.3.1", optional = true } -parity-util-mem = { version = "0.11", default-features = false, features = ["primitive-types"] } -scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } -serde = { version = "1.0", optional = true, features = ["derive"] } - -# Substrate Based Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } - -[features] -default = ["std"] -std = [ - "bp-messages/std", - "bp-runtime/std", - "fixed-hash/std", - "frame-support/std", - "frame-system/std", - "hash256-std-hasher/std", - "impl-codec/std", - "impl-serde", - "parity-util-mem/std", - "scale-info/std", - "serde", - "sp-api/std", - "sp-core/std", - "sp-io/std", - "sp-runtime/std", - "sp-std/std", - "sp-trie/std", -] diff --git a/polkadot/bridges/primitives/chain-millau/src/lib.rs b/polkadot/bridges/primitives/chain-millau/src/lib.rs deleted file mode 100644 index ff8d5385953..00000000000 --- a/polkadot/bridges/primitives/chain-millau/src/lib.rs +++ /dev/null @@ -1,341 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -#![cfg_attr(not(feature = "std"), no_std)] -// RuntimeApi generated functions -#![allow(clippy::too_many_arguments)] - -mod millau_hash; - -use bp_messages::{LaneId, MessageDetails, MessageNonce}; -use bp_runtime::Chain; -use frame_support::{ - weights::{constants::WEIGHT_PER_SECOND, DispatchClass, IdentityFee, Weight}, - Parameter, RuntimeDebug, -}; -use frame_system::limits; -use scale_info::TypeInfo; -use sp_core::{storage::StateVersion, Hasher as HasherT}; -use sp_runtime::{ - traits::{Convert, IdentifyAccount, Verify}, - FixedU128, MultiSignature, MultiSigner, Perbill, -}; -use sp_std::prelude::*; -use sp_trie::{LayoutV0, LayoutV1, TrieConfiguration}; - -#[cfg(feature = "std")] -use serde::{Deserialize, Serialize}; - -pub use millau_hash::MillauHash; - -/// Number of extra bytes (excluding size of storage value itself) of storage proof, built at -/// Millau chain. This mostly depends on number of entries (and their density) in the storage trie. -/// Some reserve is reserved to account future chain growth. -pub const EXTRA_STORAGE_PROOF_SIZE: u32 = 1024; - -/// Number of bytes, included in the signed Millau transaction apart from the encoded call itself. -/// -/// Can be computed by subtracting encoded call size from raw transaction size. -pub const TX_EXTRA_BYTES: u32 = 103; - -/// Maximal size (in bytes) of encoded (using `Encode::encode()`) account id. -pub const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = 32; - -/// Maximum weight of single Millau block. -/// -/// This represents 0.5 seconds of compute assuming a target block time of six seconds. -pub const MAXIMUM_BLOCK_WEIGHT: Weight = WEIGHT_PER_SECOND / 2; - -/// Represents the average portion of a block's weight that will be used by an -/// `on_initialize()` runtime call. -pub const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); - -/// Represents the portion of a block that will be used by Normal extrinsics. -pub const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); - -/// Maximal number of unrewarded relayer entries in Millau confirmation transaction. -pub const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = 128; - -/// Maximal number of unconfirmed messages in Millau confirmation transaction. -pub const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = 128; - -/// Weight of single regular message delivery transaction on Millau chain. -/// -/// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_proof_weight()` call -/// for the case when single message of `pallet_bridge_messages::EXPECTED_DEFAULT_MESSAGE_LENGTH` -/// bytes is delivered. The message must have dispatch weight set to zero. The result then must be -/// rounded up to account possible future runtime upgrades. -pub const DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT: Weight = 1_500_000_000; - -/// Increase of delivery transaction weight on Millau chain with every additional message byte. -/// -/// This value is a result of -/// `pallet_bridge_messages::WeightInfoExt::storage_proof_size_overhead(1)` call. The result then -/// must be rounded up to account possible future runtime upgrades. -pub const ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT: Weight = 25_000; - -/// Maximal weight of single message delivery confirmation transaction on Millau chain. -/// -/// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_delivery_proof` -/// weight formula computation for the case when single message is confirmed. 
The result then must -/// be rounded up to account possible future runtime upgrades. -pub const MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT: Weight = 2_000_000_000; - -/// Weight of pay-dispatch-fee operation for inbound messages at Millau chain. -/// -/// This value corresponds to the result of -/// `pallet_bridge_messages::WeightInfoExt::pay_inbound_dispatch_fee_overhead()` call for your -/// chain. Don't put too much reserve there, because it is used to **decrease** -/// `DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT` cost. So putting large reserve would make delivery -/// transactions cheaper. -pub const PAY_INBOUND_DISPATCH_FEE_WEIGHT: Weight = 700_000_000; - -/// The target length of a session (how often authorities change) on Millau measured in of number of -/// blocks. -/// -/// Note that since this is a target sessions may change before/after this time depending on network -/// conditions. -pub const SESSION_LENGTH: BlockNumber = 5 * time_units::MINUTES; - -/// Re-export `time_units` to make usage easier. -pub use time_units::*; - -/// Human readable time units defined in terms of number of blocks. -pub mod time_units { - use super::BlockNumber; - - pub const MILLISECS_PER_BLOCK: u64 = 6000; - pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; - - pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber); - pub const HOURS: BlockNumber = MINUTES * 60; - pub const DAYS: BlockNumber = HOURS * 24; -} - -/// Block number type used in Millau. -pub type BlockNumber = u64; - -/// Hash type used in Millau. -pub type Hash = ::Out; - -/// Type of object that can produce hashes on Millau. -pub type Hasher = BlakeTwoAndKeccak256; - -/// The header type used by Millau. -pub type Header = sp_runtime::generic::Header; - -/// Alias to 512-bit hash when used in the context of a transaction signature on the chain. -pub type Signature = MultiSignature; - -/// Some way of identifying an account on the chain. We intentionally make it equivalent -/// to the public key of our transaction signing scheme. -pub type AccountId = <::Signer as IdentifyAccount>::AccountId; - -/// Public key of the chain account that may be used to verify signatures. -pub type AccountSigner = MultiSigner; - -/// Balance of an account. -pub type Balance = u64; - -/// Index of a transaction in the chain. -pub type Index = u32; - -/// Weight-to-Fee type used by Millau. -pub type WeightToFee = IdentityFee; - -/// Millau chain. -#[derive(RuntimeDebug)] -pub struct Millau; - -impl Chain for Millau { - type BlockNumber = BlockNumber; - type Hash = Hash; - type Hasher = Hasher; - type Header = Header; - - type AccountId = AccountId; - type Balance = Balance; - type Index = Index; - type Signature = Signature; - - fn max_extrinsic_size() -> u32 { - *BlockLength::get().max.get(DispatchClass::Normal) - } - - fn max_extrinsic_weight() -> Weight { - BlockWeights::get() - .get(DispatchClass::Normal) - .max_extrinsic - .unwrap_or(Weight::MAX) - } -} - -/// Millau Hasher (Blake2-256 ++ Keccak-256) implementation. 
-#[derive(PartialEq, Eq, Clone, Copy, RuntimeDebug, TypeInfo)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -pub struct BlakeTwoAndKeccak256; - -impl sp_core::Hasher for BlakeTwoAndKeccak256 { - type Out = MillauHash; - type StdHasher = hash256_std_hasher::Hash256StdHasher; - const LENGTH: usize = 64; - - fn hash(s: &[u8]) -> Self::Out { - let mut combined_hash = MillauHash::default(); - combined_hash.as_mut()[..32].copy_from_slice(&sp_io::hashing::blake2_256(s)); - combined_hash.as_mut()[32..].copy_from_slice(&sp_io::hashing::keccak_256(s)); - combined_hash - } -} - -impl sp_runtime::traits::Hash for BlakeTwoAndKeccak256 { - type Output = MillauHash; - - fn trie_root(input: Vec<(Vec, Vec)>, state_version: StateVersion) -> Self::Output { - match state_version { - StateVersion::V0 => LayoutV0::::trie_root(input), - StateVersion::V1 => LayoutV1::::trie_root(input), - } - } - - fn ordered_trie_root(input: Vec>, state_version: StateVersion) -> Self::Output { - match state_version { - StateVersion::V0 => LayoutV0::::ordered_trie_root(input), - StateVersion::V1 => LayoutV1::::ordered_trie_root(input), - } - } -} - -/// Convert a 256-bit hash into an AccountId. -pub struct AccountIdConverter; - -impl sp_runtime::traits::Convert for AccountIdConverter { - fn convert(hash: sp_core::H256) -> AccountId { - hash.to_fixed_bytes().into() - } -} - -/// We use this to get the account on Millau (target) which is derived from Rialto's (source) -/// account. We do this so we can fund the derived account on Millau at Genesis to it can pay -/// transaction fees. -/// -/// The reason we can use the same `AccountId` type for both chains is because they share the same -/// development seed phrase. -/// -/// Note that this should only be used for testing. -pub fn derive_account_from_rialto_id(id: bp_runtime::SourceAccount) -> AccountId { - let encoded_id = bp_runtime::derive_account_id(bp_runtime::RIALTO_CHAIN_ID, id); - AccountIdConverter::convert(encoded_id) -} - -frame_support::parameter_types! { - pub BlockLength: limits::BlockLength = - limits::BlockLength::max_with_normal_ratio(2 * 1024 * 1024, NORMAL_DISPATCH_RATIO); - pub BlockWeights: limits::BlockWeights = limits::BlockWeights::builder() - // Allowance for Normal class - .for_class(DispatchClass::Normal, |weights| { - weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); - }) - // Allowance for Operational class - .for_class(DispatchClass::Operational, |weights| { - weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); - // Extra reserved space for Operational class - weights.reserved = Some(MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); - }) - // By default Mandatory class is not limited at all. - // This parameter is used to derive maximal size of a single extrinsic. - .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) - .build_or_panic(); -} - -/// Name of the With-Millau GRANDPA pallet instance that is deployed at bridged chains. -pub const WITH_MILLAU_GRANDPA_PALLET_NAME: &str = "BridgeMillauGrandpa"; -/// Name of the With-Millau messages pallet instance that is deployed at bridged chains. -pub const WITH_MILLAU_MESSAGES_PALLET_NAME: &str = "BridgeMillauMessages"; - -/// Name of the Rialto->Millau (actually DOT->KSM) conversion rate stored in the Millau runtime. -pub const RIALTO_TO_MILLAU_CONVERSION_RATE_PARAMETER_NAME: &str = "RialtoToMillauConversionRate"; - -/// Name of the With-Rialto token swap pallet instance in the Millau runtime. 
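As a point of reference for the combined hasher above: the first 32 bytes of a `MillauHash` are the Blake2-256 digest of the pre-image and the last 32 bytes are its Keccak-256 digest. A minimal sketch of that layout (the test wrapper and the pre-image are illustrative assumptions, not code from the removed crate):

#[test]
fn combined_hash_layout() {
	let preimage: &[u8] = b"millau";
	let hash = <BlakeTwoAndKeccak256 as sp_core::Hasher>::hash(preimage);

	// Layout of the 64-byte hash: Blake2-256 digest || Keccak-256 digest.
	assert_eq!(&hash.as_bytes()[..32], &sp_io::hashing::blake2_256(preimage)[..]);
	assert_eq!(&hash.as_bytes()[32..], &sp_io::hashing::keccak_256(preimage)[..]);
}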
-pub const WITH_RIALTO_TOKEN_SWAP_PALLET_NAME: &str = "BridgeRialtoTokenSwap"; - -/// Name of the `MillauFinalityApi::best_finalized` runtime method. -pub const BEST_FINALIZED_MILLAU_HEADER_METHOD: &str = "MillauFinalityApi_best_finalized"; - -/// Name of the `ToMillauOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime -/// method. -pub const TO_MILLAU_ESTIMATE_MESSAGE_FEE_METHOD: &str = - "ToMillauOutboundLaneApi_estimate_message_delivery_and_dispatch_fee"; -/// Name of the `ToMillauOutboundLaneApi::message_details` runtime method. -pub const TO_MILLAU_MESSAGE_DETAILS_METHOD: &str = "ToMillauOutboundLaneApi_message_details"; - -sp_api::decl_runtime_apis! { - /// API for querying information about the finalized Millau headers. - /// - /// This API is implemented by runtimes that are bridging with the Millau chain, not the - /// Millau runtime itself. - pub trait MillauFinalityApi { - /// Returns number and hash of the best finalized header known to the bridge module. - fn best_finalized() -> (BlockNumber, Hash); - } - - /// Outbound message lane API for messages that are sent to Millau chain. - /// - /// This API is implemented by runtimes that are sending messages to Millau chain, not the - /// Millau runtime itself. - pub trait ToMillauOutboundLaneApi { - /// Estimate message delivery and dispatch fee that needs to be paid by the sender on - /// this chain. - /// - /// Returns `None` if message is too expensive to be sent to Millau from this chain. - /// - /// Please keep in mind that this method returns the lowest message fee required for message - /// to be accepted to the lane. It may be good idea to pay a bit over this price to account - /// future exchange rate changes and guarantee that relayer would deliver your message - /// to the target chain. - fn estimate_message_delivery_and_dispatch_fee( - lane_id: LaneId, - payload: OutboundPayload, - millau_to_this_conversion_rate: Option, - ) -> Option; - /// Returns dispatch weight, encoded payload size and delivery+dispatch fee of all - /// messages in given inclusive range. - /// - /// If some (or all) messages are missing from the storage, they'll also will - /// be missing from the resulting vector. The vector is ordered by the nonce. - fn message_details( - lane: LaneId, - begin: MessageNonce, - end: MessageNonce, - ) -> Vec>; - } -} - -#[cfg(test)] -mod tests { - use super::*; - use sp_runtime::codec::Encode; - - #[test] - fn maximal_account_size_does_not_overflow_constant() { - assert!( - MAXIMAL_ENCODED_ACCOUNT_ID_SIZE as usize >= AccountId::from([0u8; 32]).encode().len(), - "Actual maximal size of encoded AccountId ({}) overflows expected ({})", - AccountId::from([0u8; 32]).encode().len(), - MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, - ); - } -} diff --git a/polkadot/bridges/primitives/chain-millau/src/millau_hash.rs b/polkadot/bridges/primitives/chain-millau/src/millau_hash.rs deleted file mode 100644 index 11968b2f282..00000000000 --- a/polkadot/bridges/primitives/chain-millau/src/millau_hash.rs +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use parity_util_mem::MallocSizeOf; -use scale_info::TypeInfo; -use sp_runtime::traits::CheckEqual; - -// `sp_core::H512` can't be used, because it doesn't implement `CheckEqual`, which is required -// by `frame_system::Config::Hash`. - -fixed_hash::construct_fixed_hash! { - /// Hash type used in Millau chain. - #[derive(MallocSizeOf, TypeInfo)] - pub struct MillauHash(64); -} - -#[cfg(feature = "std")] -impl_serde::impl_fixed_hash_serde!(MillauHash, 64); - -impl_codec::impl_fixed_hash_codec!(MillauHash, 64); - -impl CheckEqual for MillauHash { - #[cfg(feature = "std")] - fn check_equal(&self, other: &Self) { - use sp_core::hexdisplay::HexDisplay; - if self != other { - println!( - "Hash: given={}, expected={}", - HexDisplay::from(self.as_fixed_bytes()), - HexDisplay::from(other.as_fixed_bytes()), - ); - } - } - - #[cfg(not(feature = "std"))] - fn check_equal(&self, other: &Self) { - use frame_support::Printable; - - if self != other { - "Hash not equal".print(); - self.as_bytes().print(); - other.as_bytes().print(); - } - } -} diff --git a/polkadot/bridges/primitives/chain-polkadot/Cargo.toml b/polkadot/bridges/primitives/chain-polkadot/Cargo.toml deleted file mode 100644 index 738899b658c..00000000000 --- a/polkadot/bridges/primitives/chain-polkadot/Cargo.toml +++ /dev/null @@ -1,37 +0,0 @@ -[package] -name = "bp-polkadot" -description = "Primitives of Polkadot runtime." -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -smallvec = "1.7" - -# Bridge Dependencies - -bp-messages = { path = "../messages", default-features = false } -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } - -# Substrate Based Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } - -[features] -default = ["std"] -std = [ - "bp-messages/std", - "bp-polkadot-core/std", - "bp-runtime/std", - "frame-support/std", - "sp-api/std", - "sp-runtime/std", - "sp-std/std", - "sp-version/std", -] diff --git a/polkadot/bridges/primitives/chain-polkadot/src/lib.rs b/polkadot/bridges/primitives/chain-polkadot/src/lib.rs deleted file mode 100644 index d95e29c8b0c..00000000000 --- a/polkadot/bridges/primitives/chain-polkadot/src/lib.rs +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -#![cfg_attr(not(feature = "std"), no_std)] -// RuntimeApi generated functions -#![allow(clippy::too_many_arguments)] - -use bp_messages::{LaneId, MessageDetails, MessageNonce}; -use frame_support::weights::{ - WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial, -}; -use sp_runtime::FixedU128; -use sp_std::prelude::*; -use sp_version::RuntimeVersion; - -pub use bp_polkadot_core::*; - -/// Polkadot Chain -pub type Polkadot = PolkadotLike; - -// NOTE: This needs to be kept up to date with the Polkadot runtime found in the Polkadot repo. -pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: sp_version::create_runtime_str!("polkadot"), - impl_name: sp_version::create_runtime_str!("parity-polkadot"), - authoring_version: 0, - spec_version: 9180, - impl_version: 0, - apis: sp_version::create_apis_vec![[]], - transaction_version: 12, - state_version: 0, -}; - -// NOTE: This needs to be kept up to date with the Polkadot runtime found in the Polkadot repo. -pub struct WeightToFee; -impl WeightToFeePolynomial for WeightToFee { - type Balance = Balance; - fn polynomial() -> WeightToFeeCoefficients { - const CENTS: Balance = 10_000_000_000 / 100; - // in Polkadot, extrinsic base weight (smallest non-zero weight) is mapped to 1/10 CENT: - let p = CENTS; - let q = 10 * Balance::from(ExtrinsicBaseWeight::get()); - smallvec::smallvec![WeightToFeeCoefficient { - degree: 1, - negative: false, - coeff_frac: Perbill::from_rational(p % q, q), - coeff_integer: p / q, - }] - } -} - -// We use this to get the account on Polkadot (target) which is derived from Kusama's (source) -// account. -pub fn derive_account_from_kusama_id(id: bp_runtime::SourceAccount) -> AccountId { - let encoded_id = bp_runtime::derive_account_id(bp_runtime::KUSAMA_CHAIN_ID, id); - AccountIdConverter::convert(encoded_id) -} - -/// Per-byte fee for Polkadot transactions. -pub const TRANSACTION_BYTE_FEE: Balance = 10 * 10_000_000_000 / 100 / 1_000; - -/// Existential deposit on Polkadot. -pub const EXISTENTIAL_DEPOSIT: Balance = 10_000_000_000; - -/// The target length of a session (how often authorities change) on Polkadot measured in of number -/// of blocks. -/// -/// Note that since this is a target sessions may change before/after this time depending on network -/// conditions. -pub const SESSION_LENGTH: BlockNumber = 4 * time_units::HOURS; - -/// Name of the With-Polkadot GRANDPA pallet instance that is deployed at bridged chains. -pub const WITH_POLKADOT_GRANDPA_PALLET_NAME: &str = "BridgePolkadotGrandpa"; -/// Name of the With-Polkadot messages pallet instance that is deployed at bridged chains. -pub const WITH_POLKADOT_MESSAGES_PALLET_NAME: &str = "BridgePolkadotMessages"; - -/// Name of the transaction payment pallet at the Polkadot runtime. 
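A quick numeric check of the `WeightToFee` polynomial above, assuming an illustrative `ExtrinsicBaseWeight` of 85_212_000 (the real value comes from the Polkadot runtime's benchmarked constants): with `p = CENTS` and `q = 10 * ExtrinsicBaseWeight`, an extrinsic of exactly base weight is charged `CENTS / 10`, i.e. one tenth of a cent.

fn main() {
	// Illustrative stand-in for `ExtrinsicBaseWeight::get()`.
	const BASE_WEIGHT: u128 = 85_212_000;
	const CENTS: u128 = 10_000_000_000 / 100;

	let p = CENTS;
	let q = 10 * BASE_WEIGHT;

	// Degree-1 polynomial: fee(w) = w * p / q, with the remainder expressed as a
	// `Perbill` fraction, so fee(BASE_WEIGHT) = CENTS / 10.
	assert_eq!(BASE_WEIGHT * p / q, CENTS / 10);
}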
-pub const TRANSACTION_PAYMENT_PALLET_NAME: &str = "TransactionPayment"; - -/// Name of the KSM->DOT conversion rate parameter, stored in the Polkadot runtime. -pub const KUSAMA_TO_POLKADOT_CONVERSION_RATE_PARAMETER_NAME: &str = - "KusamaToPolkadotConversionRate"; -/// Name of the Kusama fee multiplier parameter, stored in the Polkadot runtime. -pub const KUSAMA_FEE_MULTIPLIER_PARAMETER_NAME: &str = "KusamaFeeMultiplier"; - -/// Name of the `PolkadotFinalityApi::best_finalized` runtime method. -pub const BEST_FINALIZED_POLKADOT_HEADER_METHOD: &str = "PolkadotFinalityApi_best_finalized"; - -/// Name of the `ToPolkadotOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime -/// method. -pub const TO_POLKADOT_ESTIMATE_MESSAGE_FEE_METHOD: &str = - "ToPolkadotOutboundLaneApi_estimate_message_delivery_and_dispatch_fee"; -/// Name of the `ToPolkadotOutboundLaneApi::message_details` runtime method. -pub const TO_POLKADOT_MESSAGE_DETAILS_METHOD: &str = "ToPolkadotOutboundLaneApi_message_details"; - -sp_api::decl_runtime_apis! { - /// API for querying information about the finalized Polkadot headers. - /// - /// This API is implemented by runtimes that are bridging with the Polkadot chain, not the - /// Polkadot runtime itself. - pub trait PolkadotFinalityApi { - /// Returns number and hash of the best finalized header known to the bridge module. - fn best_finalized() -> (BlockNumber, Hash); - } - - /// Outbound message lane API for messages that are sent to Polkadot chain. - /// - /// This API is implemented by runtimes that are sending messages to Polkadot chain, not the - /// Polkadot runtime itself. - pub trait ToPolkadotOutboundLaneApi { - /// Estimate message delivery and dispatch fee that needs to be paid by the sender on - /// this chain. - /// - /// Returns `None` if message is too expensive to be sent to Polkadot from this chain. - /// - /// Please keep in mind that this method returns the lowest message fee required for message - /// to be accepted to the lane. It may be good idea to pay a bit over this price to account - /// future exchange rate changes and guarantee that relayer would deliver your message - /// to the target chain. - fn estimate_message_delivery_and_dispatch_fee( - lane_id: LaneId, - payload: OutboundPayload, - polkadot_to_this_conversion_rate: Option, - ) -> Option; - /// Returns dispatch weight, encoded payload size and delivery+dispatch fee of all - /// messages in given inclusive range. - /// - /// If some (or all) messages are missing from the storage, they'll also will - /// be missing from the resulting vector. The vector is ordered by the nonce. - fn message_details( - lane: LaneId, - begin: MessageNonce, - end: MessageNonce, - ) -> Vec>; - } -} diff --git a/polkadot/bridges/primitives/chain-rialto-parachain/Cargo.toml b/polkadot/bridges/primitives/chain-rialto-parachain/Cargo.toml deleted file mode 100644 index a15c4092957..00000000000 --- a/polkadot/bridges/primitives/chain-rialto-parachain/Cargo.toml +++ /dev/null @@ -1,36 +0,0 @@ -[package] -name = "bp-rialto-parachain" -description = "Primitives of Rialto parachain runtime." 
-version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] - -# Bridge Dependencies - -bp-messages = { path = "../messages", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } - -# Substrate Based Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } - -[features] -default = ["std"] -std = [ - "bp-messages/std", - "bp-runtime/std", - "frame-support/std", - "frame-system/std", - "sp-api/std", - "sp-core/std", - "sp-runtime/std", - "sp-std/std", -] diff --git a/polkadot/bridges/primitives/chain-rialto-parachain/src/lib.rs b/polkadot/bridges/primitives/chain-rialto-parachain/src/lib.rs deleted file mode 100644 index f3f449c7af3..00000000000 --- a/polkadot/bridges/primitives/chain-rialto-parachain/src/lib.rs +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -#![cfg_attr(not(feature = "std"), no_std)] -// RuntimeApi generated functions -#![allow(clippy::too_many_arguments)] - -use bp_runtime::Chain; -use frame_support::{ - weights::{constants::WEIGHT_PER_SECOND, DispatchClass, IdentityFee, Weight}, - RuntimeDebug, -}; -use frame_system::limits; -use sp_core::Hasher as HasherT; -use sp_runtime::{ - traits::{BlakeTwo256, IdentifyAccount, Verify}, - MultiSignature, MultiSigner, Perbill, -}; - -/// Maximal weight of single Rialto parachain block. -/// -/// This represents two seconds of compute assuming a target block time of six seconds. -pub const MAXIMUM_BLOCK_WEIGHT: Weight = 2 * WEIGHT_PER_SECOND; - -/// Represents the average portion of a block's weight that will be used by an -/// `on_initialize()` runtime call. -pub const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); - -/// Represents the portion of a block that will be used by Normal extrinsics. -pub const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); - -/// Block number type used in Rialto. -pub type BlockNumber = u32; - -/// Hash type used in Rialto. -pub type Hash = ::Out; - -/// The type of object that can produce hashes on Rialto. -pub type Hasher = BlakeTwo256; - -/// The header type used by Rialto. 
-pub type Header = sp_runtime::generic::Header; - -/// Alias to 512-bit hash when used in the context of a transaction signature on the chain. -pub type Signature = MultiSignature; - -/// Some way of identifying an account on the chain. We intentionally make it equivalent -/// to the public key of our transaction signing scheme. -pub type AccountId = <::Signer as IdentifyAccount>::AccountId; - -/// Public key of the chain account that may be used to verify signatures. -pub type AccountSigner = MultiSigner; - -/// Balance of an account. -pub type Balance = u128; - -/// An instant or duration in time. -pub type Moment = u64; - -/// Index of a transaction in the parachain. -pub type Index = u32; - -/// Weight-to-Fee type used by Rialto parachain. -pub type WeightToFee = IdentityFee; - -/// Rialto parachain. -#[derive(RuntimeDebug)] -pub struct RialtoParachain; - -impl Chain for RialtoParachain { - type BlockNumber = BlockNumber; - type Hash = Hash; - type Hasher = Hasher; - type Header = Header; - - type AccountId = AccountId; - type Balance = Balance; - type Index = Index; - type Signature = Signature; - - fn max_extrinsic_size() -> u32 { - *BlockLength::get().max.get(DispatchClass::Normal) - } - - fn max_extrinsic_weight() -> Weight { - BlockWeights::get() - .get(DispatchClass::Normal) - .max_extrinsic - .unwrap_or(Weight::MAX) - } -} - -frame_support::parameter_types! { - pub BlockLength: limits::BlockLength = - limits::BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); - pub BlockWeights: limits::BlockWeights = limits::BlockWeights::builder() - // Allowance for Normal class - .for_class(DispatchClass::Normal, |weights| { - weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); - }) - // Allowance for Operational class - .for_class(DispatchClass::Operational, |weights| { - weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); - // Extra reserved space for Operational class - weights.reserved = Some(MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); - }) - // By default Mandatory class is not limited at all. - // This parameter is used to derive maximal size of a single extrinsic. - .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) - .build_or_panic(); -} diff --git a/polkadot/bridges/primitives/chain-rialto/Cargo.toml b/polkadot/bridges/primitives/chain-rialto/Cargo.toml deleted file mode 100644 index 663f9076657..00000000000 --- a/polkadot/bridges/primitives/chain-rialto/Cargo.toml +++ /dev/null @@ -1,36 +0,0 @@ -[package] -name = "bp-rialto" -description = "Primitives of Rialto runtime." 
-version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] - -# Bridge Dependencies - -bp-messages = { path = "../messages", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } - -# Substrate Based Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } - -[features] -default = ["std"] -std = [ - "bp-messages/std", - "bp-runtime/std", - "frame-support/std", - "frame-system/std", - "sp-api/std", - "sp-core/std", - "sp-runtime/std", - "sp-std/std", -] diff --git a/polkadot/bridges/primitives/chain-rialto/src/lib.rs b/polkadot/bridges/primitives/chain-rialto/src/lib.rs deleted file mode 100644 index 4bf20489bc8..00000000000 --- a/polkadot/bridges/primitives/chain-rialto/src/lib.rs +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -#![cfg_attr(not(feature = "std"), no_std)] -// RuntimeApi generated functions -#![allow(clippy::too_many_arguments)] - -use bp_messages::{LaneId, MessageDetails, MessageNonce}; -use bp_runtime::Chain; -use frame_support::{ - weights::{constants::WEIGHT_PER_SECOND, DispatchClass, IdentityFee, Weight}, - Parameter, RuntimeDebug, -}; -use frame_system::limits; -use sp_core::Hasher as HasherT; -use sp_runtime::{ - traits::{BlakeTwo256, Convert, IdentifyAccount, Verify}, - FixedU128, MultiSignature, MultiSigner, Perbill, -}; -use sp_std::prelude::*; - -/// Number of extra bytes (excluding size of storage value itself) of storage proof, built at -/// Rialto chain. This mostly depends on number of entries (and their density) in the storage trie. -/// Some reserve is reserved to account future chain growth. -pub const EXTRA_STORAGE_PROOF_SIZE: u32 = 1024; - -/// Number of bytes, included in the signed Rialto transaction apart from the encoded call itself. -/// -/// Can be computed by subtracting encoded call size from raw transaction size. -pub const TX_EXTRA_BYTES: u32 = 104; - -/// Maximal size (in bytes) of encoded (using `Encode::encode()`) account id. -pub const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = 32; - -/// Maximal weight of single Rialto block. 
-/// -/// This represents two seconds of compute assuming a target block time of six seconds. -pub const MAXIMUM_BLOCK_WEIGHT: Weight = 2 * WEIGHT_PER_SECOND; - -/// Represents the average portion of a block's weight that will be used by an -/// `on_initialize()` runtime call. -pub const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); - -/// Represents the portion of a block that will be used by Normal extrinsics. -pub const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); - -/// Maximal number of unrewarded relayer entries in Rialto confirmation transaction. -pub const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = 1024; - -/// Maximal number of unconfirmed messages in Rialto confirmation transaction. -pub const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = 1024; - -/// Weight of single regular message delivery transaction on Rialto chain. -/// -/// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_proof_weight()` call -/// for the case when single message of `pallet_bridge_messages::EXPECTED_DEFAULT_MESSAGE_LENGTH` -/// bytes is delivered. The message must have dispatch weight set to zero. The result then must be -/// rounded up to account possible future runtime upgrades. -pub const DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT: Weight = 1_500_000_000; - -/// Increase of delivery transaction weight on Rialto chain with every additional message byte. -/// -/// This value is a result of -/// `pallet_bridge_messages::WeightInfoExt::storage_proof_size_overhead(1)` call. The result then -/// must be rounded up to account possible future runtime upgrades. -pub const ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT: Weight = 25_000; - -/// Maximal weight of single message delivery confirmation transaction on Rialto chain. -/// -/// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_delivery_proof` -/// weight formula computation for the case when single message is confirmed. The result then must -/// be rounded up to account possible future runtime upgrades. -pub const MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT: Weight = 2_000_000_000; - -/// Weight of pay-dispatch-fee operation for inbound messages at Rialto chain. -/// -/// This value corresponds to the result of -/// `pallet_bridge_messages::WeightInfoExt::pay_inbound_dispatch_fee_overhead()` call for your -/// chain. Don't put too much reserve there, because it is used to **decrease** -/// `DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT` cost. So putting large reserve would make delivery -/// transactions cheaper. -pub const PAY_INBOUND_DISPATCH_FEE_WEIGHT: Weight = 700_000_000; - -/// The target length of a session (how often authorities change) on Rialto measured in of number of -/// blocks. -/// -/// Note that since this is a target sessions may change before/after this time depending on network -/// conditions. -pub const SESSION_LENGTH: BlockNumber = 4; - -/// Re-export `time_units` to make usage easier. -pub use time_units::*; - -/// Human readable time units defined in terms of number of blocks. 
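Taken together, the message-delivery constants above are meant to bound a single delivery transaction as roughly: base weight for a default-length message, plus a per-byte overhead for anything larger, plus the message's declared dispatch weight. A hedged sketch of that bound (the function, its inputs and the 128-byte default length are illustrative assumptions, not an API of the removed crate):

/// Rough upper bound on the weight of delivering a single message of
/// `message_len` encoded bytes with the given declared dispatch weight.
/// `Weight` was a plain `u64` at the time of this code.
fn estimated_delivery_tx_weight(message_len: u64, dispatch_weight: u64) -> u64 {
	// Assumed stand-in for `pallet_bridge_messages::EXPECTED_DEFAULT_MESSAGE_LENGTH`.
	let expected_default_len: u64 = 128;

	// `DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT` already covers a default-length message
	// with zero dispatch weight; only extra bytes and the actual dispatch weight
	// are added on top.
	DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT
		.saturating_add(
			ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT
				.saturating_mul(message_len.saturating_sub(expected_default_len)),
		)
		.saturating_add(dispatch_weight)
}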
-pub mod time_units { - use super::{BlockNumber, SESSION_LENGTH}; - - pub const MILLISECS_PER_BLOCK: u64 = 6000; - pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; - - pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber); - pub const HOURS: BlockNumber = MINUTES * 60; - pub const DAYS: BlockNumber = HOURS * 24; - - pub const EPOCH_DURATION_IN_SLOTS: BlockNumber = SESSION_LENGTH; - - // 1 in 4 blocks (on average, not counting collisions) will be primary babe blocks. - pub const PRIMARY_PROBABILITY: (u64, u64) = (1, 4); -} - -/// Block number type used in Rialto. -pub type BlockNumber = u32; - -/// Hash type used in Rialto. -pub type Hash = ::Out; - -/// The type of object that can produce hashes on Rialto. -pub type Hasher = BlakeTwo256; - -/// The header type used by Rialto. -pub type Header = sp_runtime::generic::Header; - -/// Alias to 512-bit hash when used in the context of a transaction signature on the chain. -pub type Signature = MultiSignature; - -/// Some way of identifying an account on the chain. We intentionally make it equivalent -/// to the public key of our transaction signing scheme. -pub type AccountId = <::Signer as IdentifyAccount>::AccountId; - -/// Public key of the chain account that may be used to verify signatures. -pub type AccountSigner = MultiSigner; - -/// Balance of an account. -pub type Balance = u128; - -/// An instant or duration in time. -pub type Moment = u64; - -/// Index of a transaction in the chain. -pub type Index = u32; - -/// Weight-to-Fee type used by Rialto. -pub type WeightToFee = IdentityFee; - -/// Rialto chain. -#[derive(RuntimeDebug)] -pub struct Rialto; - -impl Chain for Rialto { - type BlockNumber = BlockNumber; - type Hash = Hash; - type Hasher = Hasher; - type Header = Header; - - type AccountId = AccountId; - type Balance = Balance; - type Index = Index; - type Signature = Signature; - - fn max_extrinsic_size() -> u32 { - *BlockLength::get().max.get(DispatchClass::Normal) - } - - fn max_extrinsic_weight() -> Weight { - BlockWeights::get() - .get(DispatchClass::Normal) - .max_extrinsic - .unwrap_or(Weight::MAX) - } -} - -/// Convert a 256-bit hash into an AccountId. -pub struct AccountIdConverter; - -impl Convert for AccountIdConverter { - fn convert(hash: sp_core::H256) -> AccountId { - hash.to_fixed_bytes().into() - } -} - -// We use this to get the account on Rialto (target) which is derived from Millau's (source) -// account. We do this so we can fund the derived account on Rialto at Genesis to it can pay -// transaction fees. -// -// The reason we can use the same `AccountId` type for both chains is because they share the same -// development seed phrase. -// -// Note that this should only be used for testing. -pub fn derive_account_from_millau_id(id: bp_runtime::SourceAccount) -> AccountId { - let encoded_id = bp_runtime::derive_account_id(bp_runtime::MILLAU_CHAIN_ID, id); - AccountIdConverter::convert(encoded_id) -} - -frame_support::parameter_types! 
{ - pub BlockLength: limits::BlockLength = - limits::BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); - pub BlockWeights: limits::BlockWeights = limits::BlockWeights::builder() - // Allowance for Normal class - .for_class(DispatchClass::Normal, |weights| { - weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); - }) - // Allowance for Operational class - .for_class(DispatchClass::Operational, |weights| { - weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); - // Extra reserved space for Operational class - weights.reserved = Some(MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); - }) - // By default Mandatory class is not limited at all. - // This parameter is used to derive maximal size of a single extrinsic. - .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) - .build_or_panic(); -} - -/// Name of the With-Rialto GRANDPA pallet instance that is deployed at bridged chains. -pub const WITH_RIALTO_GRANDPA_PALLET_NAME: &str = "BridgeRialtoGrandpa"; -/// Name of the With-Rialto messages pallet instance that is deployed at bridged chains. -pub const WITH_RIALTO_MESSAGES_PALLET_NAME: &str = "BridgeRialtoMessages"; - -/// Name of the Millau->Rialto (actually KSM->DOT) conversion rate stored in the Rialto runtime. -pub const MILLAU_TO_RIALTO_CONVERSION_RATE_PARAMETER_NAME: &str = "MillauToRialtoConversionRate"; - -/// Name of the parachain registrar pallet in the Rialto runtime. -pub const PARAS_REGISTRAR_PALLET_NAME: &str = "Registrar"; - -/// Name of the parachains pallet in the Rialto runtime. -pub const PARAS_PALLET_NAME: &str = "Paras"; - -/// Name of the `RialtoFinalityApi::best_finalized` runtime method. -pub const BEST_FINALIZED_RIALTO_HEADER_METHOD: &str = "RialtoFinalityApi_best_finalized"; - -/// Name of the `ToRialtoOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime -/// method. -pub const TO_RIALTO_ESTIMATE_MESSAGE_FEE_METHOD: &str = - "ToRialtoOutboundLaneApi_estimate_message_delivery_and_dispatch_fee"; -/// Name of the `ToRialtoOutboundLaneApi::message_details` runtime method. -pub const TO_RIALTO_MESSAGE_DETAILS_METHOD: &str = "ToRialtoOutboundLaneApi_message_details"; - -sp_api::decl_runtime_apis! { - /// API for querying information about the finalized Rialto headers. - /// - /// This API is implemented by runtimes that are bridging with the Rialto chain, not the - /// Millau runtime itself. - pub trait RialtoFinalityApi { - /// Returns number and hash of the best finalized header known to the bridge module. - fn best_finalized() -> (BlockNumber, Hash); - } - - /// Outbound message lane API for messages that are sent to Rialto chain. - /// - /// This API is implemented by runtimes that are sending messages to Rialto chain, not the - /// Rialto runtime itself. - pub trait ToRialtoOutboundLaneApi { - /// Estimate message delivery and dispatch fee that needs to be paid by the sender on - /// this chain. - /// - /// Returns `None` if message is too expensive to be sent to Rialto from this chain. - /// - /// Please keep in mind that this method returns the lowest message fee required for message - /// to be accepted to the lane. It may be good idea to pay a bit over this price to account - /// future exchange rate changes and guarantee that relayer would deliver your message - /// to the target chain. 
- fn estimate_message_delivery_and_dispatch_fee( - lane_id: LaneId, - payload: OutboundPayload, - rialto_to_this_conversion_rate: Option, - ) -> Option; - /// Returns dispatch weight, encoded payload size and delivery+dispatch fee of all - /// messages in given inclusive range. - /// - /// If some (or all) messages are missing from the storage, they'll also will - /// be missing from the resulting vector. The vector is ordered by the nonce. - fn message_details( - lane: LaneId, - begin: MessageNonce, - end: MessageNonce, - ) -> Vec>; - } -} - -#[cfg(test)] -mod tests { - use super::*; - use sp_runtime::codec::Encode; - - #[test] - fn maximal_account_size_does_not_overflow_constant() { - assert!( - MAXIMAL_ENCODED_ACCOUNT_ID_SIZE as usize >= AccountId::from([0u8; 32]).encode().len(), - "Actual maximal size of encoded AccountId ({}) overflows expected ({})", - AccountId::from([0u8; 32]).encode().len(), - MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, - ); - } -} diff --git a/polkadot/bridges/primitives/chain-rococo/Cargo.toml b/polkadot/bridges/primitives/chain-rococo/Cargo.toml deleted file mode 100644 index 814cd09bf17..00000000000 --- a/polkadot/bridges/primitives/chain-rococo/Cargo.toml +++ /dev/null @@ -1,37 +0,0 @@ -[package] -name = "bp-rococo" -description = "Primitives of Rococo runtime." -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -parity-scale-codec = { version = "3.0.0", default-features = false, features = ["derive"] } -smallvec = "1.7" - -# Bridge Dependencies -bp-messages = { path = "../messages", default-features = false } -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } - -# Substrate Based Dependencies -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } - -[features] -default = ["std"] -std = [ - "bp-messages/std", - "bp-polkadot-core/std", - "bp-runtime/std", - "frame-support/std", - "parity-scale-codec/std", - "sp-api/std", - "sp-runtime/std", - "sp-std/std", - "sp-version/std", -] diff --git a/polkadot/bridges/primitives/chain-rococo/src/lib.rs b/polkadot/bridges/primitives/chain-rococo/src/lib.rs deleted file mode 100644 index 127e75d5f8b..00000000000 --- a/polkadot/bridges/primitives/chain-rococo/src/lib.rs +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -#![cfg_attr(not(feature = "std"), no_std)] -// RuntimeApi generated functions -#![allow(clippy::too_many_arguments)] - -use bp_messages::{LaneId, MessageDetails, MessageNonce}; -use frame_support::weights::{ - Weight, WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial, -}; -use sp_runtime::FixedU128; -use sp_std::prelude::*; -use sp_version::RuntimeVersion; - -pub use bp_polkadot_core::*; - -/// Rococo Chain -pub type Rococo = PolkadotLike; - -/// The target length of a session (how often authorities change) on Rococo measured in of number -/// of blocks. -/// -/// Note that since this is a target sessions may change before/after this time depending on network -/// conditions. -pub const SESSION_LENGTH: BlockNumber = time_units::HOURS; - -// NOTE: This needs to be kept up to date with the Rococo runtime found in the Polkadot repo. -pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: sp_version::create_runtime_str!("rococo"), - impl_name: sp_version::create_runtime_str!("parity-rococo-v2.0"), - authoring_version: 0, - spec_version: 9180, - impl_version: 0, - apis: sp_version::create_apis_vec![[]], - transaction_version: 0, - state_version: 0, -}; - -// NOTE: This needs to be kept up to date with the Rococo runtime found in the Polkadot repo. -pub struct WeightToFee; -impl WeightToFeePolynomial for WeightToFee { - type Balance = Balance; - fn polynomial() -> WeightToFeeCoefficients { - const CENTS: Balance = 1_000_000_000_000 / 100; - let p = CENTS; - let q = 10 * Balance::from(ExtrinsicBaseWeight::get()); - smallvec::smallvec![WeightToFeeCoefficient { - degree: 1, - negative: false, - coeff_frac: Perbill::from_rational(p % q, q), - coeff_integer: p / q, - }] - } -} - -// We use this to get the account on Rococo (target) which is derived from Wococo's (source) -// account. -pub fn derive_account_from_wococo_id(id: bp_runtime::SourceAccount) -> AccountId { - let encoded_id = bp_runtime::derive_account_id(bp_runtime::WOCOCO_CHAIN_ID, id); - AccountIdConverter::convert(encoded_id) -} - -/// Name of the With-Rococo GRANDPA pallet instance that is deployed at bridged chains. -pub const WITH_ROCOCO_GRANDPA_PALLET_NAME: &str = "BridgeRococoGrandpa"; -/// Name of the With-Rococo messages pallet instance that is deployed at bridged chains. -pub const WITH_ROCOCO_MESSAGES_PALLET_NAME: &str = "BridgeRococoMessages"; - -/// Name of the `RococoFinalityApi::best_finalized` runtime method. -pub const BEST_FINALIZED_ROCOCO_HEADER_METHOD: &str = "RococoFinalityApi_best_finalized"; - -/// Name of the `ToRococoOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime -/// method. -pub const TO_ROCOCO_ESTIMATE_MESSAGE_FEE_METHOD: &str = - "ToRococoOutboundLaneApi_estimate_message_delivery_and_dispatch_fee"; -/// Name of the `ToRococoOutboundLaneApi::message_details` runtime method. -pub const TO_ROCOCO_MESSAGE_DETAILS_METHOD: &str = "ToRococoOutboundLaneApi_message_details"; - -/// Existential deposit on Rococo. -pub const EXISTENTIAL_DEPOSIT: Balance = 1_000_000_000_000 / 100; - -/// Weight of pay-dispatch-fee operation for inbound messages at Rococo chain. -/// -/// This value corresponds to the result of -/// `pallet_bridge_messages::WeightInfoExt::pay_inbound_dispatch_fee_overhead()` call for your -/// chain. Don't put too much reserve there, because it is used to **decrease** -/// `DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT` cost. 
So putting large reserve would make delivery -/// transactions cheaper. -pub const PAY_INBOUND_DISPATCH_FEE_WEIGHT: Weight = 600_000_000; - -sp_api::decl_runtime_apis! { - /// API for querying information about the finalized Rococo headers. - /// - /// This API is implemented by runtimes that are bridging with the Rococo chain, not the - /// Rococo runtime itself. - pub trait RococoFinalityApi { - /// Returns number and hash of the best finalized header known to the bridge module. - fn best_finalized() -> (BlockNumber, Hash); - } - - /// Outbound message lane API for messages that are sent to Rococo chain. - /// - /// This API is implemented by runtimes that are sending messages to Rococo chain, not the - /// Rococo runtime itself. - pub trait ToRococoOutboundLaneApi { - /// Estimate message delivery and dispatch fee that needs to be paid by the sender on - /// this chain. - /// - /// Returns `None` if message is too expensive to be sent to Rococo from this chain. - /// - /// Please keep in mind that this method returns the lowest message fee required for message - /// to be accepted to the lane. It may be good idea to pay a bit over this price to account - /// future exchange rate changes and guarantee that relayer would deliver your message - /// to the target chain. - fn estimate_message_delivery_and_dispatch_fee( - lane_id: LaneId, - payload: OutboundPayload, - rococo_to_this_conversion_rate: Option, - ) -> Option; - /// Returns dispatch weight, encoded payload size and delivery+dispatch fee of all - /// messages in given inclusive range. - /// - /// If some (or all) messages are missing from the storage, they'll also will - /// be missing from the resulting vector. The vector is ordered by the nonce. - fn message_details( - lane: LaneId, - begin: MessageNonce, - end: MessageNonce, - ) -> Vec>; - } -} diff --git a/polkadot/bridges/primitives/chain-westend/Cargo.toml b/polkadot/bridges/primitives/chain-westend/Cargo.toml deleted file mode 100644 index ee6e2b9be99..00000000000 --- a/polkadot/bridges/primitives/chain-westend/Cargo.toml +++ /dev/null @@ -1,41 +0,0 @@ -[package] -name = "bp-westend" -description = "Primitives of Westend runtime." 
-version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -parity-scale-codec = { version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } -smallvec = "1.7" - -# Bridge Dependencies - -bp-header-chain = { path = "../header-chain", default-features = false } -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } - -# Substrate Based Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } - -[features] -default = ["std"] -std = [ - "bp-header-chain/std", - "bp-polkadot-core/std", - "bp-runtime/std", - "frame-support/std", - "parity-scale-codec/std", - "scale-info/std", - "sp-api/std", - "sp-runtime/std", - "sp-std/std", - "sp-version/std", -] diff --git a/polkadot/bridges/primitives/chain-westend/src/lib.rs b/polkadot/bridges/primitives/chain-westend/src/lib.rs deleted file mode 100644 index c7ebe4b00fd..00000000000 --- a/polkadot/bridges/primitives/chain-westend/src/lib.rs +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -#![cfg_attr(not(feature = "std"), no_std)] -// RuntimeApi generated functions -#![allow(clippy::too_many_arguments)] - -use frame_support::weights::{ - WeightToFeeCoefficient, WeightToFeeCoefficients, WeightToFeePolynomial, -}; -use scale_info::TypeInfo; -use sp_std::prelude::*; -use sp_version::RuntimeVersion; - -pub use bp_polkadot_core::*; - -/// Westend Chain -pub type Westend = PolkadotLike; - -// NOTE: This needs to be kept up to date with the Westend runtime found in the Polkadot repo. 
-pub struct WeightToFee; -impl WeightToFeePolynomial for WeightToFee { - type Balance = Balance; - fn polynomial() -> WeightToFeeCoefficients { - const CENTS: Balance = 1_000_000_000_000 / 1_000; - // in Westend, extrinsic base weight (smallest non-zero weight) is mapped to 1/10 CENT: - let p = CENTS; - let q = 10 * Balance::from(ExtrinsicBaseWeight::get()); - smallvec::smallvec![WeightToFeeCoefficient { - degree: 1, - negative: false, - coeff_frac: Perbill::from_rational(p % q, q), - coeff_integer: p / q, - }] - } -} - -// NOTE: This needs to be kept up to date with the Westend runtime found in the Polkadot repo. -pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: sp_version::create_runtime_str!("westend"), - impl_name: sp_version::create_runtime_str!("parity-westend"), - authoring_version: 2, - spec_version: 9140, - impl_version: 0, - apis: sp_version::create_apis_vec![[]], - transaction_version: 8, - state_version: 0, -}; - -/// Westend Runtime `Call` enum. -/// -/// We are not currently submitting any Westend transactions => it is empty. -#[derive( - parity_scale_codec::Encode, parity_scale_codec::Decode, Debug, PartialEq, Eq, Clone, TypeInfo, -)] -pub enum Call {} - -impl sp_runtime::traits::Dispatchable for Call { - type Origin = (); - type Config = (); - type Info = (); - type PostInfo = (); - - fn dispatch(self, _origin: Self::Origin) -> sp_runtime::DispatchResultWithInfo { - unimplemented!("The Call is not expected to be dispatched.") - } -} - -// We use this to get the account on Westend (target) which is derived from Rococo's (source) -// account. -pub fn derive_account_from_rococo_id(id: bp_runtime::SourceAccount) -> AccountId { - let encoded_id = bp_runtime::derive_account_id(bp_runtime::ROCOCO_CHAIN_ID, id); - AccountIdConverter::convert(encoded_id) -} - -/// Name of the With-Westend GRANDPA pallet instance that is deployed at bridged chains. -pub const WITH_WESTEND_GRANDPA_PALLET_NAME: &str = "BridgeWestendGrandpa"; - -/// Name of the `WestendFinalityApi::best_finalized` runtime method. -pub const BEST_FINALIZED_WESTEND_HEADER_METHOD: &str = "WestendFinalityApi_best_finalized"; - -/// The target length of a session (how often authorities change) on Westend measured in of number -/// of blocks. -/// -/// Note that since this is a target sessions may change before/after this time depending on network -/// conditions. -pub const SESSION_LENGTH: BlockNumber = 10 * time_units::MINUTES; - -sp_api::decl_runtime_apis! { - /// API for querying information about the finalized Westend headers. - /// - /// This API is implemented by runtimes that are bridging with the Westend chain, not the - /// Westend runtime itself. - pub trait WestendFinalityApi { - /// Returns number and hash of the best finalized header known to the bridge module. - fn best_finalized() -> (BlockNumber, Hash); - } -} diff --git a/polkadot/bridges/primitives/chain-wococo/Cargo.toml b/polkadot/bridges/primitives/chain-wococo/Cargo.toml deleted file mode 100644 index 633cdd15c1f..00000000000 --- a/polkadot/bridges/primitives/chain-wococo/Cargo.toml +++ /dev/null @@ -1,34 +0,0 @@ -[package] -name = "bp-wococo" -description = "Primitives of Wococo runtime." 
-version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -parity-scale-codec = { version = "3.0.0", default-features = false, features = ["derive"] } - -# Bridge Dependencies -bp-messages = { path = "../messages", default-features = false } -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-rococo = { path = "../chain-rococo", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } - -# Substrate Based Dependencies -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } - -[features] -default = ["std"] -std = [ - "bp-messages/std", - "bp-polkadot-core/std", - "bp-runtime/std", - "bp-rococo/std", - "parity-scale-codec/std", - "sp-api/std", - "sp-runtime/std", - "sp-std/std", -] diff --git a/polkadot/bridges/primitives/chain-wococo/src/lib.rs b/polkadot/bridges/primitives/chain-wococo/src/lib.rs deleted file mode 100644 index f39543114c7..00000000000 --- a/polkadot/bridges/primitives/chain-wococo/src/lib.rs +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -#![cfg_attr(not(feature = "std"), no_std)] -// RuntimeApi generated functions -#![allow(clippy::too_many_arguments)] - -use bp_messages::{LaneId, MessageDetails, MessageNonce}; -use sp_runtime::FixedU128; -use sp_std::prelude::*; - -pub use bp_polkadot_core::*; -// Rococo runtime = Wococo runtime -pub use bp_rococo::{WeightToFee, EXISTENTIAL_DEPOSIT, PAY_INBOUND_DISPATCH_FEE_WEIGHT, VERSION}; - -/// Wococo Chain -pub type Wococo = PolkadotLike; - -/// The target length of a session (how often authorities change) on Wococo measured in of number -/// of blocks. -/// -/// Note that since this is a target sessions may change before/after this time depending on network -/// conditions. -pub const SESSION_LENGTH: BlockNumber = time_units::MINUTES; - -// We use this to get the account on Wococo (target) which is derived from Rococo's (source) -// account. -pub fn derive_account_from_rococo_id(id: bp_runtime::SourceAccount) -> AccountId { - let encoded_id = bp_runtime::derive_account_id(bp_runtime::ROCOCO_CHAIN_ID, id); - AccountIdConverter::convert(encoded_id) -} - -/// Name of the With-Wococo GRANDPA pallet instance that is deployed at bridged chains. -pub const WITH_WOCOCO_GRANDPA_PALLET_NAME: &str = "BridgeWococoGrandpa"; -/// Name of the With-Wococo messages pallet instance that is deployed at bridged chains. 
-pub const WITH_WOCOCO_MESSAGES_PALLET_NAME: &str = "BridgeWococoMessages"; - -/// Name of the `WococoFinalityApi::best_finalized` runtime method. -pub const BEST_FINALIZED_WOCOCO_HEADER_METHOD: &str = "WococoFinalityApi_best_finalized"; - -/// Name of the `ToWococoOutboundLaneApi::estimate_message_delivery_and_dispatch_fee` runtime -/// method. -pub const TO_WOCOCO_ESTIMATE_MESSAGE_FEE_METHOD: &str = - "ToWococoOutboundLaneApi_estimate_message_delivery_and_dispatch_fee"; -/// Name of the `ToWococoOutboundLaneApi::message_details` runtime method. -pub const TO_WOCOCO_MESSAGE_DETAILS_METHOD: &str = "ToWococoOutboundLaneApi_message_details"; - -sp_api::decl_runtime_apis! { - /// API for querying information about the finalized Wococo headers. - /// - /// This API is implemented by runtimes that are bridging with the Wococo chain, not the - /// Wococo runtime itself. - pub trait WococoFinalityApi { - /// Returns number and hash of the best finalized header known to the bridge module. - fn best_finalized() -> (BlockNumber, Hash); - } - - /// Outbound message lane API for messages that are sent to Wococo chain. - /// - /// This API is implemented by runtimes that are sending messages to Wococo chain, not the - /// Wococo runtime itself. - pub trait ToWococoOutboundLaneApi { - /// Estimate message delivery and dispatch fee that needs to be paid by the sender on - /// this chain. - /// - /// Returns `None` if message is too expensive to be sent to Wococo from this chain. - /// - /// Please keep in mind that this method returns the lowest message fee required for message - /// to be accepted to the lane. It may be good idea to pay a bit over this price to account - /// future exchange rate changes and guarantee that relayer would deliver your message - /// to the target chain. - fn estimate_message_delivery_and_dispatch_fee( - lane_id: LaneId, - payload: OutboundPayload, - wococo_to_this_conversion_rate: Option, - ) -> Option; - /// Returns dispatch weight, encoded payload size and delivery+dispatch fee of all - /// messages in given inclusive range. - /// - /// If some (or all) messages are missing from the storage, they'll also will - /// be missing from the resulting vector. The vector is ordered by the nonce. - fn message_details( - lane: LaneId, - begin: MessageNonce, - end: MessageNonce, - ) -> Vec>; - } -} diff --git a/polkadot/bridges/primitives/header-chain/Cargo.toml b/polkadot/bridges/primitives/header-chain/Cargo.toml deleted file mode 100644 index b5f5d3cc03a..00000000000 --- a/polkadot/bridges/primitives/header-chain/Cargo.toml +++ /dev/null @@ -1,46 +0,0 @@ -[package] -name = "bp-header-chain" -description = "A common interface for describing what a bridge pallet should be able to do." 
-version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } -finality-grandpa = { version = "0.16.0", default-features = false } -scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } -serde = { version = "1.0", optional = true } - -# Bridge dependencies - -bp-runtime = { path = "../runtime", default-features = false } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } - -[dev-dependencies] -bp-test-utils = { path = "../test-utils" } -hex = "0.4" -hex-literal = "0.3" - -[features] -default = ["std"] -std = [ - "bp-runtime/std", - "codec/std", - "finality-grandpa/std", - "scale-info/std", - "serde/std", - "frame-support/std", - "scale-info/std", - "sp-core/std", - "sp-finality-grandpa/std", - "sp-runtime/std", - "sp-std/std", -] diff --git a/polkadot/bridges/primitives/header-chain/src/justification.rs b/polkadot/bridges/primitives/header-chain/src/justification.rs deleted file mode 100644 index 5f3b7251789..00000000000 --- a/polkadot/bridges/primitives/header-chain/src/justification.rs +++ /dev/null @@ -1,227 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Pallet for checking GRANDPA Finality Proofs. -//! -//! Adapted copy of `substrate/client/finality-grandpa/src/justification.rs`. If origin -//! will ever be moved to the sp_finality_grandpa, we should reuse that implementation. - -use codec::{Decode, Encode}; -use finality_grandpa::voter_set::VoterSet; -use frame_support::RuntimeDebug; -use scale_info::TypeInfo; -use sp_finality_grandpa::{AuthorityId, AuthoritySignature, SetId}; -use sp_runtime::traits::Header as HeaderT; -use sp_std::{ - collections::{btree_map::BTreeMap, btree_set::BTreeSet}, - prelude::*, -}; - -/// A GRANDPA Justification is a proof that a given header was finalized -/// at a certain height and with a certain set of authorities. -/// -/// This particular proof is used to prove that headers on a bridged chain -/// (so not our chain) have been finalized correctly. 
-#[derive(Encode, Decode, RuntimeDebug, Clone, PartialEq, Eq, TypeInfo)] -pub struct GrandpaJustification<Header: HeaderT> { - /// The round (voting period) this justification is valid for. - pub round: u64, - /// The set of votes for the chain which is to be finalized. - pub commit: - finality_grandpa::Commit<Header::Hash, Header::Number, AuthoritySignature, AuthorityId>, - /// A proof that the chain of blocks in the commit are related to each other. - pub votes_ancestries: Vec<Header>
, -} - -impl<H: HeaderT> crate::FinalityProof<H::Number> for GrandpaJustification<H> { - fn target_header_number(&self) -> H::Number { - self.commit.target_number - } -} - -/// Justification verification error. -#[derive(RuntimeDebug, PartialEq)] -pub enum Error { - /// Failed to decode justification. - JustificationDecode, - /// Justification is finalizing an unexpected header. - InvalidJustificationTarget, - /// The authority has provided an invalid signature. - InvalidAuthoritySignature, - /// The justification contains a precommit for a header that is not a descendant of the commit - /// header. - PrecommitIsNotCommitDescendant, - /// The cumulative weight of all votes in the justification is not enough to justify commit - /// header finalization. - TooLowCumulativeWeight, - /// The justification contains extra (unused) headers in its `votes_ancestries` field. - ExtraHeadersInVotesAncestries, -} - -/// Decode justification target. -pub fn decode_justification_target<Header: HeaderT>( - raw_justification: &[u8], -) -> Result<(Header::Hash, Header::Number), Error> { - GrandpaJustification::<Header>
::decode(&mut &*raw_justification) - .map(|justification| (justification.commit.target_hash, justification.commit.target_number)) - .map_err(|_| Error::JustificationDecode) -} - -/// Verify that the justification, generated by the given authority set, finalizes the given header. -pub fn verify_justification<Header: HeaderT>( - finalized_target: (Header::Hash, Header::Number), - authorities_set_id: SetId, - authorities_set: &VoterSet<AuthorityId>, - justification: &GrandpaJustification<Header>
, -) -> Result<(), Error> -where - Header::Number: finality_grandpa::BlockNumberOps, -{ - // ensure that it is justification for the expected header - if (justification.commit.target_hash, justification.commit.target_number) != finalized_target { - return Err(Error::InvalidJustificationTarget) - } - - let mut chain = AncestryChain::new(&justification.votes_ancestries); - let mut signature_buffer = Vec::new(); - let mut votes = BTreeSet::new(); - let mut cumulative_weight = 0u64; - for signed in &justification.commit.precommits { - // authority must be in the set - let authority_info = match authorities_set.get(&signed.id) { - Some(authority_info) => authority_info, - None => { - // just ignore precommit from unknown authority as - // `finality_grandpa::import_precommit` does - continue - }, - }; - - // check if authority has already voted in the same round. - // - // there's a lot of code in `validate_commit` and `import_precommit` functions inside - // `finality-grandpa` crate (mostly related to reporing equivocations). But the only thing - // that we care about is that only first vote from the authority is accepted - if !votes.insert(signed.id.clone()) { - continue - } - - // everything below this line can't just `continue`, because state is already altered - - // all precommits must be for block higher than the target - if signed.precommit.target_number < justification.commit.target_number { - return Err(Error::PrecommitIsNotCommitDescendant) - } - // all precommits must be for target block descendents - chain = chain - .ensure_descendant(&justification.commit.target_hash, &signed.precommit.target_hash)?; - // since we know now that the precommit target is the descendant of the justification - // target, we may increase 'weight' of the justification target - // - // there's a lot of code in the `VoteGraph::insert` method inside `finality-grandpa` crate, - // but in the end it is only used to find GHOST, which we don't care about. The only thing - // that we care about is that the justification target has enough weight - cumulative_weight = cumulative_weight.checked_add(authority_info.weight().0.into()).expect( - "sum of weights of ALL authorities is expected not to overflow - this is guaranteed by\ - existence of VoterSet;\ - the order of loop conditions guarantees that we can account vote from same authority\ - multiple times;\ - thus we'll never overflow the u64::MAX;\ - qed", - ); - // verify authority signature - if !sp_finality_grandpa::check_message_signature_with_buffer( - &finality_grandpa::Message::Precommit(signed.precommit.clone()), - &signed.id, - &signed.signature, - justification.round, - authorities_set_id, - &mut signature_buffer, - ) { - return Err(Error::InvalidAuthoritySignature) - } - } - - // check that there are no extra headers in the justification - if !chain.unvisited.is_empty() { - return Err(Error::ExtraHeadersInVotesAncestries) - } - - // check that the cumulative weight of validators voted for the justification target (or one - // of its descendants) is larger than required threshold. - let threshold = authorities_set.threshold().0.into(); - if cumulative_weight >= threshold { - Ok(()) - } else { - Err(Error::TooLowCumulativeWeight) - } -} - -/// Votes ancestries with useful methods. -#[derive(RuntimeDebug)] -pub struct AncestryChain { - /// Header hash => parent header hash mapping. - pub parents: BTreeMap, - /// Hashes of headers that were not visited by `is_ancestor` method. - pub unvisited: BTreeSet, -} - -impl AncestryChain
<Header> { - /// Create new ancestry chain. - pub fn new(ancestry: &[Header]) -> AncestryChain<Header>
{ - let mut parents = BTreeMap::new(); - let mut unvisited = BTreeSet::new(); - for ancestor in ancestry { - let hash = ancestor.hash(); - let parent_hash = *ancestor.parent_hash(); - parents.insert(hash, parent_hash); - unvisited.insert(hash); - } - AncestryChain { parents, unvisited } - } - - /// Returns `Err(_)` if `precommit_target` is a descendant of the `commit_target` block and - /// `Ok(_)` otherwise. - pub fn ensure_descendant( - mut self, - commit_target: &Header::Hash, - precommit_target: &Header::Hash, - ) -> Result { - let mut current_hash = *precommit_target; - loop { - if current_hash == *commit_target { - break - } - - let is_visited_before = !self.unvisited.remove(¤t_hash); - current_hash = match self.parents.get(¤t_hash) { - Some(parent_hash) => { - if is_visited_before { - // `Some(parent_hash)` means that the `current_hash` is in the `parents` - // container `is_visited_before` means that it has been visited before in - // some of previous calls => since we assume that previous call has finished - // with `true`, this also will be finished with `true` - return Ok(self) - } - - *parent_hash - }, - None => return Err(Error::PrecommitIsNotCommitDescendant), - }; - } - Ok(self) - } -} diff --git a/polkadot/bridges/primitives/header-chain/src/lib.rs b/polkadot/bridges/primitives/header-chain/src/lib.rs deleted file mode 100644 index 28949f28de5..00000000000 --- a/polkadot/bridges/primitives/header-chain/src/lib.rs +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Defines traits which represent a common interface for Substrate pallets which want to -//! incorporate bridge functionality. - -#![cfg_attr(not(feature = "std"), no_std)] - -use codec::{Codec, Decode, Encode, EncodeLike}; -use core::{clone::Clone, cmp::Eq, default::Default, fmt::Debug}; -use scale_info::TypeInfo; -#[cfg(feature = "std")] -use serde::{Deserialize, Serialize}; -use sp_finality_grandpa::{AuthorityList, ConsensusLog, SetId, GRANDPA_ENGINE_ID}; -use sp_runtime::{generic::OpaqueDigestItemId, traits::Header as HeaderT, RuntimeDebug}; -use sp_std::boxed::Box; - -pub mod justification; -pub mod storage_keys; - -/// A type that can be used as a parameter in a dispatchable function. -/// -/// When using `decl_module` all arguments for call functions must implement this trait. -pub trait Parameter: Codec + EncodeLike + Clone + Eq + Debug + TypeInfo {} -impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + Debug + TypeInfo {} - -/// A GRANDPA Authority List and ID. -#[derive(Default, Encode, Decode, RuntimeDebug, PartialEq, Clone, TypeInfo)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -pub struct AuthoritySet { - /// List of GRANDPA authorities for the current round. 
- pub authorities: AuthorityList, - /// Monotonic identifier of the current GRANDPA authority set. - pub set_id: SetId, -} - -impl AuthoritySet { - /// Create a new GRANDPA Authority Set. - pub fn new(authorities: AuthorityList, set_id: SetId) -> Self { - Self { authorities, set_id } - } -} - -/// Data required for initializing the bridge pallet. -/// -/// The bridge needs to know where to start its sync from, and this provides that initial context. -#[derive(Default, Encode, Decode, RuntimeDebug, PartialEq, Eq, Clone, TypeInfo)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -pub struct InitializationData { - /// The header from which we should start syncing. - pub header: Box, - /// The initial authorities of the pallet. - pub authority_list: AuthorityList, - /// The ID of the initial authority set. - pub set_id: SetId, - /// Should the pallet block transaction immediately after initialization. - pub is_halted: bool, -} - -/// base trait for verifying transaction inclusion proofs. -pub trait InclusionProofVerifier { - /// Transaction type. - type Transaction: Parameter; - /// Transaction inclusion proof type. - type TransactionInclusionProof: Parameter; - - /// Verify that transaction is a part of given block. - /// - /// Returns Some(transaction) if proof is valid and None otherwise. - fn verify_transaction_inclusion_proof( - proof: &Self::TransactionInclusionProof, - ) -> Option; -} - -/// A trait for pallets which want to keep track of finalized headers from a bridged chain. -pub trait HeaderChain { - /// Get the best finalized header known to the header chain. - fn best_finalized() -> H; - - /// Get the best authority set known to the header chain. - fn authority_set() -> AuthoritySet; - - /// Write a header finalized by GRANDPA to the underlying pallet storage. - fn append_header(header: H) -> Result<(), E>; -} - -impl HeaderChain for () { - fn best_finalized() -> H { - H::default() - } - - fn authority_set() -> AuthoritySet { - AuthoritySet::default() - } - - fn append_header(_header: H) -> Result<(), E> { - Ok(()) - } -} - -/// Abstract finality proof that is justifying block finality. -pub trait FinalityProof: Clone + Send + Sync + Debug { - /// Return number of header that this proof is generated for. - fn target_header_number(&self) -> Number; -} - -/// Find header digest that schedules next GRANDPA authorities set. -pub fn find_grandpa_authorities_scheduled_change( - header: &H, -) -> Option> { - let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); - - let filter_log = |log: ConsensusLog| match log { - ConsensusLog::ScheduledChange(change) => Some(change), - _ => None, - }; - - // find the first consensus digest with the right ID which converts to - // the right kind of consensus log. - header.digest().convert_first(|l| l.try_to(id).and_then(filter_log)) -} diff --git a/polkadot/bridges/primitives/header-chain/src/storage_keys.rs b/polkadot/bridges/primitives/header-chain/src/storage_keys.rs deleted file mode 100644 index e123703eed5..00000000000 --- a/polkadot/bridges/primitives/header-chain/src/storage_keys.rs +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Storage keys of bridge GRANDPA pallet. - -/// Name of the `IsHalted` storage value. -pub const IS_HALTED_VALUE_NAME: &str = "IsHalted"; -/// Name of the `BestFinalized` storage value. -pub const BEST_FINALIZED_VALUE_NAME: &str = "BestFinalized"; - -use sp_core::storage::StorageKey; - -/// Storage key of the `IsHalted` flag in the runtime storage. -pub fn is_halted_key(pallet_prefix: &str) -> StorageKey { - StorageKey( - bp_runtime::storage_value_final_key( - pallet_prefix.as_bytes(), - IS_HALTED_VALUE_NAME.as_bytes(), - ) - .to_vec(), - ) -} - -/// Storage key of the best finalized header hash value in the runtime storage. -pub fn best_finalized_hash_key(pallet_prefix: &str) -> StorageKey { - StorageKey( - bp_runtime::storage_value_final_key( - pallet_prefix.as_bytes(), - BEST_FINALIZED_VALUE_NAME.as_bytes(), - ) - .to_vec(), - ) -} - -#[cfg(test)] -mod tests { - use super::*; - use hex_literal::hex; - - #[test] - fn is_halted_key_computed_properly() { - // If this test fails, then something has been changed in module storage that is breaking - // compatibility with previous pallet. - let storage_key = is_halted_key("BridgeGrandpa").0; - assert_eq!( - storage_key, - hex!("0b06f475eddb98cf933a12262e0388de9611a984bbd04e2fd39f97bbc006115f").to_vec(), - "Unexpected storage key: {}", - hex::encode(&storage_key), - ); - } - - #[test] - fn best_finalized_hash_key_computed_properly() { - // If this test fails, then something has been changed in module storage that is breaking - // compatibility with previous pallet. - let storage_key = best_finalized_hash_key("BridgeGrandpa").0; - assert_eq!( - storage_key, - hex!("0b06f475eddb98cf933a12262e0388dea4ebafdd473c549fdb24c5c991c5591c").to_vec(), - "Unexpected storage key: {}", - hex::encode(&storage_key), - ); - } -} diff --git a/polkadot/bridges/primitives/header-chain/tests/implementation_match.rs b/polkadot/bridges/primitives/header-chain/tests/implementation_match.rs deleted file mode 100644 index 0813c26dc3a..00000000000 --- a/polkadot/bridges/primitives/header-chain/tests/implementation_match.rs +++ /dev/null @@ -1,335 +0,0 @@ -// Copyright 2020-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Tests inside this module are made to ensure that our custom justification verification -//! implementation works exactly as `fn finality_grandpa::validate_commit`. -//! -//! Some of tests in this module may partially duplicate tests from `justification.rs`, -//! but their purpose is different. 
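The two key helpers above (and the hex constants asserted in their tests) follow the standard FRAME layout for a plain storage value: `twox128(pallet_prefix) ++ twox128(value_name)`, 32 bytes in total. A small sketch of that computation, assuming `bp_runtime::storage_value_final_key` uses exactly this scheme:

// Recompute a plain storage value key the FRAME way; assumes the helper above
// is the usual twox128(pallet prefix) ++ twox128(value name) layout.
use sp_core::hashing::twox_128;

fn storage_value_key(pallet_prefix: &str, value_name: &str) -> Vec<u8> {
    let mut key = Vec::with_capacity(32);
    key.extend_from_slice(&twox_128(pallet_prefix.as_bytes()));
    key.extend_from_slice(&twox_128(value_name.as_bytes()));
    key
}

// Under that assumption, `storage_value_key("BridgeGrandpa", "IsHalted")` and
// `storage_value_key("BridgeGrandpa", "BestFinalized")` reproduce the constants
// asserted by `is_halted_key_computed_properly` and friends above.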
- -use bp_header_chain::justification::{verify_justification, Error, GrandpaJustification}; -use bp_test_utils::{ - header_id, make_justification_for_header, signed_precommit, test_header, Account, - JustificationGeneratorParams, ALICE, BOB, CHARLIE, DAVE, EVE, TEST_GRANDPA_SET_ID, -}; -use finality_grandpa::voter_set::VoterSet; -use sp_finality_grandpa::{AuthorityId, AuthorityWeight}; -use sp_runtime::traits::Header as HeaderT; - -type TestHeader = sp_runtime::testing::Header; -type TestHash = ::Hash; -type TestNumber = ::Number; - -/// Implementation of `finality_grandpa::Chain` that is used in tests. -struct AncestryChain(bp_header_chain::justification::AncestryChain); - -impl AncestryChain { - fn new(ancestry: &[TestHeader]) -> Self { - Self(bp_header_chain::justification::AncestryChain::new(ancestry)) - } -} - -impl finality_grandpa::Chain for AncestryChain { - fn ancestry( - &self, - base: TestHash, - block: TestHash, - ) -> Result, finality_grandpa::Error> { - let mut route = Vec::new(); - let mut current_hash = block; - loop { - if current_hash == base { - break - } - match self.0.parents.get(¤t_hash).cloned() { - Some(parent_hash) => { - current_hash = parent_hash; - route.push(current_hash); - }, - _ => return Err(finality_grandpa::Error::NotDescendent), - } - } - route.pop(); // remove the base - - Ok(route) - } -} - -/// Get a full set of accounts. -fn full_accounts_set() -> Vec<(Account, AuthorityWeight)> { - vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1), (DAVE, 1), (EVE, 1)] -} - -/// Get a full set of GRANDPA authorities. -fn full_voter_set() -> VoterSet { - VoterSet::new(full_accounts_set().iter().map(|(id, w)| (AuthorityId::from(*id), *w))).unwrap() -} - -/// Get a minimal set of accounts. -fn minimal_accounts_set() -> Vec<(Account, AuthorityWeight)> { - // there are 5 accounts in the full set => we need 2/3 + 1 accounts, which results in 4 accounts - vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1), (DAVE, 1)] -} - -/// Get a minimal subset of GRANDPA authorities that have enough cumulative vote weight to justify a -/// header finality. -pub fn minimal_voter_set() -> VoterSet { - VoterSet::new(minimal_accounts_set().iter().map(|(id, w)| (AuthorityId::from(*id), *w))) - .unwrap() -} - -/// Make a valid GRANDPA justification with sensible defaults. -pub fn make_default_justification(header: &TestHeader) -> GrandpaJustification { - make_justification_for_header(JustificationGeneratorParams { - header: header.clone(), - authorities: minimal_accounts_set(), - ..Default::default() - }) -} - -// the `finality_grandpa::validate_commit` function has two ways to report an unsuccessful -// commit validation: -// -// 1) to return `Err()` (which only may happen if `finality_grandpa::Chain` implementation -// returns an error); -// 2) to return `Ok(validation_result)` if `validation_result.is_valid()` is false. -// -// Our implementation would just return error in both cases. 
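Both the custom `verify_justification` shown earlier and `finality_grandpa::validate_commit` boil down to the same accounting: count each authority's vote at most once and require the accumulated weight to reach the voter-set threshold. A self-contained sketch of that rule, with authority ids and weights simplified to plain integers:

// Simplified weight accounting: only the first vote per authority counts and
// the total must reach the threshold. Mirrors the loop in `verify_justification`.
use std::collections::BTreeSet;

fn has_supermajority(votes: &[(u32, u64)], threshold: u64) -> bool {
    let mut seen = BTreeSet::new();
    let mut cumulative_weight = 0u64;
    for (authority, weight) in votes {
        // Duplicate / equivocating votes from the same authority are ignored,
        // just like `finality_grandpa::import_precommit` ignores them.
        if !seen.insert(*authority) {
            continue
        }
        cumulative_weight = cumulative_weight.saturating_add(*weight);
    }
    cumulative_weight >= threshold
}

fn main() {
    // Four distinct voters of weight 1 reach a threshold of 4, even if one of
    // them voted twice; two voters alone do not reach a threshold of 3.
    assert!(has_supermajority(&[(1, 1), (2, 1), (2, 1), (3, 1), (4, 1)], 4));
    assert!(!has_supermajority(&[(1, 1), (2, 1)], 3));
}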
- -#[test] -fn same_result_when_precommit_target_has_lower_number_than_commit_target() { - let mut justification = make_default_justification(&test_header(1)); - // the number of header in precommit (0) is lower than number of header in commit (1) - justification.commit.precommits[0].precommit.target_number = 0; - - // our implementation returns an error - assert_eq!( - verify_justification::( - header_id::(1), - TEST_GRANDPA_SET_ID, - &full_voter_set(), - &justification, - ), - Err(Error::PrecommitIsNotCommitDescendant), - ); - - // original implementation returns `Ok(validation_result)` - // with `validation_result.is_valid() == false`. - let result = finality_grandpa::validate_commit( - &justification.commit, - &full_voter_set(), - &AncestryChain::new(&justification.votes_ancestries), - ) - .unwrap(); - - assert!(!result.is_valid()); -} - -#[test] -fn same_result_when_precommit_target_is_not_descendant_of_commit_target() { - let not_descendant = test_header::(10); - let mut justification = make_default_justification(&test_header(1)); - // the route from header of commit (1) to header of precommit (10) is missing from - // the votes ancestries - justification.commit.precommits[0].precommit.target_number = *not_descendant.number(); - justification.commit.precommits[0].precommit.target_hash = not_descendant.hash(); - justification.votes_ancestries.push(not_descendant); - - // our implementation returns an error - assert_eq!( - verify_justification::( - header_id::(1), - TEST_GRANDPA_SET_ID, - &full_voter_set(), - &justification, - ), - Err(Error::PrecommitIsNotCommitDescendant), - ); - - // original implementation returns `Ok(validation_result)` - // with `validation_result.is_valid() == false`. - let result = finality_grandpa::validate_commit( - &justification.commit, - &full_voter_set(), - &AncestryChain::new(&justification.votes_ancestries), - ) - .unwrap(); - - assert!(!result.is_valid()); -} - -#[test] -fn same_result_when_justification_contains_duplicate_vote() { - let mut justification = make_justification_for_header(JustificationGeneratorParams { - header: test_header(1), - authorities: minimal_accounts_set(), - ancestors: 0, - ..Default::default() - }); - - // the justification may contain exactly the same vote (i.e. same precommit and same signature) - // multiple times && it isn't treated as an error by original implementation - justification.commit.precommits.push(justification.commit.precommits[0].clone()); - justification.commit.precommits.push(justification.commit.precommits[0].clone()); - - // our implementation succeeds - assert_eq!( - verify_justification::( - header_id::(1), - TEST_GRANDPA_SET_ID, - &full_voter_set(), - &justification, - ), - Ok(()), - ); - - // original implementation returns `Ok(validation_result)` - // with `validation_result.is_valid() == true`. 
- let result = finality_grandpa::validate_commit( - &justification.commit, - &full_voter_set(), - &AncestryChain::new(&justification.votes_ancestries), - ) - .unwrap(); - - assert!(result.is_valid()); -} - -#[test] -fn same_result_when_authority_equivocates_once_in_a_round() { - let mut justification = make_justification_for_header(JustificationGeneratorParams { - header: test_header(1), - authorities: minimal_accounts_set(), - ancestors: 0, - ..Default::default() - }); - - // the justification original implementation allows authority to submit two different - // votes in a single round, of which only first is 'accepted' - justification.commit.precommits.push(signed_precommit::( - &ALICE, - header_id::(1), - justification.round, - TEST_GRANDPA_SET_ID, - )); - - // our implementation succeeds - assert_eq!( - verify_justification::( - header_id::(1), - TEST_GRANDPA_SET_ID, - &full_voter_set(), - &justification, - ), - Ok(()), - ); - - // original implementation returns `Ok(validation_result)` - // with `validation_result.is_valid() == true`. - let result = finality_grandpa::validate_commit( - &justification.commit, - &full_voter_set(), - &AncestryChain::new(&justification.votes_ancestries), - ) - .unwrap(); - - assert!(result.is_valid()); -} - -#[test] -fn same_result_when_authority_equivocates_twice_in_a_round() { - let mut justification = make_justification_for_header(JustificationGeneratorParams { - header: test_header(1), - authorities: minimal_accounts_set(), - ancestors: 0, - ..Default::default() - }); - - // there's some code in the original implementation that should return an error when - // same authority submits more than two different votes in a single round: - // https://github.com/paritytech/finality-grandpa/blob/6aeea2d1159d0f418f0b86e70739f2130629ca09/src/lib.rs#L473 - // but there's also a code that prevents this from happening: - // https://github.com/paritytech/finality-grandpa/blob/6aeea2d1159d0f418f0b86e70739f2130629ca09/src/round.rs#L287 - // => so now we are also just ignoring all votes from the same authority, except the first one - justification.commit.precommits.push(signed_precommit::( - &ALICE, - header_id::(1), - justification.round, - TEST_GRANDPA_SET_ID, - )); - justification.commit.precommits.push(signed_precommit::( - &ALICE, - header_id::(1), - justification.round, - TEST_GRANDPA_SET_ID, - )); - - // our implementation succeeds - assert_eq!( - verify_justification::( - header_id::(1), - TEST_GRANDPA_SET_ID, - &full_voter_set(), - &justification, - ), - Ok(()), - ); - - // original implementation returns `Ok(validation_result)` - // with `validation_result.is_valid() == true`. 
- let result = finality_grandpa::validate_commit( - &justification.commit, - &full_voter_set(), - &AncestryChain::new(&justification.votes_ancestries), - ) - .unwrap(); - - assert!(result.is_valid()); -} - -#[test] -fn same_result_when_there_are_not_enough_cumulative_weight_to_finalize_commit_target() { - // just remove one authority from the minimal set and we shall not reach the threshold - let mut authorities_set = minimal_accounts_set(); - authorities_set.pop(); - let justification = make_justification_for_header(JustificationGeneratorParams { - header: test_header(1), - authorities: authorities_set, - ..Default::default() - }); - - // our implementation returns an error - assert_eq!( - verify_justification::( - header_id::(1), - TEST_GRANDPA_SET_ID, - &full_voter_set(), - &justification, - ), - Err(Error::TooLowCumulativeWeight), - ); - - // original implementation returns `Ok(validation_result)` - // with `validation_result.is_valid() == false`. - let result = finality_grandpa::validate_commit( - &justification.commit, - &full_voter_set(), - &AncestryChain::new(&justification.votes_ancestries), - ) - .unwrap(); - - assert!(!result.is_valid()); -} diff --git a/polkadot/bridges/primitives/header-chain/tests/justification.rs b/polkadot/bridges/primitives/header-chain/tests/justification.rs deleted file mode 100644 index 5b4981a0f69..00000000000 --- a/polkadot/bridges/primitives/header-chain/tests/justification.rs +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2020-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Tests for Grandpa Justification code. 
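The "minimal" authority sets used throughout these tests come from GRANDPA's supermajority rule: with total weight `w`, at most `f = (w - 1) / 3` weight may be faulty, so a commit needs at least `w - f` weight behind it. A tiny helper showing that arithmetic, assuming the usual rounding used by finality-grandpa's voter set:

// Supermajority threshold for a voter set of the given total weight, assuming
// the usual `total - (total - 1) / 3` rule.
fn grandpa_threshold(total_weight: u64) -> u64 {
    let max_faulty = total_weight.saturating_sub(1) / 3;
    total_weight - max_faulty
}

fn main() {
    // Five weight-1 voters => four signatures needed (the "2/3 + 1" set above).
    assert_eq!(grandpa_threshold(5), 4);
    // Four weight-1 voters => three signatures needed, which is why a
    // two-signature justification fails with `TooLowCumulativeWeight`.
    assert_eq!(grandpa_threshold(4), 3);
}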
- -use bp_header_chain::justification::{verify_justification, Error}; -use bp_test_utils::*; - -type TestHeader = sp_runtime::testing::Header; - -#[test] -fn valid_justification_accepted() { - let authorities = vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1), (DAVE, 1)]; - let params = JustificationGeneratorParams { - header: test_header(1), - round: TEST_GRANDPA_ROUND, - set_id: TEST_GRANDPA_SET_ID, - authorities: authorities.clone(), - ancestors: 7, - forks: 3, - }; - - let justification = make_justification_for_header::(params.clone()); - assert_eq!( - verify_justification::( - header_id::(1), - TEST_GRANDPA_SET_ID, - &voter_set(), - &justification, - ), - Ok(()), - ); - - assert_eq!(justification.commit.precommits.len(), authorities.len()); - assert_eq!(justification.votes_ancestries.len(), params.ancestors as usize); -} - -#[test] -fn valid_justification_accepted_with_single_fork() { - let params = JustificationGeneratorParams { - header: test_header(1), - round: TEST_GRANDPA_ROUND, - set_id: TEST_GRANDPA_SET_ID, - authorities: vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1), (DAVE, 1), (EVE, 1)], - ancestors: 5, - forks: 1, - }; - - assert_eq!( - verify_justification::( - header_id::(1), - TEST_GRANDPA_SET_ID, - &voter_set(), - &make_justification_for_header::(params) - ), - Ok(()), - ); -} - -#[test] -fn valid_justification_accepted_with_arbitrary_number_of_authorities() { - use finality_grandpa::voter_set::VoterSet; - use sp_finality_grandpa::AuthorityId; - - let n = 15; - let authorities = accounts(n).iter().map(|k| (*k, 1)).collect::>(); - - let params = JustificationGeneratorParams { - header: test_header(1), - round: TEST_GRANDPA_ROUND, - set_id: TEST_GRANDPA_SET_ID, - authorities: authorities.clone(), - ancestors: n.into(), - forks: n.into(), - }; - - let authorities = authorities - .iter() - .map(|(id, w)| (AuthorityId::from(*id), *w)) - .collect::>(); - let voter_set = VoterSet::new(authorities).unwrap(); - - assert_eq!( - verify_justification::( - header_id::(1), - TEST_GRANDPA_SET_ID, - &voter_set, - &make_justification_for_header::(params) - ), - Ok(()), - ); -} - -#[test] -fn justification_with_invalid_target_rejected() { - assert_eq!( - verify_justification::( - header_id::(2), - TEST_GRANDPA_SET_ID, - &voter_set(), - &make_default_justification::(&test_header(1)), - ), - Err(Error::InvalidJustificationTarget), - ); -} - -#[test] -fn justification_with_invalid_commit_rejected() { - let mut justification = make_default_justification::(&test_header(1)); - justification.commit.precommits.clear(); - - assert_eq!( - verify_justification::( - header_id::(1), - TEST_GRANDPA_SET_ID, - &voter_set(), - &justification, - ), - Err(Error::ExtraHeadersInVotesAncestries), - ); -} - -#[test] -fn justification_with_invalid_authority_signature_rejected() { - let mut justification = make_default_justification::(&test_header(1)); - justification.commit.precommits[0].signature = - sp_core::crypto::UncheckedFrom::unchecked_from([1u8; 64]); - - assert_eq!( - verify_justification::( - header_id::(1), - TEST_GRANDPA_SET_ID, - &voter_set(), - &justification, - ), - Err(Error::InvalidAuthoritySignature), - ); -} - -#[test] -fn justification_with_invalid_precommit_ancestry() { - let mut justification = make_default_justification::(&test_header(1)); - justification.votes_ancestries.push(test_header(10)); - - assert_eq!( - verify_justification::( - header_id::(1), - TEST_GRANDPA_SET_ID, - &voter_set(), - &justification, - ), - Err(Error::ExtraHeadersInVotesAncestries), - ); -} - -#[test] -fn 
justification_is_invalid_if_we_dont_meet_threshold() { - // Need at least three authorities to sign off or else the voter set threshold can't be reached - let authorities = vec![(ALICE, 1), (BOB, 1)]; - - let params = JustificationGeneratorParams { - header: test_header(1), - round: TEST_GRANDPA_ROUND, - set_id: TEST_GRANDPA_SET_ID, - authorities: authorities.clone(), - ancestors: 2 * authorities.len() as u32, - forks: 2, - }; - - assert_eq!( - verify_justification::( - header_id::(1), - TEST_GRANDPA_SET_ID, - &voter_set(), - &make_justification_for_header::(params) - ), - Err(Error::TooLowCumulativeWeight), - ); -} diff --git a/polkadot/bridges/primitives/message-dispatch/Cargo.toml b/polkadot/bridges/primitives/message-dispatch/Cargo.toml deleted file mode 100644 index 39b2d00111e..00000000000 --- a/polkadot/bridges/primitives/message-dispatch/Cargo.toml +++ /dev/null @@ -1,27 +0,0 @@ -[package] -name = "bp-message-dispatch" -description = "Primitives of bridge messages dispatch modules." -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -bp-runtime = { path = "../runtime", default-features = false } -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } -scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } - -[features] -default = ["std"] -std = [ - "bp-runtime/std", - "codec/std", - "frame-support/std", - "scale-info/std", - "sp-std/std", -] diff --git a/polkadot/bridges/primitives/message-dispatch/src/lib.rs b/polkadot/bridges/primitives/message-dispatch/src/lib.rs deleted file mode 100644 index 07e448ee7ae..00000000000 --- a/polkadot/bridges/primitives/message-dispatch/src/lib.rs +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! A common interface for all Bridge Message Dispatch modules. - -#![cfg_attr(not(feature = "std"), no_std)] -#![warn(missing_docs)] - -use bp_runtime::{ - messages::{DispatchFeePayment, MessageDispatchResult}, - ChainId, Size, -}; -use codec::{Decode, Encode}; -use frame_support::RuntimeDebug; -use scale_info::TypeInfo; -use sp_std::prelude::*; - -/// Message dispatch weight. -pub type Weight = u64; - -/// Spec version type. -pub type SpecVersion = u32; - -/// A generic trait to dispatch arbitrary messages delivered over the bridge. -pub trait MessageDispatch { - /// A type of the message to be dispatched. - type Message: codec::Decode; - - /// Estimate dispatch weight. 
- /// - /// This function must: (1) be instant and (2) return correct upper bound - /// of dispatch weight. - fn dispatch_weight(message: &Self::Message) -> Weight; - - /// Dispatches the message internally. - /// - /// `source_chain` indicates the chain where the message came from. - /// `target_chain` indicates the chain where message dispatch happens. - /// - /// `id` is a short unique identifier of the message. - /// - /// If message is `Ok`, then it should be dispatched. If it is `Err`, then it's just - /// a sign that some other component has rejected the message even before it has - /// reached `dispatch` method (right now this may only be caused if we fail to decode - /// the whole message). - /// - /// Returns unspent dispatch weight. - fn dispatch Result<(), ()>>( - source_chain: ChainId, - target_chain: ChainId, - id: BridgeMessageId, - message: Result, - pay_dispatch_fee: P, - ) -> MessageDispatchResult; -} - -/// Origin of a Call when it is dispatched on the target chain. -/// -/// The source chain can (and should) verify that the message can be dispatched on the target chain -/// with a particular origin given the source chain's origin. This can be done with the -/// `verify_message_origin()` function. -#[derive(RuntimeDebug, Encode, Decode, Clone, PartialEq, Eq, TypeInfo)] -pub enum CallOrigin { - /// Call is sent by the Root origin on the source chain. On the target chain it is dispatched - /// from a derived account. - /// - /// The derived account represents the source Root account on the target chain. This is useful - /// if the target chain needs some way of knowing that a call came from a privileged origin on - /// the source chain (maybe to allow a configuration change for example). - SourceRoot, - - /// Call is sent by `SourceChainAccountId` on the source chain. On the target chain it is - /// dispatched from an account controlled by a private key on the target chain. - /// - /// The account can be identified by `TargetChainAccountPublic`. The proof that the - /// `SourceChainAccountId` controls `TargetChainAccountPublic` is the `TargetChainSignature` - /// over `(Call, SourceChainAccountId, TargetChainSpecVersion, SourceChainBridgeId).encode()`. - /// - /// NOTE sending messages using this origin (or any other) does not have replay protection! - /// The assumption is that both the source account and the target account is controlled by - /// the same entity, so source-chain replay protection is sufficient. - /// As a consequence, it's extremely important for the target chain user to never produce - /// a signature with their target-private key on something that could be sent over the bridge, - /// i.e. if the target user signs `(, Call::Transfer(X, 5))` - /// The owner of `some-source-account-id` can send that message multiple times, which would - /// result with multiple transfer calls being dispatched on the target chain. - /// So please, NEVER USE YOUR PRIVATE KEY TO SIGN SOMETHING YOU DON'T FULLY UNDERSTAND! - TargetAccount(SourceChainAccountId, TargetChainAccountPublic, TargetChainSignature), - - /// Call is sent by the `SourceChainAccountId` on the source chain. On the target chain it is - /// dispatched from a derived account ID. - /// - /// The account ID on the target chain is derived from the source account ID. This is useful if - /// you need a way to represent foreign accounts on this chain for call dispatch purposes. - /// - /// Note that the derived account does not need to have a private key on the target chain. 
This - /// origin can therefore represent proxies, pallets, etc. as well as "regular" accounts. - SourceAccount(SourceChainAccountId), -} - -/// Message payload type used by dispatch module. -#[derive(RuntimeDebug, Encode, Decode, Clone, PartialEq, Eq, TypeInfo)] -pub struct MessagePayload< - SourceChainAccountId, - TargetChainAccountPublic, - TargetChainSignature, - Call, -> { - /// Runtime specification version. We only dispatch messages that have the same - /// runtime version. Otherwise we risk to misinterpret encoded calls. - pub spec_version: SpecVersion, - /// Weight of the call, declared by the message sender. If it is less than actual - /// static weight, the call is not dispatched. - pub weight: Weight, - /// Call origin to be used during dispatch. - pub origin: CallOrigin, - /// Where the fee for dispatching message is paid? - pub dispatch_fee_payment: DispatchFeePayment, - /// The call itself. - pub call: Call, -} - -impl Size - for MessagePayload> -{ - fn size_hint(&self) -> u32 { - self.call.len() as _ - } -} diff --git a/polkadot/bridges/primitives/messages/Cargo.toml b/polkadot/bridges/primitives/messages/Cargo.toml deleted file mode 100644 index 2a84f74d225..00000000000 --- a/polkadot/bridges/primitives/messages/Cargo.toml +++ /dev/null @@ -1,43 +0,0 @@ -[package] -name = "bp-messages" -description = "Primitives of messages module." -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -bitvec = { version = "1", default-features = false, features = ["alloc"] } -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive", "bit-vec"] } -impl-trait-for-tuples = "0.2" -scale-info = { version = "2.0.1", default-features = false, features = ["bit-vec", "derive"] } -serde = { version = "1.0", optional = true, features = ["derive"] } - -# Bridge dependencies - -bp-runtime = { path = "../runtime", default-features = false } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } - -[dev-dependencies] -hex = "0.4" -hex-literal = "0.3" - -[features] -default = ["std"] -std = [ - "bitvec/std", - "bp-runtime/std", - "codec/std", - "frame-support/std", - "frame-system/std", - "scale-info/std", - "serde", - "sp-core/std", - "sp-std/std" -] diff --git a/polkadot/bridges/primitives/messages/src/lib.rs b/polkadot/bridges/primitives/messages/src/lib.rs deleted file mode 100644 index 05ac38d7e48..00000000000 --- a/polkadot/bridges/primitives/messages/src/lib.rs +++ /dev/null @@ -1,402 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
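To make the dispatch payload defined above concrete, here is a condensed, std-only mirror of `CallOrigin`/`MessagePayload` with one payload built for a plain source-chain account; the type aliases and the 32-byte account type are illustrative stand-ins for the real generic parameters, and the `TargetAccount` variant is omitted for brevity.

// Condensed stand-ins for the generic message-dispatch types above.
type SpecVersion = u32;
type Weight = u64;
type SourceAccountId = [u8; 32];

#[allow(dead_code)]
enum CallOrigin {
    /// Dispatched from the account derived from the source chain root.
    SourceRoot,
    /// Dispatched from the account derived from `SourceAccountId`.
    SourceAccount(SourceAccountId),
}

#[allow(dead_code)]
enum DispatchFeePayment {
    /// Dispatch fee is paid at the source chain, when the message is sent.
    AtSourceChain,
    /// Dispatch fee is paid at the target chain, during dispatch.
    AtTargetChain,
}

struct MessagePayload {
    spec_version: SpecVersion,
    weight: Weight,
    origin: CallOrigin,
    dispatch_fee_payment: DispatchFeePayment,
    /// Pre-encoded target chain call.
    call: Vec<u8>,
}

fn main() {
    let payload = MessagePayload {
        spec_version: 1,
        weight: 1_000_000,
        origin: CallOrigin::SourceAccount([0u8; 32]),
        dispatch_fee_payment: DispatchFeePayment::AtSourceChain,
        call: vec![0u8; 32],
    };
    assert!(matches!(payload.origin, CallOrigin::SourceAccount(_)));
}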
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Primitives of messages module. - -#![cfg_attr(not(feature = "std"), no_std)] -// RuntimeApi generated functions -#![allow(clippy::too_many_arguments)] - -use bitvec::prelude::*; -use bp_runtime::messages::DispatchFeePayment; -use codec::{Decode, Encode}; -use frame_support::RuntimeDebug; -use scale_info::TypeInfo; -use sp_std::{collections::vec_deque::VecDeque, prelude::*}; - -pub mod source_chain; -pub mod storage_keys; -pub mod target_chain; - -// Weight is reexported to avoid additional frame-support dependencies in related crates. -pub use frame_support::weights::Weight; - -/// Messages pallet operating mode. -#[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug, TypeInfo)] -#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] -pub enum OperatingMode { - /// Normal mode, when all operations are allowed. - Normal, - /// The pallet is not accepting outbound messages. Inbound messages and receival proofs - /// are still accepted. - /// - /// This mode may be used e.g. when bridged chain expects upgrade. Then to avoid dispatch - /// failures, the pallet owner may stop accepting new messages, while continuing to deliver - /// queued messages to the bridged chain. Once upgrade is completed, the mode may be switched - /// back to `Normal`. - RejectingOutboundMessages, - /// The pallet is halted. All operations (except operating mode change) are prohibited. - Halted, -} - -impl Default for OperatingMode { - fn default() -> Self { - OperatingMode::Normal - } -} - -/// Messages pallet parameter. -pub trait Parameter: frame_support::Parameter { - /// Save parameter value in the runtime storage. - fn save(&self); -} - -impl Parameter for () { - fn save(&self) {} -} - -/// Lane identifier. -pub type LaneId = [u8; 4]; - -/// Message nonce. Valid messages will never have 0 nonce. -pub type MessageNonce = u64; - -/// Message id as a tuple. -pub type BridgeMessageId = (LaneId, MessageNonce); - -/// Opaque message payload. We only decode this payload when it is dispatched. -pub type MessagePayload = Vec; - -/// Message key (unique message identifier) as it is stored in the storage. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] -pub struct MessageKey { - /// ID of the message lane. - pub lane_id: LaneId, - /// Message nonce. - pub nonce: MessageNonce, -} - -/// Message data as it is stored in the storage. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] -pub struct MessageData { - /// Message payload. - pub payload: MessagePayload, - /// Message delivery and dispatch fee, paid by the submitter. - pub fee: Fee, -} - -/// Message as it is stored in the storage. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] -pub struct Message { - /// Message key. - pub key: MessageKey, - /// Message data. - pub data: MessageData, -} - -/// Inbound lane data. -#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq, TypeInfo)] -pub struct InboundLaneData { - /// Identifiers of relayers and messages that they have delivered to this lane (ordered by - /// message nonce). 
- /// - /// This serves as a helper storage item, to allow the source chain to easily pay rewards - /// to the relayers who successfully delivered messages to the target chain (inbound lane). - /// - /// It is guaranteed to have at most N entries, where N is configured at the module level. - /// If there are N entries in this vec, then: - /// 1) all incoming messages are rejected if they're missing corresponding - /// `proof-of(outbound-lane.state)`; 2) all incoming messages are rejected if - /// `proof-of(outbound-lane.state).last_delivered_nonce` is equal to - /// `self.last_confirmed_nonce`. Given what is said above, all nonces in this queue are in - /// range: `(self.last_confirmed_nonce; self.last_delivered_nonce()]`. - /// - /// When a relayer sends a single message, both of MessageNonces are the same. - /// When relayer sends messages in a batch, the first arg is the lowest nonce, second arg the - /// highest nonce. Multiple dispatches from the same relayer are allowed. - pub relayers: VecDeque>, - - /// Nonce of the last message that - /// a) has been delivered to the target (this) chain and - /// b) the delivery has been confirmed on the source chain - /// - /// that the target chain knows of. - /// - /// This value is updated indirectly when an `OutboundLane` state of the source - /// chain is received alongside with new messages delivery. - pub last_confirmed_nonce: MessageNonce, -} - -impl Default for InboundLaneData { - fn default() -> Self { - InboundLaneData { relayers: VecDeque::new(), last_confirmed_nonce: 0 } - } -} - -impl InboundLaneData { - /// Returns approximate size of the struct, given a number of entries in the `relayers` set and - /// size of each entry. - /// - /// Returns `None` if size overflows `u32` limits. - pub fn encoded_size_hint( - relayer_id_encoded_size: u32, - relayers_entries: u32, - messages_count: u32, - ) -> Option { - let message_nonce_size = 8; - let relayers_entry_size = relayer_id_encoded_size.checked_add(2 * message_nonce_size)?; - let relayers_size = relayers_entries.checked_mul(relayers_entry_size)?; - let dispatch_results_per_byte = 8; - let dispatch_result_size = - sp_std::cmp::max(relayers_entries, messages_count / dispatch_results_per_byte); - relayers_size - .checked_add(message_nonce_size) - .and_then(|result| result.checked_add(dispatch_result_size)) - } - - /// Nonce of the last message that has been delivered to this (target) chain. - pub fn last_delivered_nonce(&self) -> MessageNonce { - self.relayers - .back() - .map(|entry| entry.messages.end) - .unwrap_or(self.last_confirmed_nonce) - } -} - -/// Message details, returned by runtime APIs. -#[derive(Clone, Encode, Decode, RuntimeDebug, PartialEq, Eq)] -pub struct MessageDetails { - /// Nonce assigned to the message. - pub nonce: MessageNonce, - /// Message dispatch weight, declared by the submitter. - pub dispatch_weight: Weight, - /// Size of the encoded message. - pub size: u32, - /// Delivery+dispatch fee paid by the message submitter at the source chain. - pub delivery_and_dispatch_fee: OutboundMessageFee, - /// Where the fee for dispatching message is paid? - pub dispatch_fee_payment: DispatchFeePayment, -} - -/// Bit vector of message dispatch results. -pub type DispatchResultsBitVec = BitVec; - -/// Unrewarded relayer entry stored in the inbound lane data. -/// -/// This struct represents a continuous range of messages that have been delivered by the same -/// relayer and whose confirmations are still pending. 
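The `relayers` queue described above drives both helpers of this impl block; in particular, `last_delivered_nonce` only needs the newest unconfirmed range. A small std-only sketch of that bookkeeping, with the relayer id and per-message dispatch results elided:

// Sketch of the inbound-lane nonce bookkeeping; relayer ids and dispatch
// results are elided, only the delivered ranges and the confirmed nonce remain.
use std::collections::VecDeque;

type MessageNonce = u64;

/// A delivered-but-unconfirmed message range from a single relayer.
#[allow(dead_code)]
struct UnrewardedRange {
    begin: MessageNonce,
    end: MessageNonce,
}

struct InboundLaneData {
    relayers: VecDeque<UnrewardedRange>,
    last_confirmed_nonce: MessageNonce,
}

impl InboundLaneData {
    /// Nonce of the last message delivered to this (target) chain: the end of
    /// the newest unconfirmed range, or the last confirmed nonce if the queue
    /// is empty.
    fn last_delivered_nonce(&self) -> MessageNonce {
        self.relayers
            .back()
            .map(|entry| entry.end)
            .unwrap_or(self.last_confirmed_nonce)
    }
}

fn main() {
    let lane = InboundLaneData {
        relayers: VecDeque::from(vec![
            UnrewardedRange { begin: 11, end: 15 },
            UnrewardedRange { begin: 16, end: 20 },
        ]),
        last_confirmed_nonce: 10,
    };
    assert_eq!(lane.last_delivered_nonce(), 20);
}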
-#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq, TypeInfo)] -pub struct UnrewardedRelayer { - /// Identifier of the relayer. - pub relayer: RelayerId, - /// Messages range, delivered by this relayer. - pub messages: DeliveredMessages, -} - -/// Delivered messages with their dispatch result. -#[derive(Clone, Default, Encode, Decode, RuntimeDebug, PartialEq, Eq, TypeInfo)] -pub struct DeliveredMessages { - /// Nonce of the first message that has been delivered (inclusive). - pub begin: MessageNonce, - /// Nonce of the last message that has been delivered (inclusive). - pub end: MessageNonce, - /// Dispatch result (`false`/`true`), returned by the message dispatcher for every - /// message in the `[begin; end]` range. See `dispatch_result` field of the - /// `bp_runtime::messages::MessageDispatchResult` structure for more information. - pub dispatch_results: DispatchResultsBitVec, -} - -impl DeliveredMessages { - /// Create new `DeliveredMessages` struct that confirms delivery of single nonce with given - /// dispatch result. - pub fn new(nonce: MessageNonce, dispatch_result: bool) -> Self { - let mut dispatch_results = BitVec::with_capacity(1); - dispatch_results.push(if dispatch_result { true } else { false }); - DeliveredMessages { begin: nonce, end: nonce, dispatch_results } - } - - /// Return total count of delivered messages. - pub fn total_messages(&self) -> MessageNonce { - if self.end >= self.begin { - self.end - self.begin + 1 - } else { - 0 - } - } - - /// Note new dispatched message. - pub fn note_dispatched_message(&mut self, dispatch_result: bool) { - self.end += 1; - self.dispatch_results.push(dispatch_result); - } - - /// Returns true if delivered messages contain message with given nonce. - pub fn contains_message(&self, nonce: MessageNonce) -> bool { - (self.begin..=self.end).contains(&nonce) - } - - /// Get dispatch result flag by message nonce. - /// - /// Dispatch result flag must be interpreted using the knowledge of dispatch mechanism - /// at the target chain. See `dispatch_result` field of the - /// `bp_runtime::messages::MessageDispatchResult` structure for more information. - /// - /// Panics if message nonce is not in the `begin..=end` range. Typically you'll first - /// check if message is within the range by calling `contains_message`. - pub fn message_dispatch_result(&self, nonce: MessageNonce) -> bool { - const INVALID_NONCE: &str = "Invalid nonce used to index dispatch_results"; - - let index = nonce.checked_sub(self.begin).expect(INVALID_NONCE) as usize; - *self.dispatch_results.get(index).expect(INVALID_NONCE) - } -} - -/// Gist of `InboundLaneData::relayers` field used by runtime APIs. -#[derive(Clone, Default, Encode, Decode, RuntimeDebug, PartialEq, Eq, TypeInfo)] -pub struct UnrewardedRelayersState { - /// Number of entries in the `InboundLaneData::relayers` set. - pub unrewarded_relayer_entries: MessageNonce, - /// Number of messages in the oldest entry of `InboundLaneData::relayers`. This is the - /// minimal number of reward proofs required to push out this entry from the set. - pub messages_in_oldest_entry: MessageNonce, - /// Total number of messages in the relayers vector. - pub total_messages: MessageNonce, -} - -/// Outbound lane data. -#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq, TypeInfo)] -pub struct OutboundLaneData { - /// Nonce of the oldest message that we haven't yet pruned. May point to not-yet-generated - /// message if all sent messages are already pruned. 
- pub oldest_unpruned_nonce: MessageNonce, - /// Nonce of the latest message, received by bridged chain. - pub latest_received_nonce: MessageNonce, - /// Nonce of the latest message, generated by us. - pub latest_generated_nonce: MessageNonce, -} - -impl Default for OutboundLaneData { - fn default() -> Self { - OutboundLaneData { - // it is 1 because we're pruning everything in [oldest_unpruned_nonce; - // latest_received_nonce] - oldest_unpruned_nonce: 1, - latest_received_nonce: 0, - latest_generated_nonce: 0, - } - } -} - -/// Returns total number of messages in the `InboundLaneData::relayers` vector. -/// -/// Returns `None` if there are more messages that `MessageNonce` may fit (i.e. `MessageNonce + 1`). -pub fn total_unrewarded_messages( - relayers: &VecDeque>, -) -> Option { - match (relayers.front(), relayers.back()) { - (Some(front), Some(back)) => { - if let Some(difference) = back.messages.end.checked_sub(front.messages.begin) { - difference.checked_add(1) - } else { - Some(0) - } - }, - _ => Some(0), - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn total_unrewarded_messages_does_not_overflow() { - assert_eq!( - total_unrewarded_messages( - &vec![ - UnrewardedRelayer { relayer: 1, messages: DeliveredMessages::new(0, true) }, - UnrewardedRelayer { - relayer: 2, - messages: DeliveredMessages::new(MessageNonce::MAX, true) - }, - ] - .into_iter() - .collect() - ), - None, - ); - } - - #[test] - fn inbound_lane_data_returns_correct_hint() { - let test_cases = vec![ - // single relayer, multiple messages - (1, 128u8), - // multiple relayers, single message per relayer - (128u8, 128u8), - // several messages per relayer - (13u8, 128u8), - ]; - for (relayer_entries, messages_count) in test_cases { - let expected_size = InboundLaneData::::encoded_size_hint( - 1, - relayer_entries as _, - messages_count as _, - ); - let actual_size = InboundLaneData { - relayers: (1u8..=relayer_entries) - .map(|i| { - let mut entry = UnrewardedRelayer { - relayer: i, - messages: DeliveredMessages::new(i as _, true), - }; - entry.messages.dispatch_results = bitvec![ - u8, Msb0; - 1; - (messages_count / relayer_entries) as _ - ]; - entry - }) - .collect(), - last_confirmed_nonce: messages_count as _, - } - .encode() - .len(); - let difference = (expected_size.unwrap() as f64 - actual_size as f64).abs(); - assert!( - difference / (std::cmp::min(actual_size, expected_size.unwrap() as usize) as f64) < 0.1, - "Too large difference between actual ({}) and expected ({:?}) inbound lane data size. Test case: {}+{}", - actual_size, - expected_size, - relayer_entries, - messages_count, - ); - } - } - - #[test] - fn message_dispatch_result_works() { - let delivered_messages = - DeliveredMessages { begin: 100, end: 150, dispatch_results: bitvec![u8, Msb0; 1; 151] }; - - assert!(!delivered_messages.contains_message(99)); - assert!(delivered_messages.contains_message(100)); - assert!(delivered_messages.contains_message(150)); - assert!(!delivered_messages.contains_message(151)); - - assert!(delivered_messages.message_dispatch_result(125)); - } -} diff --git a/polkadot/bridges/primitives/messages/src/source_chain.rs b/polkadot/bridges/primitives/messages/src/source_chain.rs deleted file mode 100644 index fa7b3bb85ed..00000000000 --- a/polkadot/bridges/primitives/messages/src/source_chain.rs +++ /dev/null @@ -1,299 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
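The `total_unrewarded_messages_does_not_overflow` test above exercises the checked arithmetic of the free function defined just before it: the count spans from the `begin` of the oldest queued range to the `end` of the newest one, and overflow yields `None` instead of wrapping. A self-contained sketch of that logic with the relayer entry reduced to its nonce range:

// Checked counting of queued (unrewarded) messages, mirroring the helper above;
// the relayer entry is reduced to just its delivered nonce range.
use std::collections::VecDeque;

type MessageNonce = u64;

struct UnrewardedRange {
    begin: MessageNonce,
    end: MessageNonce,
}

/// Total number of messages in the queue, or `None` if it doesn't fit into a
/// `MessageNonce`.
fn total_unrewarded_messages(relayers: &VecDeque<UnrewardedRange>) -> Option<MessageNonce> {
    match (relayers.front(), relayers.back()) {
        (Some(front), Some(back)) => match back.end.checked_sub(front.begin) {
            Some(difference) => difference.checked_add(1),
            None => Some(0),
        },
        _ => Some(0),
    }
}

fn main() {
    let relayers: VecDeque<_> = vec![
        UnrewardedRange { begin: 0, end: 0 },
        UnrewardedRange { begin: 1, end: MessageNonce::MAX },
    ]
    .into_iter()
    .collect();
    // `MAX - 0 + 1` overflows, so the helper reports `None`, as the test above expects.
    assert_eq!(total_unrewarded_messages(&relayers), None);
}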
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Primitives of messages module, that are used on the source chain. - -use crate::{DeliveredMessages, InboundLaneData, LaneId, MessageNonce, OutboundLaneData}; - -use crate::UnrewardedRelayer; -use bp_runtime::Size; -use frame_support::{weights::Weight, Parameter, RuntimeDebug}; -use sp_std::{ - collections::{btree_map::BTreeMap, vec_deque::VecDeque}, - fmt::Debug, - ops::RangeInclusive, -}; - -/// The sender of the message on the source chain. -pub trait SenderOrigin { - /// Return id of the account that is sending this message. - /// - /// In regular messages configuration, when regular message is sent you'll always get `Some(_)` - /// from this call. This is the account that is paying send costs. However, there are some - /// examples when `None` may be returned from the call: - /// - /// - if the send-message call origin is either `frame_system::RawOrigin::Root` or - /// `frame_system::RawOrigin::None` and your configuration forbids such messages; - /// - if your configuration allows 'unpaid' messages sent by pallets. Then the pallet may just - /// use its own defined origin (not linked to any account) and the message will be accepted. - /// This may be useful for pallets that are sending important system-wide information (like - /// update of runtime version). - fn linked_account(&self) -> Option; -} - -/// Relayers rewards, grouped by relayer account id. -pub type RelayersRewards = BTreeMap>; - -/// Single relayer rewards. -#[derive(RuntimeDebug, Default)] -pub struct RelayerRewards { - /// Total rewards that are to be paid to the relayer. - pub reward: Balance, - /// Total number of messages relayed by this relayer. - pub messages: MessageNonce, -} - -/// Target chain API. Used by source chain to verify target chain proofs. -/// -/// All implementations of this trait should only work with finalized data that -/// can't change. Wrong implementation may lead to invalid lane states (i.e. lane -/// that's stuck) and/or processing messages without paying fees. -pub trait TargetHeaderChain { - /// Error type. - type Error: Debug + Into<&'static str>; - - /// Proof that messages have been received by target chain. - type MessagesDeliveryProof: Parameter + Size; - - /// Verify message payload before we accept it. - /// - /// **CAUTION**: this is very important function. Incorrect implementation may lead - /// to stuck lanes and/or relayers loses. - /// - /// The proper implementation must ensure that the delivery-transaction with this - /// payload would (at least) be accepted into target chain transaction pool AND - /// eventually will be successfully mined. The most obvious incorrect implementation - /// example would be implementation for BTC chain that accepts payloads larger than - /// 1MB. 
BTC nodes aren't accepting transactions that are larger than 1MB, so relayer - /// will be unable to craft valid transaction => this (and all subsequent) messages will - /// never be delivered. - fn verify_message(payload: &Payload) -> Result<(), Self::Error>; - - /// Verify messages delivery proof and return lane && nonce of the latest received message. - fn verify_messages_delivery_proof( - proof: Self::MessagesDeliveryProof, - ) -> Result<(LaneId, InboundLaneData), Self::Error>; -} - -/// Lane message verifier. -/// -/// Runtime developer may implement any additional validation logic over message-lane mechanism. -/// E.g. if lanes should have some security (e.g. you can only accept Lane1 messages from -/// Submitter1, Lane2 messages for those who has submitted first message to this lane, disable -/// Lane3 until some block, ...), then it may be built using this verifier. -/// -/// Any fee requirements should also be enforced here. -pub trait LaneMessageVerifier { - /// Error type. - type Error: Debug + Into<&'static str>; - - /// Verify message payload and return Ok(()) if message is valid and allowed to be sent over the - /// lane. - fn verify_message( - submitter: &SenderOrigin, - delivery_and_dispatch_fee: &Fee, - lane: &LaneId, - outbound_data: &OutboundLaneData, - payload: &Payload, - ) -> Result<(), Self::Error>; -} - -/// Message delivery payment. It is called as a part of submit-message transaction. Transaction -/// submitter is paying (in source chain tokens/assets) for: -/// -/// 1) submit-message-transaction-fee itself. This fee is not included in the -/// `delivery_and_dispatch_fee` and is withheld by the regular transaction payment mechanism; -/// 2) message-delivery-transaction-fee. It is submitted to the target node by relayer; -/// 3) message-dispatch fee. It is paid by relayer for processing message by target chain; -/// 4) message-receiving-delivery-transaction-fee. It is submitted to the source node -/// by relayer. -/// -/// So to be sure that any non-altruist relayer would agree to deliver message, submitter -/// should set `delivery_and_dispatch_fee` to at least (equivalent of): sum of fees from (2) -/// to (4) above, plus some interest for the relayer. -pub trait MessageDeliveryAndDispatchPayment { - /// Error type. - type Error: Debug + Into<&'static str>; - - /// Withhold/write-off delivery_and_dispatch_fee from submitter account to - /// some relayers-fund account. - fn pay_delivery_and_dispatch_fee( - submitter: &SenderOrigin, - fee: &Balance, - relayer_fund_account: &AccountId, - ) -> Result<(), Self::Error>; - - /// Pay rewards for delivering messages to the given relayers. - /// - /// The implementation may also choose to pay reward to the `confirmation_relayer`, which is - /// a relayer that has submitted delivery confirmation transaction. - fn pay_relayers_rewards( - lane_id: LaneId, - messages_relayers: VecDeque>, - confirmation_relayer: &AccountId, - received_range: &RangeInclusive, - relayer_fund_account: &AccountId, - ); -} - -/// Send message artifacts. -#[derive(RuntimeDebug, PartialEq)] -pub struct SendMessageArtifacts { - /// Nonce of the message. - pub nonce: MessageNonce, - /// Actual weight of send message call. - pub weight: Weight, -} - -/// Messages bridge API to be used from other pallets. -pub trait MessagesBridge { - /// Error type. - type Error: Debug; - - /// Send message over the bridge. - /// - /// Returns unique message nonce or error if send has failed. 
- fn send_message( - sender: SenderOrigin, - lane: LaneId, - message: Payload, - delivery_and_dispatch_fee: Balance, - ) -> Result; -} - -/// Bridge that does nothing when message is being sent. -#[derive(RuntimeDebug, PartialEq)] -pub struct NoopMessagesBridge; - -impl - MessagesBridge for NoopMessagesBridge -{ - type Error = &'static str; - - fn send_message( - _sender: SenderOrigin, - _lane: LaneId, - _message: Payload, - _delivery_and_dispatch_fee: Balance, - ) -> Result { - Ok(SendMessageArtifacts { nonce: 0, weight: 0 }) - } -} - -/// Handler for messages delivery confirmation. -pub trait OnDeliveryConfirmed { - /// Called when we receive confirmation that our messages have been delivered to the - /// target chain. The confirmation also has single bit dispatch result for every - /// confirmed message (see `DeliveredMessages` for details). Guaranteed to be called - /// only when at least one message is delivered. - /// - /// Should return total weight consumed by the call. - /// - /// NOTE: messages pallet assumes that maximal weight that may be spent on processing - /// single message is single DB read + single DB write. So this function shall never - /// return weight that is larger than total number of messages * (db read + db write). - /// If your pallet needs more time for processing single message, please do it - /// from `on_initialize` call(s) of the next block(s). - fn on_messages_delivered(_lane: &LaneId, _messages: &DeliveredMessages) -> Weight; -} - -#[impl_trait_for_tuples::impl_for_tuples(30)] -impl OnDeliveryConfirmed for Tuple { - fn on_messages_delivered(lane: &LaneId, messages: &DeliveredMessages) -> Weight { - let mut total_weight: Weight = 0; - for_tuples!( - #( - total_weight = total_weight.saturating_add(Tuple::on_messages_delivered(lane, messages)); - )* - ); - total_weight - } -} - -/// Handler for messages have been accepted -pub trait OnMessageAccepted { - /// Called when a message has been accepted by message pallet. - fn on_messages_accepted(lane: &LaneId, message: &MessageNonce) -> Weight; -} - -impl OnMessageAccepted for () { - fn on_messages_accepted(_lane: &LaneId, _message: &MessageNonce) -> Weight { - 0 - } -} - -/// Structure that may be used in place of `TargetHeaderChain`, `LaneMessageVerifier` and -/// `MessageDeliveryAndDispatchPayment` on chains, where outbound messages are forbidden. -pub struct ForbidOutboundMessages; - -/// Error message that is used in `ForbidOutboundMessages` implementation. 
-const ALL_OUTBOUND_MESSAGES_REJECTED: &str = - "This chain is configured to reject all outbound messages"; - -impl TargetHeaderChain for ForbidOutboundMessages { - type Error = &'static str; - - type MessagesDeliveryProof = (); - - fn verify_message(_payload: &Payload) -> Result<(), Self::Error> { - Err(ALL_OUTBOUND_MESSAGES_REJECTED) - } - - fn verify_messages_delivery_proof( - _proof: Self::MessagesDeliveryProof, - ) -> Result<(LaneId, InboundLaneData), Self::Error> { - Err(ALL_OUTBOUND_MESSAGES_REJECTED) - } -} - -impl - LaneMessageVerifier for ForbidOutboundMessages -{ - type Error = &'static str; - - fn verify_message( - _submitter: &SenderOrigin, - _delivery_and_dispatch_fee: &Fee, - _lane: &LaneId, - _outbound_data: &OutboundLaneData, - _payload: &Payload, - ) -> Result<(), Self::Error> { - Err(ALL_OUTBOUND_MESSAGES_REJECTED) - } -} - -impl - MessageDeliveryAndDispatchPayment for ForbidOutboundMessages -{ - type Error = &'static str; - - fn pay_delivery_and_dispatch_fee( - _submitter: &SenderOrigin, - _fee: &Balance, - _relayer_fund_account: &AccountId, - ) -> Result<(), Self::Error> { - Err(ALL_OUTBOUND_MESSAGES_REJECTED) - } - - fn pay_relayers_rewards( - _lane_id: LaneId, - _messages_relayers: VecDeque>, - _confirmation_relayer: &AccountId, - _received_range: &RangeInclusive, - _relayer_fund_account: &AccountId, - ) { - } -} diff --git a/polkadot/bridges/primitives/messages/src/storage_keys.rs b/polkadot/bridges/primitives/messages/src/storage_keys.rs deleted file mode 100644 index 19494b8b852..00000000000 --- a/polkadot/bridges/primitives/messages/src/storage_keys.rs +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Storage keys of bridge messages pallet. - -/// Name of the `OPERATING_MODE_VALUE_NAME` storage value. -pub const OPERATING_MODE_VALUE_NAME: &str = "PalletOperatingMode"; -/// Name of the `OutboundMessages` storage map. -pub const OUTBOUND_MESSAGES_MAP_NAME: &str = "OutboundMessages"; -/// Name of the `OutboundLanes` storage map. -pub const OUTBOUND_LANES_MAP_NAME: &str = "OutboundLanes"; -/// Name of the `InboundLanes` storage map. -pub const INBOUND_LANES_MAP_NAME: &str = "InboundLanes"; - -use crate::{LaneId, MessageKey, MessageNonce}; - -use codec::Encode; -use frame_support::Blake2_128Concat; -use sp_core::storage::StorageKey; - -/// Storage key of the `PalletOperatingMode` value in the runtime storage. -pub fn operating_mode_key(pallet_prefix: &str) -> StorageKey { - StorageKey( - bp_runtime::storage_value_final_key( - pallet_prefix.as_bytes(), - OPERATING_MODE_VALUE_NAME.as_bytes(), - ) - .to_vec(), - ) -} - -/// Storage key of the outbound message in the runtime storage. 
-pub fn message_key(pallet_prefix: &str, lane: &LaneId, nonce: MessageNonce) -> StorageKey { - bp_runtime::storage_map_final_key::( - pallet_prefix, - OUTBOUND_MESSAGES_MAP_NAME, - &MessageKey { lane_id: *lane, nonce }.encode(), - ) -} - -/// Storage key of the outbound message lane state in the runtime storage. -pub fn outbound_lane_data_key(pallet_prefix: &str, lane: &LaneId) -> StorageKey { - bp_runtime::storage_map_final_key::( - pallet_prefix, - OUTBOUND_LANES_MAP_NAME, - lane, - ) -} - -/// Storage key of the inbound message lane state in the runtime storage. -pub fn inbound_lane_data_key(pallet_prefix: &str, lane: &LaneId) -> StorageKey { - bp_runtime::storage_map_final_key::( - pallet_prefix, - INBOUND_LANES_MAP_NAME, - lane, - ) -} - -#[cfg(test)] -mod tests { - use super::*; - use hex_literal::hex; - - #[test] - fn operating_mode_key_computed_properly() { - // If this test fails, then something has been changed in module storage that is possibly - // breaking all existing message relays. - let storage_key = operating_mode_key("BridgeMessages").0; - assert_eq!( - storage_key, - hex!("dd16c784ebd3390a9bc0357c7511ed010f4cf0917788d791142ff6c1f216e7b3").to_vec(), - "Unexpected storage key: {}", - hex::encode(&storage_key), - ); - } - - #[test] - fn storage_message_key_computed_properly() { - // If this test fails, then something has been changed in module storage that is breaking - // all previously crafted messages proofs. - let storage_key = message_key("BridgeMessages", &*b"test", 42).0; - assert_eq!( - storage_key, - hex!("dd16c784ebd3390a9bc0357c7511ed018a395e6242c6813b196ca31ed0547ea79446af0e09063bd4a7874aef8a997cec746573742a00000000000000").to_vec(), - "Unexpected storage key: {}", - hex::encode(&storage_key), - ); - } - - #[test] - fn outbound_lane_data_key_computed_properly() { - // If this test fails, then something has been changed in module storage that is breaking - // all previously crafted outbound lane state proofs. - let storage_key = outbound_lane_data_key("BridgeMessages", &*b"test").0; - assert_eq!( - storage_key, - hex!("dd16c784ebd3390a9bc0357c7511ed0196c246acb9b55077390e3ca723a0ca1f44a8995dd50b6657a037a7839304535b74657374").to_vec(), - "Unexpected storage key: {}", - hex::encode(&storage_key), - ); - } - - #[test] - fn inbound_lane_data_key_computed_properly() { - // If this test fails, then something has been changed in module storage that is breaking - // all previously crafted inbound lane state proofs. - let storage_key = inbound_lane_data_key("BridgeMessages", &*b"test").0; - assert_eq!( - storage_key, - hex!("dd16c784ebd3390a9bc0357c7511ed01e5f83cf83f2127eb47afdc35d6e43fab44a8995dd50b6657a037a7839304535b74657374").to_vec(), - "Unexpected storage key: {}", - hex::encode(&storage_key), - ); - } -} diff --git a/polkadot/bridges/primitives/messages/src/target_chain.rs b/polkadot/bridges/primitives/messages/src/target_chain.rs deleted file mode 100644 index a84ea7af907..00000000000 --- a/polkadot/bridges/primitives/messages/src/target_chain.rs +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
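The storage keys asserted in the tests above are plain hash concatenations. A standalone sketch that reproduces the `PalletOperatingMode` key, assuming the same `sp-io` and `hex` dependencies the removed crates already use:

use sp_io::hashing::twox_128;

/// Mirror of `storage_value_key`: twox128(pallet prefix) ++ twox128(value name).
fn value_key(pallet_prefix: &str, value_name: &str) -> Vec<u8> {
    let mut key = twox_128(pallet_prefix.as_bytes()).to_vec();
    key.extend_from_slice(&twox_128(value_name.as_bytes()));
    key
}

fn main() {
    // Same value that `operating_mode_key_computed_properly` checks above.
    assert_eq!(
        hex::encode(value_key("BridgeMessages", "PalletOperatingMode")),
        "dd16c784ebd3390a9bc0357c7511ed010f4cf0917788d791142ff6c1f216e7b3",
    );
}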
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Primitives of messages module, that are used on the target chain. - -use crate::{LaneId, Message, MessageData, MessageKey, OutboundLaneData}; - -use bp_runtime::{messages::MessageDispatchResult, Size}; -use codec::{Decode, Encode, Error as CodecError}; -use frame_support::{weights::Weight, Parameter, RuntimeDebug}; -use scale_info::TypeInfo; -use sp_std::{collections::btree_map::BTreeMap, fmt::Debug, prelude::*}; - -/// Proved messages from the source chain. -pub type ProvedMessages = BTreeMap>; - -/// Proved messages from single lane of the source chain. -#[derive(RuntimeDebug, Encode, Decode, Clone, PartialEq, Eq, TypeInfo)] -pub struct ProvedLaneMessages { - /// Optional outbound lane state. - pub lane_state: Option, - /// Messages sent through this lane. - pub messages: Vec, -} - -/// Message data with decoded dispatch payload. -#[derive(RuntimeDebug)] -pub struct DispatchMessageData { - /// Result of dispatch payload decoding. - pub payload: Result, - /// Message delivery and dispatch fee, paid by the submitter. - pub fee: Fee, -} - -/// Message with decoded dispatch payload. -#[derive(RuntimeDebug)] -pub struct DispatchMessage { - /// Message key. - pub key: MessageKey, - /// Message data with decoded dispatch payload. - pub data: DispatchMessageData, -} - -/// Source chain API. Used by target chain, to verify source chain proofs. -/// -/// All implementations of this trait should only work with finalized data that -/// can't change. Wrong implementation may lead to invalid lane states (i.e. lane -/// that's stuck) and/or processing messages without paying fees. -pub trait SourceHeaderChain { - /// Error type. - type Error: Debug + Into<&'static str>; - - /// Proof that messages are sent from source chain. This may also include proof - /// of corresponding outbound lane states. - type MessagesProof: Parameter + Size; - - /// Verify messages proof and return proved messages. - /// - /// Returns error if either proof is incorrect, or the number of messages in the proof - /// is not matching the `messages_count`. - /// - /// Messages vector is required to be sorted by nonce within each lane. Out-of-order - /// messages will be rejected. - /// - /// The `messages_count` argument verification (sane limits) is supposed to be made - /// outside this function. This function only verifies that the proof declares exactly - /// `messages_count` messages. - fn verify_messages_proof( - proof: Self::MessagesProof, - messages_count: u32, - ) -> Result>, Self::Error>; -} - -/// Called when inbound message is received. -pub trait MessageDispatch { - /// Decoded message payload type. Valid message may contain invalid payload. In this case - /// message is delivered, but dispatch fails. Therefore, two separate types of payload - /// (opaque `MessagePayload` used in delivery and this `DispatchPayload` used in dispatch). - type DispatchPayload: Decode; - - /// Estimate dispatch weight. - /// - /// This function must: (1) be instant and (2) return correct upper bound - /// of dispatch weight. - fn dispatch_weight(message: &DispatchMessage) -> Weight; - - /// Called when inbound message is received. 
- /// - /// It is up to the implementers of this trait to determine whether the message - /// is invalid (i.e. improperly encoded, has too large weight, ...) or not. - /// - /// If your configuration allows paying dispatch fee at the target chain, then - /// it must be paid inside this method to the `relayer_account`. - fn dispatch( - relayer_account: &AccountId, - message: DispatchMessage, - ) -> MessageDispatchResult; -} - -impl Default for ProvedLaneMessages { - fn default() -> Self { - ProvedLaneMessages { lane_state: None, messages: Vec::new() } - } -} - -impl From> for DispatchMessage { - fn from(message: Message) -> Self { - DispatchMessage { key: message.key, data: message.data.into() } - } -} - -impl From> - for DispatchMessageData -{ - fn from(data: MessageData) -> Self { - DispatchMessageData { - payload: DispatchPayload::decode(&mut &data.payload[..]), - fee: data.fee, - } - } -} - -/// Structure that may be used in place of `SourceHeaderChain` and `MessageDispatch` on chains, -/// where inbound messages are forbidden. -pub struct ForbidInboundMessages; - -/// Error message that is used in `ForbidOutboundMessages` implementation. -const ALL_INBOUND_MESSAGES_REJECTED: &str = - "This chain is configured to reject all inbound messages"; - -impl SourceHeaderChain for ForbidInboundMessages { - type Error = &'static str; - type MessagesProof = (); - - fn verify_messages_proof( - _proof: Self::MessagesProof, - _messages_count: u32, - ) -> Result>, Self::Error> { - Err(ALL_INBOUND_MESSAGES_REJECTED) - } -} - -impl MessageDispatch for ForbidInboundMessages { - type DispatchPayload = (); - - fn dispatch_weight(_message: &DispatchMessage) -> Weight { - Weight::MAX - } - - fn dispatch( - _: &AccountId, - _: DispatchMessage, - ) -> MessageDispatchResult { - MessageDispatchResult { - dispatch_result: false, - unspent_weight: 0, - dispatch_fee_paid_during_dispatch: false, - } - } -} diff --git a/polkadot/bridges/primitives/polkadot-core/Cargo.toml b/polkadot/bridges/primitives/polkadot-core/Cargo.toml deleted file mode 100644 index 1542a784ef5..00000000000 --- a/polkadot/bridges/primitives/polkadot-core/Cargo.toml +++ /dev/null @@ -1,45 +0,0 @@ -[package] -name = "bp-polkadot-core" -description = "Primitives of Polkadot-like runtime." 
-version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -parity-scale-codec = { version = "3.0.0", default-features = false, features = ["derive"] } -scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } - -# Bridge Dependencies - -bp-messages = { path = "../messages", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } - -# Substrate Based Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } - -[dev-dependencies] -hex = "0.4" - -[features] -default = ["std"] -std = [ - "bp-messages/std", - "bp-runtime/std", - "frame-support/std", - "frame-system/std", - "parity-scale-codec/std", - "scale-info/std", - "sp-api/std", - "sp-core/std", - "sp-runtime/std", - "sp-std/std", - "sp-version/std", -] diff --git a/polkadot/bridges/primitives/polkadot-core/src/lib.rs b/polkadot/bridges/primitives/polkadot-core/src/lib.rs deleted file mode 100644 index 4c0a450eb71..00000000000 --- a/polkadot/bridges/primitives/polkadot-core/src/lib.rs +++ /dev/null @@ -1,450 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -#![cfg_attr(not(feature = "std"), no_std)] - -use bp_messages::MessageNonce; -use bp_runtime::{Chain, EncodedOrDecodedCall}; -use frame_support::{ - dispatch::Dispatchable, - parameter_types, - unsigned::TransactionValidityError, - weights::{ - constants::{BlockExecutionWeight, WEIGHT_PER_SECOND}, - DispatchClass, Weight, - }, - Blake2_128Concat, RuntimeDebug, StorageHasher, Twox128, -}; -use frame_system::limits; -use parity_scale_codec::Compact; -use scale_info::{StaticTypeInfo, TypeInfo}; -use sp_core::Hasher as HasherT; -use sp_runtime::{ - generic, - traits::{BlakeTwo256, DispatchInfoOf, IdentifyAccount, Verify}, - MultiAddress, MultiSignature, OpaqueExtrinsic, -}; -use sp_std::prelude::Vec; - -// Re-export's to avoid extra substrate dependencies in chain-specific crates. 
-pub use frame_support::{weights::constants::ExtrinsicBaseWeight, Parameter}; -pub use sp_runtime::{traits::Convert, Perbill}; - -/// Number of extra bytes (excluding size of storage value itself) of storage proof, built at -/// Polkadot-like chain. This mostly depends on number of entries in the storage trie. -/// Some reserve is reserved to account future chain growth. -/// -/// To compute this value, we've synced Kusama chain blocks [0; 6545733] to see if there were -/// any significant changes of the storage proof size (NO): -/// -/// - at block 3072 the storage proof size overhead was 579 bytes; -/// - at block 2479616 it was 578 bytes; -/// - at block 4118528 it was 711 bytes; -/// - at block 6540800 it was 779 bytes. -/// -/// The number of storage entries at the block 6546170 was 351207 and number of trie nodes in -/// the storage proof was 5 (log(16, 351207) ~ 4.6). -/// -/// So the assumption is that the storage proof size overhead won't be larger than 1024 in the -/// nearest future. If it'll ever break this barrier, then we'll need to update this constant -/// at next runtime upgrade. -pub const EXTRA_STORAGE_PROOF_SIZE: u32 = 1024; - -/// Maximal size (in bytes) of encoded (using `Encode::encode()`) account id. -/// -/// All polkadot-like chains are using same crypto. -pub const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = 32; - -/// All Polkadot-like chains allow normal extrinsics to fill block up to 75 percent. -/// -/// This is a copy-paste from the Polkadot repo's `polkadot-runtime-common` crate. -const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); - -/// All Polkadot-like chains allow 2 seconds of compute with a 6-second average block time. -/// -/// This is a copy-paste from the Polkadot repo's `polkadot-runtime-common` crate. -pub const MAXIMUM_BLOCK_WEIGHT: Weight = 2 * WEIGHT_PER_SECOND; - -/// All Polkadot-like chains assume that an on-initialize consumes 1 percent of the weight on -/// average, hence a single extrinsic will not be allowed to consume more than -/// `AvailableBlockRatio - 1 percent`. -/// -/// This is a copy-paste from the Polkadot repo's `polkadot-runtime-common` crate. -pub const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(1); - -parameter_types! { - /// All Polkadot-like chains have maximal block size set to 5MB. - /// - /// This is a copy-paste from the Polkadot repo's `polkadot-runtime-common` crate. - pub BlockLength: limits::BlockLength = limits::BlockLength::max_with_normal_ratio( - 5 * 1024 * 1024, - NORMAL_DISPATCH_RATIO, - ); - /// All Polkadot-like chains have the same block weights. - /// - /// This is a copy-paste from the Polkadot repo's `polkadot-runtime-common` crate. - pub BlockWeights: limits::BlockWeights = limits::BlockWeights::builder() - .base_block(BlockExecutionWeight::get()) - .for_class(DispatchClass::all(), |weights| { - weights.base_extrinsic = ExtrinsicBaseWeight::get(); - }) - .for_class(DispatchClass::Normal, |weights| { - weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); - }) - .for_class(DispatchClass::Operational, |weights| { - weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); - // Operational transactions have an extra reserved space, so that they - // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. 
- weights.reserved = Some( - MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT, - ); - }) - .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) - .build_or_panic(); -} - -// TODO [#78] may need to be updated after https://github.com/paritytech/parity-bridges-common/issues/78 -/// Maximal number of messages in single delivery transaction. -pub const MAX_MESSAGES_IN_DELIVERY_TRANSACTION: MessageNonce = 128; - -/// Maximal number of unrewarded relayer entries at inbound lane. -pub const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = 128; - -// TODO [#438] should be selected keeping in mind: -// finality delay on both chains + reward payout cost + messages throughput. -/// Maximal number of unconfirmed messages at inbound lane. -pub const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = 8192; - -// One important thing about weight-related constants here is that actually we may have -// different weights on different Polkadot-like chains. But now all deployments are -// almost the same, so we're exporting constants from this crate. - -/// Maximal weight of single message delivery confirmation transaction on Polkadot-like chain. -/// -/// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_delivery_proof` -/// weight formula computation for the case when single message is confirmed. The result then must -/// be rounded up to account possible future runtime upgrades. -pub const MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT: Weight = 2_000_000_000; - -/// Increase of delivery transaction weight on Polkadot-like chain with every additional message -/// byte. -/// -/// This value is a result of -/// `pallet_bridge_messages::WeightInfoExt::storage_proof_size_overhead(1)` call. The result then -/// must be rounded up to account possible future runtime upgrades. -pub const ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT: Weight = 25_000; - -/// Maximal number of bytes, included in the signed Polkadot-like transaction apart from the encoded -/// call itself. -/// -/// Can be computed by subtracting encoded call size from raw transaction size. -pub const TX_EXTRA_BYTES: u32 = 256; - -/// Weight of single regular message delivery transaction on Polkadot-like chain. -/// -/// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_proof_weight()` call -/// for the case when single message of `pallet_bridge_messages::EXPECTED_DEFAULT_MESSAGE_LENGTH` -/// bytes is delivered. The message must have dispatch weight set to zero. The result then must be -/// rounded up to account possible future runtime upgrades. -pub const DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT: Weight = 1_500_000_000; - -/// Weight of pay-dispatch-fee operation for inbound messages at Polkadot-like chain. -/// -/// This value corresponds to the result of -/// `pallet_bridge_messages::WeightInfoExt::pay_inbound_dispatch_fee_overhead()` call for your -/// chain. Don't put too much reserve there, because it is used to **decrease** -/// `DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT` cost. So putting large reserve would make delivery -/// transactions cheaper. -pub const PAY_INBOUND_DISPATCH_FEE_WEIGHT: Weight = 600_000_000; - -/// Re-export `time_units` to make usage easier. -pub use time_units::*; - -/// Human readable time units defined in terms of number of blocks. 
-pub mod time_units { - use super::BlockNumber; - - pub const MILLISECS_PER_BLOCK: u64 = 6000; - pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; - - pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber); - pub const HOURS: BlockNumber = MINUTES * 60; - pub const DAYS: BlockNumber = HOURS * 24; -} - -/// Block number type used in Polkadot-like chains. -pub type BlockNumber = u32; - -/// Hash type used in Polkadot-like chains. -pub type Hash = ::Out; - -/// Account Index (a.k.a. nonce). -pub type Index = u32; - -/// Hashing type. -pub type Hashing = BlakeTwo256; - -/// The type of object that can produce hashes on Polkadot-like chains. -pub type Hasher = BlakeTwo256; - -/// The header type used by Polkadot-like chains. -pub type Header = generic::Header; - -/// Signature type used by Polkadot-like chains. -pub type Signature = MultiSignature; - -/// Public key of account on Polkadot-like chains. -pub type AccountPublic = ::Signer; - -/// Id of account on Polkadot-like chains. -pub type AccountId = ::AccountId; - -/// Address of account on Polkadot-like chains. -pub type AccountAddress = MultiAddress; - -/// Index of a transaction on the Polkadot-like chains. -pub type Nonce = u32; - -/// Block type of Polkadot-like chains. -pub type Block = generic::Block; - -/// Polkadot-like block signed with a Justification. -pub type SignedBlock = generic::SignedBlock; - -/// The balance of an account on Polkadot-like chain. -pub type Balance = u128; - -/// Unchecked Extrinsic type. -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic< - AccountAddress, - EncodedOrDecodedCall, - Signature, - SignedExtensions, ->; - -/// Account address, used by the Polkadot-like chain. -pub type Address = MultiAddress; - -/// A type of the data encoded as part of the transaction. -pub type SignedExtra = - ((), (), (), (), sp_runtime::generic::Era, Compact, (), Compact); - -/// Parameters which are part of the payload used to produce transaction signature, -/// but don't end up in the transaction itself (i.e. inherent part of the runtime). -pub type AdditionalSigned = ((), u32, u32, Hash, Hash, (), (), ()); - -/// A simplified version of signed extensions meant for producing signed transactions -/// and signed payload in the client code. -#[derive(PartialEq, Eq, Clone, RuntimeDebug, TypeInfo)] -pub struct SignedExtensions { - encode_payload: SignedExtra, - // It may be set to `None` if extensions are decoded. We are never reconstructing transactions - // (and it makes no sense to do that) => decoded version of `SignedExtensions` is only used to - // read fields of `encode_payload`. And when resigning transaction, we're reconstructing - // `SignedExtensions` from the scratch. 
- additional_signed: Option, - _data: sp_std::marker::PhantomData, -} - -impl parity_scale_codec::Encode for SignedExtensions { - fn using_encoded R>(&self, f: F) -> R { - self.encode_payload.using_encoded(f) - } -} - -impl parity_scale_codec::Decode for SignedExtensions { - fn decode( - input: &mut I, - ) -> Result { - SignedExtra::decode(input).map(|encode_payload| SignedExtensions { - encode_payload, - additional_signed: None, - _data: Default::default(), - }) - } -} - -impl SignedExtensions { - pub fn new( - spec_version: u32, - transaction_version: u32, - era: bp_runtime::TransactionEraOf, - genesis_hash: Hash, - nonce: Nonce, - tip: Balance, - ) -> Self { - Self { - encode_payload: ( - (), // non-zero sender - (), // spec version - (), // tx version - (), // genesis - era.frame_era(), // era - nonce.into(), // nonce (compact encoding) - (), // Check weight - tip.into(), // transaction payment / tip (compact encoding) - ), - additional_signed: Some(( - (), - spec_version, - transaction_version, - genesis_hash, - era.signed_payload(genesis_hash), - (), - (), - (), - )), - _data: Default::default(), - } - } -} - -impl SignedExtensions { - /// Return signer nonce, used to craft transaction. - pub fn nonce(&self) -> Nonce { - self.encode_payload.5.into() - } - - /// Return transaction tip. - pub fn tip(&self) -> Balance { - self.encode_payload.7.into() - } -} - -impl sp_runtime::traits::SignedExtension for SignedExtensions -where - Call: parity_scale_codec::Codec - + sp_std::fmt::Debug - + Sync - + Send - + Clone - + Eq - + PartialEq - + StaticTypeInfo, - Call: Dispatchable, -{ - const IDENTIFIER: &'static str = "Not needed."; - - type AccountId = AccountId; - type Call = Call; - type AdditionalSigned = AdditionalSigned; - type Pre = (); - - fn additional_signed(&self) -> Result { - // we shall not ever see this error in relay, because we are never signing decoded - // transactions. Instead we're constructing and signing new transactions. So the error code - // is kinda random here - self.additional_signed.ok_or(TransactionValidityError::Unknown( - frame_support::unsigned::UnknownTransaction::Custom(0xFF), - )) - } - - fn pre_dispatch( - self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &DispatchInfoOf, - _len: usize, - ) -> Result { - Ok(()) - } -} - -/// Polkadot-like chain. -#[derive(RuntimeDebug)] -pub struct PolkadotLike; - -impl Chain for PolkadotLike { - type BlockNumber = BlockNumber; - type Hash = Hash; - type Hasher = Hasher; - type Header = Header; - - type AccountId = AccountId; - type Balance = Balance; - type Index = Index; - type Signature = Signature; - - fn max_extrinsic_size() -> u32 { - *BlockLength::get().max.get(DispatchClass::Normal) - } - - fn max_extrinsic_weight() -> Weight { - BlockWeights::get() - .get(DispatchClass::Normal) - .max_extrinsic - .unwrap_or(Weight::MAX) - } -} - -/// Convert a 256-bit hash into an AccountId. -pub struct AccountIdConverter; - -impl Convert for AccountIdConverter { - fn convert(hash: sp_core::H256) -> AccountId { - hash.to_fixed_bytes().into() - } -} - -/// Return a storage key for account data. 
-/// -/// This is based on FRAME storage-generation code from Substrate: -/// [link](https://github.com/paritytech/substrate/blob/c939ceba381b6313462d47334f775e128ea4e95d/frame/support/src/storage/generator/map.rs#L74) -/// The equivalent command to invoke in case full `Runtime` is known is this: -/// `let key = frame_system::Account::::storage_map_final_key(&account_id);` -pub fn account_info_storage_key(id: &AccountId) -> Vec { - let module_prefix_hashed = Twox128::hash(b"System"); - let storage_prefix_hashed = Twox128::hash(b"Account"); - let key_hashed = parity_scale_codec::Encode::using_encoded(id, Blake2_128Concat::hash); - - let mut final_key = Vec::with_capacity( - module_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.len(), - ); - - final_key.extend_from_slice(&module_prefix_hashed[..]); - final_key.extend_from_slice(&storage_prefix_hashed[..]); - final_key.extend_from_slice(&key_hashed); - - final_key -} - -#[cfg(test)] -mod tests { - use super::*; - use sp_runtime::codec::Encode; - - #[test] - fn maximal_encoded_account_id_size_is_correct() { - let actual_size = AccountId::from([0u8; 32]).encode().len(); - assert!( - actual_size <= MAXIMAL_ENCODED_ACCOUNT_ID_SIZE as usize, - "Actual size of encoded account id for Polkadot-like chains ({}) is larger than expected {}", - actual_size, - MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, - ); - } - - #[test] - fn should_generate_storage_key() { - let acc = [ - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, - 25, 26, 27, 28, 29, 30, 31, 32, - ] - .into(); - let key = account_info_storage_key(&acc); - assert_eq!(hex::encode(key), "26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da92dccd599abfe1920a1cff8a7358231430102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20"); - } -} diff --git a/polkadot/bridges/primitives/runtime/Cargo.toml b/polkadot/bridges/primitives/runtime/Cargo.toml deleted file mode 100644 index 085cfb9dbc6..00000000000 --- a/polkadot/bridges/primitives/runtime/Cargo.toml +++ /dev/null @@ -1,42 +0,0 @@ -[package] -name = "bp-runtime" -description = "Primitives that may be used at (bridges) runtime level." 
-version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } -hash-db = { version = "0.15.2", default-features = false } -num-traits = { version = "0.2", default-features = false } -scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } - -[dev-dependencies] -hex-literal = "0.3" - -[features] -default = ["std"] -std = [ - "codec/std", - "frame-support/std", - "hash-db/std", - "num-traits/std", - "scale-info/std", - "sp-core/std", - "sp-io/std", - "sp-runtime/std", - "sp-std/std", - "sp-state-machine/std", - "sp-trie/std", -] diff --git a/polkadot/bridges/primitives/runtime/src/chain.rs b/polkadot/bridges/primitives/runtime/src/chain.rs deleted file mode 100644 index 659c9f47c56..00000000000 --- a/polkadot/bridges/primitives/runtime/src/chain.rs +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use codec::{Decode, Encode}; -use frame_support::{weights::Weight, Parameter}; -use num_traits::{AsPrimitive, Bounded, CheckedSub, Saturating, SaturatingAdd, Zero}; -use sp_runtime::{ - traits::{ - AtLeast32Bit, AtLeast32BitUnsigned, Hash as HashT, Header as HeaderT, MaybeDisplay, - MaybeMallocSizeOf, MaybeSerialize, MaybeSerializeDeserialize, Member, SimpleBitOps, Verify, - }, - FixedPointOperand, -}; -use sp_std::{fmt::Debug, hash::Hash, str::FromStr, vec, vec::Vec}; - -/// Chain call, that is either SCALE-encoded, or decoded. -#[derive(Debug, Clone, PartialEq)] -pub enum EncodedOrDecodedCall { - /// The call that is SCALE-encoded. - /// - /// This variant is used when we the chain runtime is not bundled with the relay, but - /// we still need the represent call in some RPC calls or transactions. - Encoded(Vec), - /// The decoded call. - Decoded(ChainCall), -} - -impl EncodedOrDecodedCall { - /// Returns decoded call. 
- pub fn to_decoded(&self) -> Result { - match self { - Self::Encoded(ref encoded_call) => - ChainCall::decode(&mut &encoded_call[..]).map_err(Into::into), - Self::Decoded(ref decoded_call) => Ok(decoded_call.clone()), - } - } - - /// Converts self to decoded call. - pub fn into_decoded(self) -> Result { - match self { - Self::Encoded(encoded_call) => - ChainCall::decode(&mut &encoded_call[..]).map_err(Into::into), - Self::Decoded(decoded_call) => Ok(decoded_call), - } - } -} - -impl From for EncodedOrDecodedCall { - fn from(call: ChainCall) -> EncodedOrDecodedCall { - EncodedOrDecodedCall::Decoded(call) - } -} - -impl Decode for EncodedOrDecodedCall { - fn decode(input: &mut I) -> Result { - // having encoded version is better than decoded, because decoding isn't required - // everywhere and for mocked calls it may lead to **unneeded** errors - match input.remaining_len()? { - Some(remaining_len) => { - let mut encoded_call = vec![0u8; remaining_len]; - input.read(&mut encoded_call)?; - Ok(EncodedOrDecodedCall::Encoded(encoded_call)) - }, - None => Ok(EncodedOrDecodedCall::Decoded(ChainCall::decode(input)?)), - } - } -} - -impl Encode for EncodedOrDecodedCall { - fn encode(&self) -> Vec { - match *self { - Self::Encoded(ref encoded_call) => encoded_call.clone(), - Self::Decoded(ref decoded_call) => decoded_call.encode(), - } - } -} - -/// Minimal Substrate-based chain representation that may be used from no_std environment. -pub trait Chain: Send + Sync + 'static { - /// A type that fulfills the abstract idea of what a Substrate block number is. - // Constraits come from the associated Number type of `sp_runtime::traits::Header` - // See here for more info: - // https://crates.parity.io/sp_runtime/traits/trait.Header.html#associatedtype.Number - // - // Note that the `AsPrimitive` trait is required by the GRANDPA justification - // verifier, and is not usually part of a Substrate Header's Number type. - type BlockNumber: Parameter - + Member - + MaybeSerializeDeserialize - + Hash - + Copy - + Default - + MaybeDisplay - + AtLeast32BitUnsigned - + FromStr - + MaybeMallocSizeOf - + AsPrimitive - + Default - + Saturating - // original `sp_runtime::traits::Header::BlockNumber` doesn't have this trait, but - // `sp_runtime::generic::Era` requires block number -> `u64` conversion. - + Into; - - /// A type that fulfills the abstract idea of what a Substrate hash is. - // Constraits come from the associated Hash type of `sp_runtime::traits::Header` - // See here for more info: - // https://crates.parity.io/sp_runtime/traits/trait.Header.html#associatedtype.Hash - type Hash: Parameter - + Member - + MaybeSerializeDeserialize - + Hash - + Ord - + Copy - + MaybeDisplay - + Default - + SimpleBitOps - + AsRef<[u8]> - + AsMut<[u8]> - + MaybeMallocSizeOf; - - /// A type that fulfills the abstract idea of what a Substrate hasher (a type - /// that produces hashes) is. - // Constraits come from the associated Hashing type of `sp_runtime::traits::Header` - // See here for more info: - // https://crates.parity.io/sp_runtime/traits/trait.Header.html#associatedtype.Hashing - type Hasher: HashT; - - /// A type that fulfills the abstract idea of what a Substrate header is. - // See here for more info: - // https://crates.parity.io/sp_runtime/traits/trait.Header.html - type Header: Parameter - + HeaderT - + MaybeSerializeDeserialize; - - /// The user account identifier type for the runtime. 
- type AccountId: Parameter + Member + MaybeSerializeDeserialize + Debug + MaybeDisplay + Ord; - /// Balance of an account in native tokens. - /// - /// The chain may support multiple tokens, but this particular type is for token that is used - /// to pay for transaction dispatch, to reward different relayers (headers, messages), etc. - type Balance: AtLeast32BitUnsigned - + FixedPointOperand - + Parameter - + Parameter - + Member - + MaybeSerializeDeserialize - + Clone - + Copy - + Bounded - + CheckedSub - + PartialOrd - + SaturatingAdd - + Zero - + TryFrom; - /// Index of a transaction used by the chain. - type Index: Parameter - + Member - + MaybeSerialize - + Debug - + Default - + MaybeDisplay - + MaybeSerializeDeserialize - + AtLeast32Bit - + Copy; - /// Signature type, used on this chain. - type Signature: Parameter + Verify; - - /// Get the maximum size (in bytes) of a Normal extrinsic at this chain. - fn max_extrinsic_size() -> u32; - /// Get the maximum weight (compute time) that a Normal extrinsic at this chain can use. - fn max_extrinsic_weight() -> Weight; -} - -/// Block number used by the chain. -pub type BlockNumberOf = ::BlockNumber; - -/// Hash type used by the chain. -pub type HashOf = ::Hash; - -/// Hasher type used by the chain. -pub type HasherOf = ::Hasher; - -/// Header type used by the chain. -pub type HeaderOf = ::Header; - -/// Account id type used by the chain. -pub type AccountIdOf = ::AccountId; - -/// Balance type used by the chain. -pub type BalanceOf = ::Balance; - -/// Transaction index type used by the chain. -pub type IndexOf = ::Index; - -/// Signature type used by the chain. -pub type SignatureOf = ::Signature; - -/// Account public type used by the chain. -pub type AccountPublicOf = as Verify>::Signer; - -/// Transaction era used by the chain. -pub type TransactionEraOf = crate::TransactionEra, HashOf>; diff --git a/polkadot/bridges/primitives/runtime/src/lib.rs b/polkadot/bridges/primitives/runtime/src/lib.rs deleted file mode 100644 index f4e65959941..00000000000 --- a/polkadot/bridges/primitives/runtime/src/lib.rs +++ /dev/null @@ -1,279 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Primitives that may be used at (bridges) runtime level. 
- -#![cfg_attr(not(feature = "std"), no_std)] - -use codec::Encode; -use frame_support::{RuntimeDebug, StorageHasher}; -use sp_core::{hash::H256, storage::StorageKey}; -use sp_io::hashing::blake2_256; -use sp_std::{vec, vec::Vec}; - -pub use chain::{ - AccountIdOf, AccountPublicOf, BalanceOf, BlockNumberOf, Chain, EncodedOrDecodedCall, HashOf, - HasherOf, HeaderOf, IndexOf, SignatureOf, TransactionEraOf, -}; -pub use frame_support::storage::storage_prefix as storage_value_final_key; -pub use storage_proof::{Error as StorageProofError, StorageProofChecker}; - -#[cfg(feature = "std")] -pub use storage_proof::craft_valid_storage_proof; - -pub mod messages; - -mod chain; -mod storage_proof; - -/// Use this when something must be shared among all instances. -pub const NO_INSTANCE_ID: ChainId = [0, 0, 0, 0]; - -/// Bridge-with-Rialto instance id. -pub const RIALTO_CHAIN_ID: ChainId = *b"rlto"; - -/// Bridge-with-Millau instance id. -pub const MILLAU_CHAIN_ID: ChainId = *b"mlau"; - -/// Bridge-with-Polkadot instance id. -pub const POLKADOT_CHAIN_ID: ChainId = *b"pdot"; - -/// Bridge-with-Kusama instance id. -pub const KUSAMA_CHAIN_ID: ChainId = *b"ksma"; - -/// Bridge-with-Rococo instance id. -pub const ROCOCO_CHAIN_ID: ChainId = *b"roco"; - -/// Bridge-with-Wococo instance id. -pub const WOCOCO_CHAIN_ID: ChainId = *b"woco"; - -/// Call-dispatch module prefix. -pub const CALL_DISPATCH_MODULE_PREFIX: &[u8] = b"pallet-bridge/dispatch"; - -/// A unique prefix for entropy when generating cross-chain account IDs. -pub const ACCOUNT_DERIVATION_PREFIX: &[u8] = b"pallet-bridge/account-derivation/account"; - -/// A unique prefix for entropy when generating a cross-chain account ID for the Root account. -pub const ROOT_ACCOUNT_DERIVATION_PREFIX: &[u8] = b"pallet-bridge/account-derivation/root"; - -/// Generic header Id. -#[derive(RuntimeDebug, Default, Clone, Copy, Eq, Hash, PartialEq)] -pub struct HeaderId(pub Number, pub Hash); - -/// Unique identifier of the chain. -/// -/// In addition to its main function (identifying the chain), this type may also be used to -/// identify module instance. We have a bunch of pallets that may be used in different bridges. E.g. -/// messages pallet may be deployed twice in the same runtime to bridge ThisChain with Chain1 and -/// Chain2. Sometimes we need to be able to identify deployed instance dynamically. This type may be -/// used for that. -pub type ChainId = [u8; 4]; - -/// Type of accounts on the source chain. -pub enum SourceAccount { - /// An account that belongs to Root (privileged origin). - Root, - /// A non-privileged account. - /// - /// The embedded account ID may or may not have a private key depending on the "owner" of the - /// account (private key, pallet, proxy, etc.). - Account(T), -} - -/// Derive an account ID from a foreign account ID. -/// -/// This function returns an encoded Blake2 hash. It is the responsibility of the caller to ensure -/// this can be successfully decoded into an AccountId. -/// -/// The `bridge_id` is used to provide extra entropy when producing account IDs. This helps prevent -/// AccountId collisions between different bridges on a single target chain. -/// -/// Note: If the same `bridge_id` is used across different chains (for example, if one source chain -/// is bridged to multiple target chains), then all the derived accounts would be the same across -/// the different chains. This could negatively impact users' privacy across chains. 
-pub fn derive_account_id(bridge_id: ChainId, id: SourceAccount) -> H256 -where - AccountId: Encode, -{ - match id { - SourceAccount::Root => - (ROOT_ACCOUNT_DERIVATION_PREFIX, bridge_id).using_encoded(blake2_256), - SourceAccount::Account(id) => - (ACCOUNT_DERIVATION_PREFIX, bridge_id, id).using_encoded(blake2_256), - } - .into() -} - -/// Derive the account ID of the shared relayer fund account. -/// -/// This account is used to collect fees for relayers that are passing messages across the bridge. -/// -/// The account ID can be the same across different instances of `pallet-bridge-messages` if the -/// same `bridge_id` is used. -pub fn derive_relayer_fund_account_id(bridge_id: ChainId) -> H256 { - ("relayer-fund-account", bridge_id).using_encoded(blake2_256).into() -} - -/// Anything that has size. -pub trait Size { - /// Return approximate size of this object (in bytes). - /// - /// This function should be lightweight. The result should not necessary be absolutely - /// accurate. - fn size_hint(&self) -> u32; -} - -impl Size for &[u8] { - fn size_hint(&self) -> u32 { - self.len() as _ - } -} - -impl Size for () { - fn size_hint(&self) -> u32 { - 0 - } -} - -/// Pre-computed size. -pub struct PreComputedSize(pub usize); - -impl Size for PreComputedSize { - fn size_hint(&self) -> u32 { - u32::try_from(self.0).unwrap_or(u32::MAX) - } -} - -/// Era of specific transaction. -#[derive(RuntimeDebug, Clone, Copy)] -pub enum TransactionEra { - /// Transaction is immortal. - Immortal, - /// Transaction is valid for a given number of blocks, starting from given block. - Mortal(HeaderId, u32), -} - -impl, BlockHash: Copy> TransactionEra { - /// Prepare transaction era, based on mortality period and current best block number. - pub fn new( - best_block_id: HeaderId, - mortality_period: Option, - ) -> Self { - mortality_period - .map(|mortality_period| TransactionEra::Mortal(best_block_id, mortality_period)) - .unwrap_or(TransactionEra::Immortal) - } - - /// Create new immortal transaction era. - pub fn immortal() -> Self { - TransactionEra::Immortal - } - - /// Returns era that is used by FRAME-based runtimes. - pub fn frame_era(&self) -> sp_runtime::generic::Era { - match *self { - TransactionEra::Immortal => sp_runtime::generic::Era::immortal(), - TransactionEra::Mortal(header_id, period) => - sp_runtime::generic::Era::mortal(period as _, header_id.0.into()), - } - } - - /// Returns header hash that needs to be included in the signature payload. - pub fn signed_payload(&self, genesis_hash: BlockHash) -> BlockHash { - match *self { - TransactionEra::Immortal => genesis_hash, - TransactionEra::Mortal(header_id, _) => header_id.1, - } - } -} - -/// This is a copy of the -/// `frame_support::storage::generator::StorageMap::storage_map_final_key` for maps based -/// on selected hasher. -/// -/// We're using it because to call `storage_map_final_key` directly, we need access to the runtime -/// and pallet instance, which (sometimes) is impossible. 
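The account derivation above is a blake2_256 hash over the SCALE encoding of a (prefix, bridge id, source account) tuple. A minimal standalone sketch of the non-root arm of `derive_account_id`, assuming the same `codec` and `sp-io` dependencies the removed crate uses and a 32-byte source account id:

use codec::Encode;
use sp_io::hashing::blake2_256;

type ChainId = [u8; 4];
const ACCOUNT_DERIVATION_PREFIX: &[u8] = b"pallet-bridge/account-derivation/account";

/// Mirror of the `SourceAccount::Account` arm of `derive_account_id`.
fn derive_bridged_account(bridge_id: ChainId, source_account: [u8; 32]) -> [u8; 32] {
    (ACCOUNT_DERIVATION_PREFIX, bridge_id, source_account).using_encoded(blake2_256)
}

fn main() {
    let from_rialto = derive_bridged_account(*b"rlto", [42u8; 32]);
    let from_millau = derive_bridged_account(*b"mlau", [42u8; 32]);
    // The bridge id acts as extra entropy, so the same source account maps to
    // different target-chain accounts for different bridges.
    assert_ne!(from_rialto, from_millau);
}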
-pub fn storage_map_final_key( - pallet_prefix: &str, - map_name: &str, - key: &[u8], -) -> StorageKey { - let key_hashed = H::hash(key); - let pallet_prefix_hashed = frame_support::Twox128::hash(pallet_prefix.as_bytes()); - let storage_prefix_hashed = frame_support::Twox128::hash(map_name.as_bytes()); - - let mut final_key = Vec::with_capacity( - pallet_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.as_ref().len(), - ); - - final_key.extend_from_slice(&pallet_prefix_hashed[..]); - final_key.extend_from_slice(&storage_prefix_hashed[..]); - final_key.extend_from_slice(key_hashed.as_ref()); - - StorageKey(final_key) -} - -/// This is how a storage key of storage parameter (`parameter_types! { storage Param: bool = false; -/// }`) is computed. -/// -/// Copied from `frame_support::parameter_types` macro. -pub fn storage_parameter_key(parameter_name: &str) -> StorageKey { - let mut buffer = Vec::with_capacity(1 + parameter_name.len() + 1); - buffer.push(b':'); - buffer.extend_from_slice(parameter_name.as_bytes()); - buffer.push(b':'); - StorageKey(sp_io::hashing::twox_128(&buffer).to_vec()) -} - -/// This is how a storage key of storage value is computed. -/// -/// Copied from `frame_support::storage::storage_prefix`. -pub fn storage_value_key(pallet_prefix: &str, value_name: &str) -> StorageKey { - let pallet_hash = sp_io::hashing::twox_128(pallet_prefix.as_bytes()); - let storage_hash = sp_io::hashing::twox_128(value_name.as_bytes()); - - let mut final_key = vec![0u8; 32]; - final_key[..16].copy_from_slice(&pallet_hash); - final_key[16..].copy_from_slice(&storage_hash); - - StorageKey(final_key) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn storage_parameter_key_works() { - assert_eq!( - storage_parameter_key("MillauToRialtoConversionRate"), - StorageKey(hex_literal::hex!("58942375551bb0af1682f72786b59d04").to_vec()), - ); - } - - #[test] - fn storage_value_key_works() { - assert_eq!( - storage_value_key("PalletTransactionPayment", "NextFeeMultiplier"), - StorageKey( - hex_literal::hex!( - "f0e954dfcca51a255ab12c60c789256a3f2edf3bdf381debe331ab7446addfdc" - ) - .to_vec() - ), - ); - } -} diff --git a/polkadot/bridges/primitives/runtime/src/messages.rs b/polkadot/bridges/primitives/runtime/src/messages.rs deleted file mode 100644 index 7a6687c18b7..00000000000 --- a/polkadot/bridges/primitives/runtime/src/messages.rs +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Primitives that may be used by different message delivery and dispatch mechanisms. - -use codec::{Decode, Encode}; -use frame_support::{weights::Weight, RuntimeDebug}; -use scale_info::TypeInfo; - -/// Where message dispatch fee is paid? 
-#[derive(Encode, Decode, RuntimeDebug, Clone, Copy, PartialEq, Eq, TypeInfo)] -pub enum DispatchFeePayment { - /// The dispatch fee is paid at the source chain. - AtSourceChain, - /// The dispatch fee is paid at the target chain. - /// - /// The fee will be paid right before the message is dispatched. So in case of any other - /// issues (like invalid call encoding, invalid signature, ...) the dispatch module won't - /// do any direct transfers. Instead, it'll return fee related to this message dispatch to the - /// relayer. - AtTargetChain, -} - -/// Message dispatch result. -#[derive(Encode, Decode, RuntimeDebug, Clone, PartialEq, Eq, TypeInfo)] -pub struct MessageDispatchResult { - /// Dispatch result flag. This flag is relayed back to the source chain and, generally - /// speaking, may bring any (that fits in single bit) information from the dispatcher at - /// the target chain to the message submitter at the source chain. If you're using immediate - /// call dispatcher, then it'll be result of the dispatch - `true` if dispatch has succeeded - /// and `false` otherwise. - pub dispatch_result: bool, - /// Unspent dispatch weight. This weight that will be deducted from total delivery transaction - /// weight, thus reducing the transaction cost. This shall not be zero in (at least) two cases: - /// - /// 1) if message has been dispatched successfully, but post-dispatch weight is less than - /// the weight, declared by the message sender; - /// 2) if message has not been dispatched at all. - pub unspent_weight: Weight, - /// Whether the message dispatch fee has been paid during dispatch. This will be true if your - /// configuration supports pay-dispatch-fee-at-target-chain option and message sender has - /// enabled this option. - pub dispatch_fee_paid_during_dispatch: bool, -} diff --git a/polkadot/bridges/primitives/runtime/src/storage_proof.rs b/polkadot/bridges/primitives/runtime/src/storage_proof.rs deleted file mode 100644 index 4a99ab6210f..00000000000 --- a/polkadot/bridges/primitives/runtime/src/storage_proof.rs +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Logic for checking Substrate storage proofs. - -use hash_db::{HashDB, Hasher, EMPTY_PREFIX}; -use sp_runtime::RuntimeDebug; -use sp_std::vec::Vec; -use sp_trie::{read_trie_value, LayoutV1, MemoryDB, StorageProof}; - -/// This struct is used to read storage values from a subset of a Merklized database. The "proof" -/// is a subset of the nodes in the Merkle structure of the database, so that it provides -/// authentication against a known Merkle root as well as the values in the database themselves. -pub struct StorageProofChecker -where - H: Hasher, -{ - root: H::Out, - db: MemoryDB, -} - -impl StorageProofChecker -where - H: Hasher, -{ - /// Constructs a new storage proof checker. 
- /// - /// This returns an error if the given proof is invalid with respect to the given root. - pub fn new(root: H::Out, proof: StorageProof) -> Result { - let db = proof.into_memory_db(); - if !db.contains(&root, EMPTY_PREFIX) { - return Err(Error::StorageRootMismatch) - } - - let checker = StorageProofChecker { root, db }; - Ok(checker) - } - - /// Reads a value from the available subset of storage. If the value cannot be read due to an - /// incomplete or otherwise invalid proof, this returns an error. - pub fn read_value(&self, key: &[u8]) -> Result>, Error> { - // LayoutV1 or LayoutV0 is identical for proof that only read values. - read_trie_value::, _>(&self.db, &self.root, key) - .map_err(|_| Error::StorageValueUnavailable) - } -} - -#[derive(RuntimeDebug, PartialEq)] -pub enum Error { - StorageRootMismatch, - StorageValueUnavailable, -} - -/// Return valid storage proof and state root. -/// -/// NOTE: This should only be used for **testing**. -#[cfg(feature = "std")] -pub fn craft_valid_storage_proof() -> (sp_core::H256, StorageProof) { - use sp_state_machine::{backend::Backend, prove_read, InMemoryBackend}; - - let state_version = sp_runtime::StateVersion::default(); - - // construct storage proof - let backend = >::from(( - vec![ - (None, vec![(b"key1".to_vec(), Some(b"value1".to_vec()))]), - (None, vec![(b"key2".to_vec(), Some(b"value2".to_vec()))]), - (None, vec![(b"key3".to_vec(), Some(b"value3".to_vec()))]), - // Value is too big to fit in a branch node - (None, vec![(b"key11".to_vec(), Some(vec![0u8; 32]))]), - ], - state_version, - )); - let root = backend.storage_root(std::iter::empty(), state_version).0; - let proof = StorageProof::new( - prove_read(backend, &[&b"key1"[..], &b"key2"[..], &b"key22"[..]]) - .unwrap() - .iter_nodes(), - ); - - (root, proof) -} - -#[cfg(test)] -pub mod tests { - use super::*; - - #[test] - fn storage_proof_check() { - let (root, proof) = craft_valid_storage_proof(); - - // check proof in runtime - let checker = - >::new(root, proof.clone()).unwrap(); - assert_eq!(checker.read_value(b"key1"), Ok(Some(b"value1".to_vec()))); - assert_eq!(checker.read_value(b"key2"), Ok(Some(b"value2".to_vec()))); - assert_eq!(checker.read_value(b"key11111"), Err(Error::StorageValueUnavailable)); - assert_eq!(checker.read_value(b"key22"), Ok(None)); - - // checking proof against invalid commitment fails - assert_eq!( - >::new(sp_core::H256::random(), proof).err(), - Some(Error::StorageRootMismatch) - ); - } -} diff --git a/polkadot/bridges/primitives/test-utils/Cargo.toml b/polkadot/bridges/primitives/test-utils/Cargo.toml deleted file mode 100644 index 7760f4f8275..00000000000 --- a/polkadot/bridges/primitives/test-utils/Cargo.toml +++ /dev/null @@ -1,29 +0,0 @@ -[package] -name = "bp-test-utils" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -bp-header-chain = { path = "../header-chain", default-features = false } -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } -ed25519-dalek = { version = "1.0", default-features = false, features = ["u64_backend"] } -finality-grandpa = { version = "0.16.0", default-features = false } -sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = 
"https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } - -[features] -default = ["std"] -std = [ - "bp-header-chain/std", - "codec/std", - "ed25519-dalek/std", - "finality-grandpa/std", - "sp-application-crypto/std", - "sp-finality-grandpa/std", - "sp-runtime/std", - "sp-std/std", -] diff --git a/polkadot/bridges/primitives/test-utils/src/keyring.rs b/polkadot/bridges/primitives/test-utils/src/keyring.rs deleted file mode 100644 index 2436d793392..00000000000 --- a/polkadot/bridges/primitives/test-utils/src/keyring.rs +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Utilities for working with test accounts. - -use codec::Encode; -use ed25519_dalek::{Keypair, PublicKey, SecretKey, Signature}; -use finality_grandpa::voter_set::VoterSet; -use sp_finality_grandpa::{AuthorityId, AuthorityList, AuthorityWeight}; -use sp_runtime::RuntimeDebug; -use sp_std::prelude::*; - -/// Set of test accounts with friendly names. -pub const ALICE: Account = Account(0); -pub const BOB: Account = Account(1); -pub const CHARLIE: Account = Account(2); -pub const DAVE: Account = Account(3); -pub const EVE: Account = Account(4); -pub const FERDIE: Account = Account(5); - -/// A test account which can be used to sign messages. -#[derive(RuntimeDebug, Clone, Copy)] -pub struct Account(pub u16); - -impl Account { - pub fn public(&self) -> PublicKey { - (&self.secret()).into() - } - - pub fn secret(&self) -> SecretKey { - let data = self.0.encode(); - let mut bytes = [0_u8; 32]; - bytes[0..data.len()].copy_from_slice(&*data); - SecretKey::from_bytes(&bytes) - .expect("A static array of the correct length is a known good.") - } - - pub fn pair(&self) -> Keypair { - let mut pair: [u8; 64] = [0; 64]; - - let secret = self.secret(); - pair[..32].copy_from_slice(&secret.to_bytes()); - - let public = self.public(); - pair[32..].copy_from_slice(&public.to_bytes()); - - Keypair::from_bytes(&pair) - .expect("We expect the SecretKey to be good, so this must also be good.") - } - - pub fn sign(&self, msg: &[u8]) -> Signature { - use ed25519_dalek::Signer; - self.pair().sign(msg) - } -} - -impl From for AuthorityId { - fn from(p: Account) -> Self { - sp_application_crypto::UncheckedFrom::unchecked_from(p.public().to_bytes()) - } -} - -/// Get a valid set of voters for a Grandpa round. -pub fn voter_set() -> VoterSet { - VoterSet::new(authority_list()).unwrap() -} - -/// Convenience function to get a list of Grandpa authorities. -pub fn authority_list() -> AuthorityList { - test_keyring().iter().map(|(id, w)| (AuthorityId::from(*id), *w)).collect() -} - -/// Get the corresponding identities from the keyring for the "standard" authority set. 
-pub fn test_keyring() -> Vec<(Account, AuthorityWeight)> { - vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1)] -} - -/// Get a list of "unique" accounts. -pub fn accounts(len: u16) -> Vec { - (0..len).into_iter().map(Account).collect() -} diff --git a/polkadot/bridges/primitives/test-utils/src/lib.rs b/polkadot/bridges/primitives/test-utils/src/lib.rs deleted file mode 100644 index 38d9453c98f..00000000000 --- a/polkadot/bridges/primitives/test-utils/src/lib.rs +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright 2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Utilities for testing runtime code. - -#![cfg_attr(not(feature = "std"), no_std)] - -use bp_header_chain::justification::GrandpaJustification; -use codec::Encode; -use sp_finality_grandpa::{AuthorityId, AuthoritySignature, AuthorityWeight, SetId}; -use sp_runtime::traits::{Header as HeaderT, One, Zero}; -use sp_std::prelude::*; - -// Re-export all our test account utilities -pub use keyring::*; - -mod keyring; - -pub const TEST_GRANDPA_ROUND: u64 = 1; -pub const TEST_GRANDPA_SET_ID: SetId = 1; - -/// Configuration parameters when generating test GRANDPA justifications. -#[derive(Clone)] -pub struct JustificationGeneratorParams { - /// The header which we want to finalize. - pub header: H, - /// The GRANDPA round number for the current authority set. - pub round: u64, - /// The current authority set ID. - pub set_id: SetId, - /// The current GRANDPA authority set. - /// - /// The size of the set will determine the number of pre-commits in our justification. - pub authorities: Vec<(Account, AuthorityWeight)>, - /// The total number of precommit ancestors in the `votes_ancestries` field our justification. - /// - /// These may be distributed among many forks. - pub ancestors: u32, - /// The number of forks. - /// - /// Useful for creating a "worst-case" scenario in which each authority is on its own fork. - pub forks: u32, -} - -impl Default for JustificationGeneratorParams { - fn default() -> Self { - Self { - header: test_header(One::one()), - round: TEST_GRANDPA_ROUND, - set_id: TEST_GRANDPA_SET_ID, - authorities: test_keyring(), - ancestors: 2, - forks: 1, - } - } -} - -/// Make a valid GRANDPA justification with sensible defaults -pub fn make_default_justification(header: &H) -> GrandpaJustification { - let params = JustificationGeneratorParams:: { header: header.clone(), ..Default::default() }; - - make_justification_for_header(params) -} - -/// Generate justifications in a way where we are able to tune the number of pre-commits -/// and vote ancestries which are included in the justification. -/// -/// This is useful for benchmarkings where we want to generate valid justifications with -/// a specific number of pre-commits (tuned with the number of "authorities") and/or a specific -/// number of vote ancestries (tuned with the "votes" parameter). 
-/// -/// Note: This needs at least three authorities or else the verifier will complain about -/// being given an invalid commit. -pub fn make_justification_for_header( - params: JustificationGeneratorParams, -) -> GrandpaJustification { - let JustificationGeneratorParams { header, round, set_id, authorities, mut ancestors, forks } = - params; - let (target_hash, target_number) = (header.hash(), *header.number()); - let mut votes_ancestries = vec![]; - let mut precommits = vec![]; - - assert!(forks != 0, "Need at least one fork to have a chain.."); - assert!( - forks as usize <= authorities.len(), - "If we have more forks than authorities we can't create valid pre-commits for all the forks." - ); - - // Roughly, how many vote ancestries do we want per fork - let target_depth = (ancestors + forks - 1) / forks; - - let mut unsigned_precommits = vec![]; - for i in 0..forks { - let depth = if ancestors >= target_depth { - ancestors -= target_depth; - target_depth - } else { - ancestors - }; - - // Note: Adding 1 to account for the target header - let chain = generate_chain(i as u32, depth + 1, &header); - - // We don't include our finality target header in the vote ancestries - for child in &chain[1..] { - votes_ancestries.push(child.clone()); - } - - // The header we need to use when pre-commiting is the one at the highest height - // on our chain. - let precommit_candidate = chain.last().map(|h| (h.hash(), *h.number())).unwrap(); - unsigned_precommits.push(precommit_candidate); - } - - for (i, (id, _weight)) in authorities.iter().enumerate() { - // Assign authorities to sign pre-commits in a round-robin fashion - let target = unsigned_precommits[i % forks as usize]; - let precommit = signed_precommit::(id, target, round, set_id); - - precommits.push(precommit); - } - - GrandpaJustification { - round, - commit: finality_grandpa::Commit { target_hash, target_number, precommits }, - votes_ancestries, - } -} - -fn generate_chain(fork_id: u32, depth: u32, ancestor: &H) -> Vec { - let mut headers = vec![ancestor.clone()]; - - for i in 1..depth { - let parent = &headers[(i - 1) as usize]; - let (hash, num) = (parent.hash(), *parent.number()); - - let mut header = test_header::(num + One::one()); - header.set_parent_hash(hash); - - // Modifying the digest so headers at the same height but in different forks have different - // hashes - header.digest_mut().logs.push(sp_runtime::DigestItem::Other(fork_id.encode())); - - headers.push(header); - } - - headers -} - -/// Create signed precommit with given target. -pub fn signed_precommit( - signer: &Account, - target: (H::Hash, H::Number), - round: u64, - set_id: SetId, -) -> finality_grandpa::SignedPrecommit { - let precommit = finality_grandpa::Precommit { target_hash: target.0, target_number: target.1 }; - - let encoded = sp_finality_grandpa::localized_payload( - round, - set_id, - &finality_grandpa::Message::Precommit(precommit.clone()), - ); - - let signature = signer.sign(&encoded); - let raw_signature: Vec = signature.to_bytes().into(); - - // Need to wrap our signature and id types that they match what our `SignedPrecommit` is - // expecting - let signature = AuthoritySignature::try_from(raw_signature).expect( - "We know our Keypair is good, - so our signature must also be good.", - ); - let id = (*signer).into(); - - finality_grandpa::SignedPrecommit { precommit, signature, id } -} - -/// Get a header for testing. -/// -/// The correct parent hash will be used if given a non-zero header. 
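As a rough standalone illustration of the round-robin target assignment used by `make_justification_for_header` above (this is not the removed code itself): authority `i` pre-commits to the head of fork `i % forks`, so with more authorities than forks every fork head collects several pre-commits.

// Sketch only: which fork head each authority pre-commits to under round-robin assignment.
fn assign_precommit_targets(authorities: usize, forks: usize) -> Vec<usize> {
    assert!(forks >= 1 && forks <= authorities, "need at least one fork and enough authorities");
    (0..authorities).map(|i| i % forks).collect()
}

fn main() {
    // Five authorities over two forks: fork heads 0 and 1 are alternated.
    assert_eq!(assign_precommit_targets(5, 2), vec![0, 1, 0, 1, 0]);
}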
-pub fn test_header(number: H::Number) -> H { - let default = |num| { - H::new(num, Default::default(), Default::default(), Default::default(), Default::default()) - }; - - let mut header = default(number); - if number != Zero::zero() { - let parent_hash = default(number - One::one()).hash(); - header.set_parent_hash(parent_hash); - } - - header -} - -/// Convenience function for generating a Header ID at a given block number. -pub fn header_id(index: u8) -> (H::Hash, H::Number) { - (test_header::(index.into()).hash(), index.into()) -} diff --git a/polkadot/bridges/primitives/token-swap/Cargo.toml b/polkadot/bridges/primitives/token-swap/Cargo.toml deleted file mode 100644 index 9097856f853..00000000000 --- a/polkadot/bridges/primitives/token-swap/Cargo.toml +++ /dev/null @@ -1,38 +0,0 @@ -[package] -name = "bp-token-swap" -description = "Primitives of the pallet-bridge-token-swap pallet" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false } -scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } - -# Bridge Dependencies - -bp-runtime = { path = "../runtime", default-features = false } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } - -[dev-dependencies] -hex = "0.4" -hex-literal = "0.3" - -[features] -default = ["std"] -std = [ - "bp-runtime/std", - "codec/std", - "frame-support/std", - "scale-info/std", - "sp-core/std", - "sp-io/std", - "sp-std/std", -] diff --git a/polkadot/bridges/primitives/token-swap/src/lib.rs b/polkadot/bridges/primitives/token-swap/src/lib.rs deleted file mode 100644 index 79363e5477a..00000000000 --- a/polkadot/bridges/primitives/token-swap/src/lib.rs +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -#![cfg_attr(not(feature = "std"), no_std)] - -pub mod storage_keys; - -use codec::{Decode, Encode}; -use frame_support::{weights::Weight, RuntimeDebug}; -use scale_info::TypeInfo; -use sp_core::{H256, U256}; -use sp_io::hashing::blake2_256; -use sp_std::vec::Vec; - -/// Pending token swap state. -#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq, TypeInfo)] -pub enum TokenSwapState { - /// The swap has been started using the `start_claim` call, but we have no proof that it has - /// happened at the Bridged chain. 
- Started, - /// The swap has happened at the Bridged chain and may be claimed by the Bridged chain party - /// using the `claim_swap` call. - Confirmed, - /// The swap has failed at the Bridged chain and This chain party may cancel it using the - /// `cancel_swap` call. - Failed, -} - -/// Token swap type. -/// -/// Different swap types give a different guarantees regarding possible swap -/// replay protection. -#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq, TypeInfo)] -pub enum TokenSwapType { - /// The `target_account_at_bridged_chain` is temporary and only have funds for single swap. - /// - /// ***WARNING**: if `target_account_at_bridged_chain` still exists after the swap has been - /// completed (either by claiming or canceling), the `source_account_at_this_chain` will be - /// able to restart the swap again and repeat the swap until `target_account_at_bridged_chain` - /// depletes. - TemporaryTargetAccountAtBridgedChain, - /// This swap type prevents `source_account_at_this_chain` from restarting the swap after it - /// has been completed. There are two consequences: - /// - /// 1) the `source_account_at_this_chain` won't be able to call `start_swap` after given - /// ; 2) the `target_account_at_bridged_chain` won't be able to call - /// `claim_swap` (over the bridge) before block ``. - /// - /// The second element is the nonce of the swap. You must care about its uniqueness if you're - /// planning to perform another swap with exactly the same parameters (i.e. same amount, same - /// accounts, same `ThisBlockNumber`) to avoid collisions. - LockClaimUntilBlock(ThisBlockNumber, U256), -} - -/// An intention to swap `source_balance_at_this_chain` owned by `source_account_at_this_chain` -/// to `target_balance_at_bridged_chain` owned by `target_account_at_bridged_chain`. -/// -/// **IMPORTANT NOTE**: this structure is always the same during single token swap. So even -/// when chain changes, the meaning of This and Bridged are still used to point to the same chains. -/// This chain is always the chain where swap has been started. And the Bridged chain is the other -/// chain. -#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq, TypeInfo)] -pub struct TokenSwap -{ - /// The type of the swap. - pub swap_type: TokenSwapType, - /// This chain balance to be swapped with `target_balance_at_bridged_chain`. - pub source_balance_at_this_chain: ThisBalance, - /// Account id of the party acting at This chain and owning the `source_account_at_this_chain`. - pub source_account_at_this_chain: ThisAccountId, - /// Bridged chain balance to be swapped with `source_balance_at_this_chain`. - pub target_balance_at_bridged_chain: BridgedBalance, - /// Account id of the party acting at the Bridged chain and owning the - /// `target_balance_at_bridged_chain`. - pub target_account_at_bridged_chain: BridgedAccountId, -} - -impl - TokenSwap -where - TokenSwap: - Encode, -{ - /// Returns hash, used to identify this token swap. - pub fn hash(&self) -> H256 { - self.using_encoded(blake2_256).into() - } -} - -/// SCALE-encoded `Currency::transfer` call on the bridged chain. -pub type RawBridgedTransferCall = Vec; - -/// Token swap creation parameters. -#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq, TypeInfo)] -pub struct TokenSwapCreation { - /// Public key of the `target_account_at_bridged_chain` account used to verify - /// `bridged_currency_transfer_signature`. 
- pub target_public_at_bridged_chain: BridgedAccountPublic, - /// Fee that the `source_account_at_this_chain` is ready to pay for the tokens - /// transfer message delivery and dispatch. - pub swap_delivery_and_dispatch_fee: ThisChainBalance, - /// Specification version of the Bridged chain. - pub bridged_chain_spec_version: u32, - /// SCALE-encoded tokens transfer call at the Bridged chain. - pub bridged_currency_transfer: RawBridgedTransferCall, - /// Dispatch weight of the tokens transfer call at the Bridged chain. - pub bridged_currency_transfer_weight: Weight, - /// The signature of the `target_account_at_bridged_chain` for the message - /// returned by the `pallet_bridge_dispatch::account_ownership_digest()` function call. - pub bridged_currency_transfer_signature: BridgedAccountSignature, -} diff --git a/polkadot/bridges/primitives/token-swap/src/storage_keys.rs b/polkadot/bridges/primitives/token-swap/src/storage_keys.rs deleted file mode 100644 index d0aafc0d5c2..00000000000 --- a/polkadot/bridges/primitives/token-swap/src/storage_keys.rs +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Storage keys of bridge token swap pallet. - -use frame_support::Identity; -use sp_core::{storage::StorageKey, H256}; - -/// Name of the `PendingSwaps` storage map. -pub const PENDING_SWAPS_MAP_NAME: &str = "PendingSwaps"; - -/// Storage key of `PendingSwaps` value with given token swap hash. -pub fn pending_swaps_key(pallet_prefix: &str, token_swap_hash: H256) -> StorageKey { - bp_runtime::storage_map_final_key::( - pallet_prefix, - PENDING_SWAPS_MAP_NAME, - token_swap_hash.as_ref(), - ) -} - -#[cfg(test)] -mod tests { - use super::*; - use hex_literal::hex; - - #[test] - fn pending_swaps_key_computed_properly() { - // If this test fails, then something has been changed in module storage that may break - // all previous swaps. 
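A standalone sketch of why the expected key asserted in the test body below ends in thirty-two 0x2a bytes (assuming only the `sp-core` hashing helpers; this is not the removed pallet code): the `PendingSwaps` map uses the `Identity` hasher, so the raw 32-byte swap hash — here `[42u8; 32]`, and 42 is 0x2a — is appended verbatim after the two 16-byte twox128 prefixes, giving a 64-byte key.

use sp_core::hashing::twox_128;

// Identity hasher: the map key is not hashed, its raw bytes are appended as-is.
fn pending_swaps_key_sketch(pallet_prefix: &str, swap_hash: [u8; 32]) -> Vec<u8> {
    let mut key = Vec::with_capacity(64);
    key.extend_from_slice(&twox_128(pallet_prefix.as_bytes())); // pallet prefix
    key.extend_from_slice(&twox_128(b"PendingSwaps"));          // map name
    key.extend_from_slice(&swap_hash);                          // raw key under Identity
    key
}

fn main() {
    let key = pending_swaps_key_sketch("BridgeTokenSwap", [42u8; 32]);
    assert_eq!(key.len(), 64);
    assert!(key[32..].iter().all(|byte| *byte == 0x2a));
}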
- let storage_key = pending_swaps_key("BridgeTokenSwap", [42u8; 32].into()).0; - assert_eq!( - storage_key, - hex!("76276da64e7a4f454760eedeb4bad11adca2227fef56ad07cc424f1f5d128b9a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a").to_vec(), - "Unexpected storage key: {}", - hex::encode(&storage_key), - ); - } -} diff --git a/polkadot/bridges/relays/bin-substrate/Cargo.toml b/polkadot/bridges/relays/bin-substrate/Cargo.toml deleted file mode 100644 index 6d7a62e9834..00000000000 --- a/polkadot/bridges/relays/bin-substrate/Cargo.toml +++ /dev/null @@ -1,82 +0,0 @@ -[package] -name = "substrate-relay" -version = "1.0.1" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -anyhow = "1.0" -async-std = "1.9.0" -async-trait = "0.1.42" -codec = { package = "parity-scale-codec", version = "3.0.0" } -futures = "0.3.12" -hex = "0.4" -log = "0.4.14" -num-format = "0.4" -num-traits = "0.2" -paste = "1.0" -rand = "0.8" -structopt = "0.3" -strum = { version = "0.21.0", features = ["derive"] } - -# Bridge dependencies - -bp-header-chain = { path = "../../primitives/header-chain" } -bp-kusama = { path = "../../primitives/chain-kusama" } -bp-messages = { path = "../../primitives/messages" } -bp-message-dispatch = { path = "../../primitives/message-dispatch" } -bp-millau = { path = "../../primitives/chain-millau" } -bp-polkadot = { path = "../../primitives/chain-polkadot" } -bp-rialto = { path = "../../primitives/chain-rialto" } -bp-rialto-parachain = { path = "../../primitives/chain-rialto-parachain" } -bp-rococo = { path = "../../primitives/chain-rococo" } -bp-runtime = { path = "../../primitives/runtime" } -bp-token-swap = { path = "../../primitives/token-swap" } -bp-westend = { path = "../../primitives/chain-westend" } -bp-wococo = { path = "../../primitives/chain-wococo" } -bridge-runtime-common = { path = "../../bin/runtime-common" } -finality-relay = { path = "../finality" } -messages-relay = { path = "../messages" } -millau-runtime = { path = "../../bin/millau/runtime" } -pallet-bridge-dispatch = { path = "../../modules/dispatch" } -pallet-bridge-grandpa = { path = "../../modules/grandpa" } -pallet-bridge-messages = { path = "../../modules/messages" } -pallet-bridge-token-swap = { path = "../../modules/token-swap" } -relay-kusama-client = { path = "../client-kusama" } -relay-millau-client = { path = "../client-millau" } -relay-polkadot-client = { path = "../client-polkadot" } -relay-rialto-client = { path = "../client-rialto" } -relay-rialto-parachain-client = { path = "../client-rialto-parachain" } -relay-rococo-client = { path = "../client-rococo" } -relay-wococo-client = { path = "../client-wococo" } -relay-substrate-client = { path = "../client-substrate" } -relay-utils = { path = "../utils" } -relay-westend-client = { path = "../client-westend" } -rialto-parachain-runtime = { path = "../../bin/rialto-parachain/runtime" } -rialto-runtime = { path = "../../bin/rialto/runtime" } -substrate-relay-helper = { path = "../lib-substrate-relay" } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-version = { git 
= "https://github.com/paritytech/substrate", branch = "master" } - -# Polkadot Dependencies - -polkadot-parachain = { git = "https://github.com/paritytech/polkadot", branch = "master" } -polkadot-primitives = { git = "https://github.com/paritytech/polkadot", branch = "master" } -polkadot-runtime-common = { git = "https://github.com/paritytech/polkadot", branch = "master" } -polkadot-runtime-parachains = { git = "https://github.com/paritytech/polkadot", branch = "master" } - -[dev-dependencies] -bp-test-utils = { path = "../../primitives/test-utils" } -hex-literal = "0.3" -pallet-bridge-grandpa = { path = "../../modules/grandpa" } -sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } -tempfile = "3.2" -finality-grandpa = { version = "0.16.0" } diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/kusama.rs b/polkadot/bridges/relays/bin-substrate/src/chains/kusama.rs deleted file mode 100644 index 9cdc6cd125e..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/chains/kusama.rs +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use anyhow::anyhow; -use bp_message_dispatch::{CallOrigin, MessagePayload}; -use bp_runtime::EncodedOrDecodedCall; -use codec::Decode; -use frame_support::weights::{DispatchClass, DispatchInfo, Pays, Weight}; -use relay_kusama_client::Kusama; -use sp_version::RuntimeVersion; - -use crate::cli::{ - bridge, - encode_call::{self, Call, CliEncodeCall}, - encode_message, - send_message::{self, DispatchFeePayment}, - CliChain, -}; - -/// Weight of the `system::remark` call at Kusama. -/// -/// This weight is larger (x2) than actual weight at current Kusama runtime to avoid unsuccessful -/// calls in the future. But since it is used only in tests (and on test chains), this is ok. -pub(crate) const SYSTEM_REMARK_CALL_WEIGHT: Weight = 2 * 1_345_000; - -impl CliEncodeCall for Kusama { - fn encode_call(call: &Call) -> anyhow::Result> { - Ok(match call { - Call::Raw { data } => EncodedOrDecodedCall::Encoded(data.0.clone()), - Call::Remark { remark_payload, .. 
} => relay_kusama_client::runtime::Call::System( - relay_kusama_client::runtime::SystemCall::remark( - remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(), - ), - ) - .into(), - Call::BridgeSendMessage { lane, payload, fee, bridge_instance_index } => - match *bridge_instance_index { - bridge::KUSAMA_TO_POLKADOT_INDEX => { - let payload = Decode::decode(&mut &*payload.0)?; - relay_kusama_client::runtime::Call::BridgePolkadotMessages( - relay_kusama_client::runtime::BridgePolkadotMessagesCall::send_message( - lane.0, payload, fee.0, - ), - ) - .into() - }, - _ => anyhow::bail!( - "Unsupported target bridge pallet with instance index: {}", - bridge_instance_index - ), - }, - _ => anyhow::bail!("Unsupported Kusama call: {:?}", call), - }) - } - - fn get_dispatch_info(call: &EncodedOrDecodedCall) -> anyhow::Result { - match *call { - EncodedOrDecodedCall::Decoded(relay_kusama_client::runtime::Call::System( - relay_kusama_client::runtime::SystemCall::remark(_), - )) => Ok(DispatchInfo { - weight: crate::chains::kusama::SYSTEM_REMARK_CALL_WEIGHT, - class: DispatchClass::Normal, - pays_fee: Pays::Yes, - }), - _ => anyhow::bail!("Unsupported Kusama call: {:?}", call), - } - } -} - -impl CliChain for Kusama { - const RUNTIME_VERSION: RuntimeVersion = bp_kusama::VERSION; - - type KeyPair = sp_core::sr25519::Pair; - type MessagePayload = MessagePayload< - bp_kusama::AccountId, - bp_polkadot::AccountPublic, - bp_polkadot::Signature, - Vec, - >; - - fn ss58_format() -> u16 { - sp_core::crypto::Ss58AddressFormat::from( - sp_core::crypto::Ss58AddressFormatRegistry::KusamaAccount, - ) - .into() - } - - fn encode_message( - message: encode_message::MessagePayload, - ) -> anyhow::Result { - match message { - encode_message::MessagePayload::Raw { data } => MessagePayload::decode(&mut &*data.0) - .map_err(|e| anyhow!("Failed to decode Kusama's MessagePayload: {:?}", e)), - encode_message::MessagePayload::Call { mut call, mut sender, dispatch_weight } => { - type Source = Kusama; - type Target = relay_polkadot_client::Polkadot; - - sender.enforce_chain::(); - let spec_version = Target::RUNTIME_VERSION.spec_version; - let origin = CallOrigin::SourceAccount(sender.raw_id()); - encode_call::preprocess_call::( - &mut call, - bridge::KUSAMA_TO_POLKADOT_INDEX, - ); - let call = Target::encode_call(&call)?; - let dispatch_weight = dispatch_weight.map(Ok).unwrap_or_else(|| { - Err(anyhow::format_err!( - "Please specify dispatch weight of the encoded Polkadot call" - )) - })?; - - Ok(send_message::message_payload( - spec_version, - dispatch_weight, - origin, - &call, - DispatchFeePayment::AtSourceChain, - )) - }, - } - } -} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/kusama_headers_to_polkadot.rs b/polkadot/bridges/relays/bin-substrate/src/chains/kusama_headers_to_polkadot.rs deleted file mode 100644 index fbf0ab2ecc0..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/chains/kusama_headers_to_polkadot.rs +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Kusama-to-Polkadot headers sync entrypoint. - -use async_trait::async_trait; -use relay_polkadot_client::Polkadot; -use substrate_relay_helper::{finality_pipeline::SubstrateFinalitySyncPipeline, TransactionParams}; - -/// Maximal saturating difference between `balance(now)` and `balance(now-24h)` to treat -/// relay as gone wild. -/// -/// Actual value, returned by `maximal_balance_decrease_per_day_is_sane` test is approximately 21 -/// DOT, and initial value of this constant was rounded up to 30 DOT. But for actual Kusama <> -/// Polkadot deployment we'll be using the same account for delivering finality (free for mandatory -/// headers) and messages. It means that we can't predict maximal loss. But to protect funds against -/// relay/deployment issues, let's limit it so something that is much larger than this estimation - -/// e.g. to 100 DOT. -// TODO: https://github.com/paritytech/parity-bridges-common/issues/1307 -pub(crate) const MAXIMAL_BALANCE_DECREASE_PER_DAY: bp_polkadot::Balance = 100 * 10_000_000_000; - -/// Description of Kusama -> Polkadot finalized headers bridge. -#[derive(Clone, Debug)] -pub struct KusamaFinalityToPolkadot; -substrate_relay_helper::generate_mocked_submit_finality_proof_call_builder!( - KusamaFinalityToPolkadot, - KusamaFinalityToPolkadotCallBuilder, - relay_polkadot_client::runtime::Call::BridgeKusamaGrandpa, - relay_polkadot_client::runtime::BridgeKusamaGrandpaCall::submit_finality_proof -); - -#[async_trait] -impl SubstrateFinalitySyncPipeline for KusamaFinalityToPolkadot { - type SourceChain = relay_kusama_client::Kusama; - type TargetChain = Polkadot; - - type SubmitFinalityProofCallBuilder = KusamaFinalityToPolkadotCallBuilder; - type TransactionSignScheme = Polkadot; - - async fn start_relay_guards( - target_client: &relay_substrate_client::Client, - transaction_params: &TransactionParams, - enable_version_guard: bool, - ) -> relay_substrate_client::Result<()> { - substrate_relay_helper::finality_guards::start::( - target_client, - transaction_params, - enable_version_guard, - MAXIMAL_BALANCE_DECREASE_PER_DAY, - ) - .await - } -} - -#[cfg(test)] -pub(crate) mod tests { - use super::*; - use frame_support::weights::WeightToFeePolynomial; - use pallet_bridge_grandpa::weights::WeightInfo; - - pub fn compute_maximal_balance_decrease_per_day(expected_source_headers_per_day: u32) -> B - where - B: From + std::ops::Mul, - W: WeightToFeePolynomial, - { - // we assume that the GRANDPA is not lagging here => ancestry length will be near to 0 - // (let's round up to 2) - const AVG_VOTES_ANCESTRIES_LEN: u32 = 2; - // let's assume number of validators is 1024 (more than on any existing well-known chain - // atm) => number of precommits is *2/3 + 1 - const AVG_PRECOMMITS_LEN: u32 = 1024 * 2 / 3 + 1; - - // GRANDPA pallet weights. We're now using Rialto weights everywhere. - // - // Using Rialto runtime is slightly incorrect, because `DbWeight` of other runtimes may - // differ from the `DbWeight` of Rialto runtime. But now (and most probably forever) it is - // the same. 
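A rough standalone sketch of the shape of the estimate computed in `compute_maximal_balance_decrease_per_day` (not the removed test helper itself; the numbers are made up and a linear weight-to-fee function is assumed): bump the expected daily header count by 10%, pad the per-header call weight by 50% to stand in for the non-call fee components, convert the padded weight to a fee, and multiply by the daily header count.

// Illustrative numbers only; fee = weight * fee_per_weight is an assumption.
fn estimate_daily_relay_cost(headers_per_day: u64, submit_call_weight: u64, fee_per_weight: u64) -> u64 {
    let headers_per_day = headers_per_day * 110 / 100; // add a 10% safety margin
    let tx_weight = submit_call_weight * 3 / 2;        // padding for base fee, size fee, adjustment
    let tx_cost = tx_weight * fee_per_weight;          // assumed-linear weight-to-fee conversion
    tx_cost * headers_per_day
}

fn main() {
    // E.g. 25 mandatory headers per day, a 1_000_000_000-weight call, 200 fee units per weight.
    println!("{}", estimate_daily_relay_cost(25, 1_000_000_000, 200));
}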
- type GrandpaPalletWeights = - pallet_bridge_grandpa::weights::MillauWeight; - - // The following formula shall not be treated as super-accurate - guard is to protect from - // mad relays, not to protect from over-average loses. - - // increase number of headers a bit - let expected_source_headers_per_day = expected_source_headers_per_day * 110 / 100; - let single_source_header_submit_call_weight = GrandpaPalletWeights::submit_finality_proof( - AVG_VOTES_ANCESTRIES_LEN, - AVG_PRECOMMITS_LEN, - ); - // for simplicity - add extra weight for base tx fee + fee that is paid for the tx size + - // adjusted fee - let single_source_header_submit_tx_weight = single_source_header_submit_call_weight * 3 / 2; - let single_source_header_tx_cost = W::weight_to_fee(&single_source_header_submit_tx_weight); - single_source_header_tx_cost * B::from(expected_source_headers_per_day) - } - - #[test] - fn maximal_balance_decrease_per_day_is_sane() { - // we expect Kusama -> Polkadot relay to be running in mandatory-headers-only mode - // => we expect single header for every Kusama session - let maximal_balance_decrease = compute_maximal_balance_decrease_per_day::< - bp_polkadot::Balance, - bp_polkadot::WeightToFee, - >(bp_kusama::DAYS / bp_kusama::SESSION_LENGTH + 1); - assert!( - MAXIMAL_BALANCE_DECREASE_PER_DAY >= maximal_balance_decrease, - "Maximal expected loss per day {} is larger than hardcoded {}", - maximal_balance_decrease, - MAXIMAL_BALANCE_DECREASE_PER_DAY, - ); - } -} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/kusama_messages_to_polkadot.rs b/polkadot/bridges/relays/bin-substrate/src/chains/kusama_messages_to_polkadot.rs deleted file mode 100644 index 9a71fbe3c62..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/chains/kusama_messages_to_polkadot.rs +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Kusama-to-Polkadot messages sync entrypoint. - -use frame_support::weights::Weight; - -use messages_relay::relay_strategy::MixStrategy; -use relay_kusama_client::Kusama; -use relay_polkadot_client::Polkadot; -use substrate_relay_helper::messages_lane::SubstrateMessageLane; - -/// Description of Kusama -> Polkadot messages bridge. 
-#[derive(Clone, Debug)] -pub struct KusamaMessagesToPolkadot; -substrate_relay_helper::generate_mocked_receive_message_proof_call_builder!( - KusamaMessagesToPolkadot, - KusamaMessagesToPolkadotReceiveMessagesProofCallBuilder, - relay_polkadot_client::runtime::Call::BridgeKusamaMessages, - relay_polkadot_client::runtime::BridgeKusamaMessagesCall::receive_messages_proof -); -substrate_relay_helper::generate_mocked_receive_message_delivery_proof_call_builder!( - KusamaMessagesToPolkadot, - KusamaMessagesToPolkadotReceiveMessagesDeliveryProofCallBuilder, - relay_kusama_client::runtime::Call::BridgePolkadotMessages, - relay_kusama_client::runtime::BridgePolkadotMessagesCall::receive_messages_delivery_proof -); -substrate_relay_helper::generate_mocked_update_conversion_rate_call_builder!( - Kusama, - KusamaMessagesToPolkadotUpdateConversionRateCallBuilder, - relay_kusama_client::runtime::Call::BridgePolkadotMessages, - relay_kusama_client::runtime::BridgePolkadotMessagesCall::update_pallet_parameter, - relay_kusama_client::runtime::BridgePolkadotMessagesParameter::PolkadotToKusamaConversionRate -); - -impl SubstrateMessageLane for KusamaMessagesToPolkadot { - const SOURCE_TO_TARGET_CONVERSION_RATE_PARAMETER_NAME: Option<&'static str> = - Some(bp_polkadot::KUSAMA_TO_POLKADOT_CONVERSION_RATE_PARAMETER_NAME); - const TARGET_TO_SOURCE_CONVERSION_RATE_PARAMETER_NAME: Option<&'static str> = - Some(bp_kusama::POLKADOT_TO_KUSAMA_CONVERSION_RATE_PARAMETER_NAME); - - const SOURCE_FEE_MULTIPLIER_PARAMETER_NAME: Option<&'static str> = - Some(bp_polkadot::KUSAMA_FEE_MULTIPLIER_PARAMETER_NAME); - const TARGET_FEE_MULTIPLIER_PARAMETER_NAME: Option<&'static str> = - Some(bp_kusama::POLKADOT_FEE_MULTIPLIER_PARAMETER_NAME); - - const AT_SOURCE_TRANSACTION_PAYMENT_PALLET_NAME: Option<&'static str> = - Some(bp_kusama::TRANSACTION_PAYMENT_PALLET_NAME); - const AT_TARGET_TRANSACTION_PAYMENT_PALLET_NAME: Option<&'static str> = - Some(bp_polkadot::TRANSACTION_PAYMENT_PALLET_NAME); - - type SourceChain = Kusama; - type TargetChain = Polkadot; - - type SourceTransactionSignScheme = Kusama; - type TargetTransactionSignScheme = Polkadot; - - type ReceiveMessagesProofCallBuilder = KusamaMessagesToPolkadotReceiveMessagesProofCallBuilder; - type ReceiveMessagesDeliveryProofCallBuilder = - KusamaMessagesToPolkadotReceiveMessagesDeliveryProofCallBuilder; - - type TargetToSourceChainConversionRateUpdateBuilder = - KusamaMessagesToPolkadotUpdateConversionRateCallBuilder; - - type RelayStrategy = MixStrategy; -} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/millau.rs b/polkadot/bridges/relays/bin-substrate/src/chains/millau.rs deleted file mode 100644 index 1fc1e8308ef..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/chains/millau.rs +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. 
If not, see . - -//! Millau chain specification for CLI. - -use crate::cli::{ - bridge, - encode_call::{self, Call, CliEncodeCall}, - encode_message, - send_message::{self, DispatchFeePayment}, - CliChain, -}; -use anyhow::anyhow; -use bp_message_dispatch::{CallOrigin, MessagePayload}; -use bp_runtime::EncodedOrDecodedCall; -use codec::Decode; -use frame_support::weights::{DispatchInfo, GetDispatchInfo}; -use relay_millau_client::Millau; -use sp_version::RuntimeVersion; - -impl CliEncodeCall for Millau { - fn encode_call(call: &Call) -> anyhow::Result> { - Ok(match call { - Call::Raw { data } => Self::Call::decode(&mut &*data.0)?.into(), - Call::Remark { remark_payload, .. } => - millau_runtime::Call::System(millau_runtime::SystemCall::remark { - remark: remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(), - }) - .into(), - Call::Transfer { recipient, amount } => - millau_runtime::Call::Balances(millau_runtime::BalancesCall::transfer { - dest: recipient.raw_id(), - value: amount.cast(), - }) - .into(), - Call::BridgeSendMessage { lane, payload, fee, bridge_instance_index } => - match *bridge_instance_index { - bridge::MILLAU_TO_RIALTO_INDEX => { - let payload = Decode::decode(&mut &*payload.0)?; - millau_runtime::Call::BridgeRialtoMessages( - millau_runtime::MessagesCall::send_message { - lane_id: lane.0, - payload, - delivery_and_dispatch_fee: fee.cast(), - }, - ) - .into() - }, - _ => anyhow::bail!( - "Unsupported target bridge pallet with instance index: {}", - bridge_instance_index - ), - }, - }) - } - - fn get_dispatch_info(call: &EncodedOrDecodedCall) -> anyhow::Result { - Ok(call.to_decoded()?.get_dispatch_info()) - } -} - -impl CliChain for Millau { - const RUNTIME_VERSION: RuntimeVersion = millau_runtime::VERSION; - - type KeyPair = sp_core::sr25519::Pair; - type MessagePayload = MessagePayload< - bp_millau::AccountId, - bp_rialto::AccountSigner, - bp_rialto::Signature, - Vec, - >; - - fn ss58_format() -> u16 { - millau_runtime::SS58Prefix::get() as u16 - } - - // TODO [#854|#843] support multiple bridges? - fn encode_message( - message: encode_message::MessagePayload, - ) -> anyhow::Result { - match message { - encode_message::MessagePayload::Raw { data } => MessagePayload::decode(&mut &*data.0) - .map_err(|e| anyhow!("Failed to decode Millau's MessagePayload: {:?}", e)), - encode_message::MessagePayload::Call { mut call, mut sender, dispatch_weight } => { - type Source = Millau; - type Target = relay_rialto_client::Rialto; - - sender.enforce_chain::(); - let spec_version = Target::RUNTIME_VERSION.spec_version; - let origin = CallOrigin::SourceAccount(sender.raw_id()); - encode_call::preprocess_call::( - &mut call, - bridge::MILLAU_TO_RIALTO_INDEX, - ); - let call = Target::encode_call(&call)?; - let dispatch_weight = dispatch_weight.map(Ok).unwrap_or_else(|| { - call.to_decoded().map(|call| call.get_dispatch_info().weight) - })?; - - Ok(send_message::message_payload( - spec_version, - dispatch_weight, - origin, - &call, - DispatchFeePayment::AtSourceChain, - )) - }, - } - } -} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/millau_headers_to_rialto.rs b/polkadot/bridges/relays/bin-substrate/src/chains/millau_headers_to_rialto.rs deleted file mode 100644 index 584f0a9bb1d..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/chains/millau_headers_to_rialto.rs +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Millau-to-Rialto headers sync entrypoint. - -use substrate_relay_helper::finality_pipeline::{ - DirectSubmitFinalityProofCallBuilder, SubstrateFinalitySyncPipeline, -}; - -/// Description of Millau -> Rialto finalized headers bridge. -#[derive(Clone, Debug)] -pub struct MillauFinalityToRialto; - -impl SubstrateFinalitySyncPipeline for MillauFinalityToRialto { - type SourceChain = relay_millau_client::Millau; - type TargetChain = relay_rialto_client::Rialto; - - type SubmitFinalityProofCallBuilder = DirectSubmitFinalityProofCallBuilder< - Self, - rialto_runtime::Runtime, - rialto_runtime::MillauGrandpaInstance, - >; - type TransactionSignScheme = relay_rialto_client::Rialto; -} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/millau_messages_to_rialto.rs b/polkadot/bridges/relays/bin-substrate/src/chains/millau_messages_to_rialto.rs deleted file mode 100644 index f20669e6c7a..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/chains/millau_messages_to_rialto.rs +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Millau-to-Rialto messages sync entrypoint. - -use messages_relay::relay_strategy::MixStrategy; -use relay_millau_client::Millau; -use relay_rialto_client::Rialto; -use substrate_relay_helper::messages_lane::{ - DirectReceiveMessagesDeliveryProofCallBuilder, DirectReceiveMessagesProofCallBuilder, - SubstrateMessageLane, -}; - -/// Description of Millau -> Rialto messages bridge. 
-#[derive(Clone, Debug)] -pub struct MillauMessagesToRialto; -substrate_relay_helper::generate_direct_update_conversion_rate_call_builder!( - Millau, - MillauMessagesToRialtoUpdateConversionRateCallBuilder, - millau_runtime::Runtime, - millau_runtime::WithRialtoMessagesInstance, - millau_runtime::rialto_messages::MillauToRialtoMessagesParameter::RialtoToMillauConversionRate -); - -impl SubstrateMessageLane for MillauMessagesToRialto { - const SOURCE_TO_TARGET_CONVERSION_RATE_PARAMETER_NAME: Option<&'static str> = - Some(bp_rialto::MILLAU_TO_RIALTO_CONVERSION_RATE_PARAMETER_NAME); - const TARGET_TO_SOURCE_CONVERSION_RATE_PARAMETER_NAME: Option<&'static str> = - Some(bp_millau::RIALTO_TO_MILLAU_CONVERSION_RATE_PARAMETER_NAME); - - const SOURCE_FEE_MULTIPLIER_PARAMETER_NAME: Option<&'static str> = None; - const TARGET_FEE_MULTIPLIER_PARAMETER_NAME: Option<&'static str> = None; - const AT_SOURCE_TRANSACTION_PAYMENT_PALLET_NAME: Option<&'static str> = None; - const AT_TARGET_TRANSACTION_PAYMENT_PALLET_NAME: Option<&'static str> = None; - - type SourceChain = Millau; - type TargetChain = Rialto; - - type SourceTransactionSignScheme = Millau; - type TargetTransactionSignScheme = Rialto; - - type ReceiveMessagesProofCallBuilder = DirectReceiveMessagesProofCallBuilder< - Self, - rialto_runtime::Runtime, - rialto_runtime::WithMillauMessagesInstance, - >; - type ReceiveMessagesDeliveryProofCallBuilder = DirectReceiveMessagesDeliveryProofCallBuilder< - Self, - millau_runtime::Runtime, - millau_runtime::WithRialtoMessagesInstance, - >; - - type TargetToSourceChainConversionRateUpdateBuilder = - MillauMessagesToRialtoUpdateConversionRateCallBuilder; - - type RelayStrategy = MixStrategy; -} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/mod.rs b/polkadot/bridges/relays/bin-substrate/src/chains/mod.rs deleted file mode 100644 index 16901143e19..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/chains/mod.rs +++ /dev/null @@ -1,344 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Chain-specific relayer configuration. 
- -pub mod kusama_headers_to_polkadot; -pub mod kusama_messages_to_polkadot; -pub mod millau_headers_to_rialto; -pub mod millau_messages_to_rialto; -pub mod polkadot_headers_to_kusama; -pub mod polkadot_messages_to_kusama; -pub mod rialto_headers_to_millau; -pub mod rialto_messages_to_millau; -pub mod rococo_headers_to_wococo; -pub mod rococo_messages_to_wococo; -pub mod westend_headers_to_millau; -pub mod wococo_headers_to_rococo; -pub mod wococo_messages_to_rococo; - -mod kusama; -mod millau; -mod polkadot; -mod rialto; -mod rialto_parachain; -mod rococo; -mod westend; -mod wococo; - -#[cfg(test)] -mod tests { - use crate::cli::{encode_call, send_message}; - use bp_messages::source_chain::TargetHeaderChain; - use bp_runtime::Chain as _; - use codec::Encode; - use frame_support::dispatch::GetDispatchInfo; - use relay_millau_client::Millau; - use relay_rialto_client::Rialto; - use relay_substrate_client::{SignParam, TransactionSignScheme, UnsignedTransaction}; - use sp_core::Pair; - use sp_runtime::traits::{IdentifyAccount, Verify}; - - #[test] - fn millau_signature_is_valid_on_rialto() { - let millau_sign = relay_millau_client::SigningParams::from_string("//Dave", None).unwrap(); - - let call = - rialto_runtime::Call::System(rialto_runtime::SystemCall::remark { remark: vec![] }); - - let millau_public: bp_millau::AccountSigner = millau_sign.public().into(); - let millau_account_id: bp_millau::AccountId = millau_public.into_account(); - - let digest = millau_runtime::millau_to_rialto_account_ownership_digest( - &call, - millau_account_id, - rialto_runtime::VERSION.spec_version, - ); - - let rialto_signer = - relay_rialto_client::SigningParams::from_string("//Dave", None).unwrap(); - let signature = rialto_signer.sign(&digest); - - assert!(signature.verify(&digest[..], &rialto_signer.public())); - } - - #[test] - fn rialto_signature_is_valid_on_millau() { - let rialto_sign = relay_rialto_client::SigningParams::from_string("//Dave", None).unwrap(); - - let call = - millau_runtime::Call::System(millau_runtime::SystemCall::remark { remark: vec![] }); - - let rialto_public: bp_rialto::AccountSigner = rialto_sign.public().into(); - let rialto_account_id: bp_rialto::AccountId = rialto_public.into_account(); - - let digest = rialto_runtime::rialto_to_millau_account_ownership_digest( - &call, - rialto_account_id, - millau_runtime::VERSION.spec_version, - ); - - let millau_signer = - relay_millau_client::SigningParams::from_string("//Dave", None).unwrap(); - let signature = millau_signer.sign(&digest); - - assert!(signature.verify(&digest[..], &millau_signer.public())); - } - - #[test] - fn maximal_rialto_to_millau_message_arguments_size_is_computed_correctly() { - use rialto_runtime::millau_messages::Millau; - - let maximal_remark_size = encode_call::compute_maximal_message_arguments_size( - bp_rialto::Rialto::max_extrinsic_size(), - bp_millau::Millau::max_extrinsic_size(), - ); - - let call: millau_runtime::Call = - millau_runtime::SystemCall::remark { remark: vec![42; maximal_remark_size as _] } - .into(); - let payload = send_message::message_payload( - Default::default(), - call.get_dispatch_info().weight, - bp_message_dispatch::CallOrigin::SourceRoot, - &call, - send_message::DispatchFeePayment::AtSourceChain, - ); - assert_eq!(Millau::verify_message(&payload), Ok(())); - - let call: millau_runtime::Call = - millau_runtime::SystemCall::remark { remark: vec![42; (maximal_remark_size + 1) as _] } - .into(); - let payload = send_message::message_payload( - Default::default(), - 
call.get_dispatch_info().weight, - bp_message_dispatch::CallOrigin::SourceRoot, - &call, - send_message::DispatchFeePayment::AtSourceChain, - ); - assert!(Millau::verify_message(&payload).is_err()); - } - - #[test] - fn maximal_size_remark_to_rialto_is_generated_correctly() { - assert!( - bridge_runtime_common::messages::target::maximal_incoming_message_size( - bp_rialto::Rialto::max_extrinsic_size() - ) > bp_millau::Millau::max_extrinsic_size(), - "We can't actually send maximal messages to Rialto from Millau, because Millau extrinsics can't be that large", - ) - } - - #[test] - fn maximal_rialto_to_millau_message_dispatch_weight_is_computed_correctly() { - use rialto_runtime::millau_messages::Millau; - - let maximal_dispatch_weight = send_message::compute_maximal_message_dispatch_weight( - bp_millau::Millau::max_extrinsic_weight(), - ); - let call: millau_runtime::Call = - rialto_runtime::SystemCall::remark { remark: vec![] }.into(); - - let payload = send_message::message_payload( - Default::default(), - maximal_dispatch_weight, - bp_message_dispatch::CallOrigin::SourceRoot, - &call, - send_message::DispatchFeePayment::AtSourceChain, - ); - assert_eq!(Millau::verify_message(&payload), Ok(())); - - let payload = send_message::message_payload( - Default::default(), - maximal_dispatch_weight + 1, - bp_message_dispatch::CallOrigin::SourceRoot, - &call, - send_message::DispatchFeePayment::AtSourceChain, - ); - assert!(Millau::verify_message(&payload).is_err()); - } - - #[test] - fn maximal_weight_fill_block_to_rialto_is_generated_correctly() { - use millau_runtime::rialto_messages::Rialto; - - let maximal_dispatch_weight = send_message::compute_maximal_message_dispatch_weight( - bp_rialto::Rialto::max_extrinsic_weight(), - ); - let call: rialto_runtime::Call = - millau_runtime::SystemCall::remark { remark: vec![] }.into(); - - let payload = send_message::message_payload( - Default::default(), - maximal_dispatch_weight, - bp_message_dispatch::CallOrigin::SourceRoot, - &call, - send_message::DispatchFeePayment::AtSourceChain, - ); - assert_eq!(Rialto::verify_message(&payload), Ok(())); - - let payload = send_message::message_payload( - Default::default(), - maximal_dispatch_weight + 1, - bp_message_dispatch::CallOrigin::SourceRoot, - &call, - send_message::DispatchFeePayment::AtSourceChain, - ); - assert!(Rialto::verify_message(&payload).is_err()); - } - - #[test] - fn rialto_tx_extra_bytes_constant_is_correct() { - let rialto_call = - rialto_runtime::Call::System(rialto_runtime::SystemCall::remark { remark: vec![] }); - let rialto_tx = Rialto::sign_transaction(SignParam { - spec_version: 1, - transaction_version: 1, - genesis_hash: Default::default(), - signer: sp_keyring::AccountKeyring::Alice.pair(), - era: relay_substrate_client::TransactionEra::immortal(), - unsigned: UnsignedTransaction::new(rialto_call.clone().into(), 0), - }) - .unwrap(); - let extra_bytes_in_transaction = rialto_tx.encode().len() - rialto_call.encode().len(); - assert!( - bp_rialto::TX_EXTRA_BYTES as usize >= extra_bytes_in_transaction, - "Hardcoded number of extra bytes in Rialto transaction {} is lower than actual value: {}", - bp_rialto::TX_EXTRA_BYTES, - extra_bytes_in_transaction, - ); - } - - #[test] - fn millau_tx_extra_bytes_constant_is_correct() { - let millau_call = - millau_runtime::Call::System(millau_runtime::SystemCall::remark { remark: vec![] }); - let millau_tx = Millau::sign_transaction(SignParam { - spec_version: 0, - transaction_version: 0, - genesis_hash: Default::default(), - signer: 
sp_keyring::AccountKeyring::Alice.pair(), - era: relay_substrate_client::TransactionEra::immortal(), - unsigned: UnsignedTransaction::new(millau_call.clone().into(), 0), - }) - .unwrap(); - let extra_bytes_in_transaction = millau_tx.encode().len() - millau_call.encode().len(); - assert!( - bp_millau::TX_EXTRA_BYTES as usize >= extra_bytes_in_transaction, - "Hardcoded number of extra bytes in Millau transaction {} is lower than actual value: {}", - bp_millau::TX_EXTRA_BYTES, - extra_bytes_in_transaction, - ); - } -} - -#[cfg(test)] -mod rococo_tests { - use bp_header_chain::justification::GrandpaJustification; - use codec::Encode; - - #[test] - fn scale_compatibility_of_bridges_call() { - // given - let header = sp_runtime::generic::Header { - parent_hash: Default::default(), - number: Default::default(), - state_root: Default::default(), - extrinsics_root: Default::default(), - digest: sp_runtime::generic::Digest { logs: vec![] }, - }; - - let justification = GrandpaJustification { - round: 0, - commit: finality_grandpa::Commit { - target_hash: Default::default(), - target_number: Default::default(), - precommits: vec![], - }, - votes_ancestries: vec![], - }; - - let actual = relay_rococo_client::runtime::BridgeGrandpaWococoCall::submit_finality_proof( - Box::new(header.clone()), - justification.clone(), - ); - let expected = - millau_runtime::BridgeGrandpaCall::::submit_finality_proof { - finality_target: Box::new(header), - justification, - }; - - // when - let actual_encoded = actual.encode(); - let expected_encoded = expected.encode(); - - // then - assert_eq!( - actual_encoded, expected_encoded, - "\n\nEncoding difference.\nGot {:#?} \nExpected: {:#?}", - actual, expected - ); - } -} - -#[cfg(test)] -mod westend_tests { - use bp_header_chain::justification::GrandpaJustification; - use codec::Encode; - - #[test] - fn scale_compatibility_of_bridges_call() { - // given - let header = sp_runtime::generic::Header { - parent_hash: Default::default(), - number: Default::default(), - state_root: Default::default(), - extrinsics_root: Default::default(), - digest: sp_runtime::generic::Digest { logs: vec![] }, - }; - - let justification = GrandpaJustification { - round: 0, - commit: finality_grandpa::Commit { - target_hash: Default::default(), - target_number: Default::default(), - precommits: vec![], - }, - votes_ancestries: vec![], - }; - - let actual = relay_kusama_client::runtime::BridgePolkadotGrandpaCall::submit_finality_proof( - Box::new(header.clone()), - justification.clone(), - ); - let expected = - millau_runtime::BridgeGrandpaCall::::submit_finality_proof { - finality_target: Box::new(header), - justification, - }; - - // when - let actual_encoded = actual.encode(); - let expected_encoded = expected.encode(); - - // then - assert_eq!( - actual_encoded, expected_encoded, - "\n\nEncoding difference.\nGot {:#?} \nExpected: {:#?}", - actual, expected - ); - } -} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/polkadot.rs b/polkadot/bridges/relays/bin-substrate/src/chains/polkadot.rs deleted file mode 100644 index 7ae1cbc4777..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/chains/polkadot.rs +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use anyhow::anyhow; -use bp_message_dispatch::{CallOrigin, MessagePayload}; -use bp_runtime::EncodedOrDecodedCall; -use codec::Decode; -use frame_support::weights::{DispatchClass, DispatchInfo, Pays, Weight}; -use relay_polkadot_client::Polkadot; -use sp_version::RuntimeVersion; - -use crate::cli::{ - bridge, - encode_call::{self, Call, CliEncodeCall}, - encode_message, - send_message::{self, DispatchFeePayment}, - CliChain, -}; - -/// Weight of the `system::remark` call at Polkadot. -/// -/// This weight is larger (x2) than actual weight at current Polkadot runtime to avoid unsuccessful -/// calls in the future. But since it is used only in tests (and on test chains), this is ok. -pub(crate) const SYSTEM_REMARK_CALL_WEIGHT: Weight = 2 * 1_345_000; - -impl CliEncodeCall for Polkadot { - fn encode_call(call: &Call) -> anyhow::Result> { - Ok(match call { - Call::Raw { data } => EncodedOrDecodedCall::Encoded(data.0.clone()), - Call::Remark { remark_payload, .. } => relay_polkadot_client::runtime::Call::System( - relay_polkadot_client::runtime::SystemCall::remark( - remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(), - ), - ) - .into(), - Call::BridgeSendMessage { lane, payload, fee, bridge_instance_index } => - match *bridge_instance_index { - bridge::POLKADOT_TO_KUSAMA_INDEX => { - let payload = Decode::decode(&mut &*payload.0)?; - relay_polkadot_client::runtime::Call::BridgeKusamaMessages( - relay_polkadot_client::runtime::BridgeKusamaMessagesCall::send_message( - lane.0, payload, fee.0, - ), - ) - .into() - }, - _ => anyhow::bail!( - "Unsupported target bridge pallet with instance index: {}", - bridge_instance_index - ), - }, - _ => anyhow::bail!("Unsupported Polkadot call: {:?}", call), - }) - } - - fn get_dispatch_info(call: &EncodedOrDecodedCall) -> anyhow::Result { - match *call { - EncodedOrDecodedCall::Decoded(relay_polkadot_client::runtime::Call::System( - relay_polkadot_client::runtime::SystemCall::remark(_), - )) => Ok(DispatchInfo { - weight: crate::chains::polkadot::SYSTEM_REMARK_CALL_WEIGHT, - class: DispatchClass::Normal, - pays_fee: Pays::Yes, - }), - _ => anyhow::bail!("Unsupported Polkadot call: {:?}", call), - } - } -} - -impl CliChain for Polkadot { - const RUNTIME_VERSION: RuntimeVersion = bp_polkadot::VERSION; - - type KeyPair = sp_core::sr25519::Pair; - type MessagePayload = MessagePayload< - bp_polkadot::AccountId, - bp_kusama::AccountPublic, - bp_kusama::Signature, - Vec, - >; - - fn ss58_format() -> u16 { - sp_core::crypto::Ss58AddressFormat::from( - sp_core::crypto::Ss58AddressFormatRegistry::PolkadotAccount, - ) - .into() - } - - fn encode_message( - message: encode_message::MessagePayload, - ) -> anyhow::Result { - match message { - encode_message::MessagePayload::Raw { data } => MessagePayload::decode(&mut &*data.0) - .map_err(|e| anyhow!("Failed to decode Polkadot's MessagePayload: {:?}", e)), - 
encode_message::MessagePayload::Call { mut call, mut sender, dispatch_weight } => { - type Source = Polkadot; - type Target = relay_kusama_client::Kusama; - - sender.enforce_chain::(); - let spec_version = Target::RUNTIME_VERSION.spec_version; - let origin = CallOrigin::SourceAccount(sender.raw_id()); - encode_call::preprocess_call::( - &mut call, - bridge::POLKADOT_TO_KUSAMA_INDEX, - ); - let call = Target::encode_call(&call)?; - let dispatch_weight = dispatch_weight.map(Ok).unwrap_or_else(|| { - Err(anyhow::format_err!( - "Please specify dispatch weight of the encoded Kusama call" - )) - })?; - - Ok(send_message::message_payload( - spec_version, - dispatch_weight, - origin, - &call, - DispatchFeePayment::AtSourceChain, - )) - }, - } - } -} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/polkadot_headers_to_kusama.rs b/polkadot/bridges/relays/bin-substrate/src/chains/polkadot_headers_to_kusama.rs deleted file mode 100644 index 6d118b07caa..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/chains/polkadot_headers_to_kusama.rs +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Polkadot-to-Kusama headers sync entrypoint. - -use async_trait::async_trait; -use relay_kusama_client::Kusama; -use substrate_relay_helper::{finality_pipeline::SubstrateFinalitySyncPipeline, TransactionParams}; - -/// Maximal saturating difference between `balance(now)` and `balance(now-24h)` to treat -/// relay as gone wild. -/// -/// Actual value, returned by `maximal_balance_decrease_per_day_is_sane` test is approximately 0.001 -/// KSM, and initial value of this constant was rounded up to 0.1 KSM. But for actual Kusama <> -/// Polkadot deployment we'll be using the same account for delivering finality (free for mandatory -/// headers) and messages. It means that we can't predict maximal loss. But to protect funds against -/// relay/deployment issues, let's limit it so something that is much larger than this estimation - -/// e.g. to 2 KSM. -// TODO: https://github.com/paritytech/parity-bridges-common/issues/1307 -pub(crate) const MAXIMAL_BALANCE_DECREASE_PER_DAY: bp_kusama::Balance = 2 * 1_000_000_000_000; - -/// Description of Polkadot -> Kusama finalized headers bridge. 
-#[derive(Clone, Debug)] -pub struct PolkadotFinalityToKusama; -substrate_relay_helper::generate_mocked_submit_finality_proof_call_builder!( - PolkadotFinalityToKusama, - PolkadotFinalityToKusamaCallBuilder, - relay_kusama_client::runtime::Call::BridgePolkadotGrandpa, - relay_kusama_client::runtime::BridgePolkadotGrandpaCall::submit_finality_proof -); - -#[async_trait] -impl SubstrateFinalitySyncPipeline for PolkadotFinalityToKusama { - type SourceChain = relay_polkadot_client::Polkadot; - type TargetChain = Kusama; - - type SubmitFinalityProofCallBuilder = PolkadotFinalityToKusamaCallBuilder; - type TransactionSignScheme = Kusama; - - async fn start_relay_guards( - target_client: &relay_substrate_client::Client, - transaction_params: &TransactionParams, - enable_version_guard: bool, - ) -> relay_substrate_client::Result<()> { - substrate_relay_helper::finality_guards::start::( - target_client, - transaction_params, - enable_version_guard, - MAXIMAL_BALANCE_DECREASE_PER_DAY, - ) - .await - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::chains::kusama_headers_to_polkadot::tests::compute_maximal_balance_decrease_per_day; - - #[test] - fn maximal_balance_decrease_per_day_is_sane() { - // we expect Polkadot -> Kusama relay to be running in mandatory-headers-only mode - // => we expect single header for every Polkadot session - let maximal_balance_decrease = compute_maximal_balance_decrease_per_day::< - bp_kusama::Balance, - bp_kusama::WeightToFee, - >(bp_polkadot::DAYS / bp_polkadot::SESSION_LENGTH + 1); - assert!( - MAXIMAL_BALANCE_DECREASE_PER_DAY >= maximal_balance_decrease, - "Maximal expected loss per day {} is larger than hardcoded {}", - maximal_balance_decrease, - MAXIMAL_BALANCE_DECREASE_PER_DAY, - ); - } -} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/polkadot_messages_to_kusama.rs b/polkadot/bridges/relays/bin-substrate/src/chains/polkadot_messages_to_kusama.rs deleted file mode 100644 index 9c4a4640eb9..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/chains/polkadot_messages_to_kusama.rs +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Polkadot-to-Kusama messages sync entrypoint. - -use frame_support::weights::Weight; -use messages_relay::relay_strategy::MixStrategy; -use relay_kusama_client::Kusama; -use relay_polkadot_client::Polkadot; -use substrate_relay_helper::messages_lane::SubstrateMessageLane; - -/// Description of Polkadot -> Kusama messages bridge. 
-#[derive(Clone, Debug)] -pub struct PolkadotMessagesToKusama; -substrate_relay_helper::generate_mocked_receive_message_proof_call_builder!( - PolkadotMessagesToKusama, - PolkadotMessagesToKusamaReceiveMessagesProofCallBuilder, - relay_kusama_client::runtime::Call::BridgePolkadotMessages, - relay_kusama_client::runtime::BridgePolkadotMessagesCall::receive_messages_proof -); -substrate_relay_helper::generate_mocked_receive_message_delivery_proof_call_builder!( - PolkadotMessagesToKusama, - PolkadotMessagesToKusamaReceiveMessagesDeliveryProofCallBuilder, - relay_polkadot_client::runtime::Call::BridgeKusamaMessages, - relay_polkadot_client::runtime::BridgeKusamaMessagesCall::receive_messages_delivery_proof -); -substrate_relay_helper::generate_mocked_update_conversion_rate_call_builder!( - Polkadot, - PolkadotMessagesToKusamaUpdateConversionRateCallBuilder, - relay_polkadot_client::runtime::Call::BridgeKusamaMessages, - relay_polkadot_client::runtime::BridgeKusamaMessagesCall::update_pallet_parameter, - relay_polkadot_client::runtime::BridgeKusamaMessagesParameter::KusamaToPolkadotConversionRate -); - -impl SubstrateMessageLane for PolkadotMessagesToKusama { - const SOURCE_TO_TARGET_CONVERSION_RATE_PARAMETER_NAME: Option<&'static str> = - Some(bp_kusama::POLKADOT_TO_KUSAMA_CONVERSION_RATE_PARAMETER_NAME); - const TARGET_TO_SOURCE_CONVERSION_RATE_PARAMETER_NAME: Option<&'static str> = - Some(bp_polkadot::KUSAMA_TO_POLKADOT_CONVERSION_RATE_PARAMETER_NAME); - - const SOURCE_FEE_MULTIPLIER_PARAMETER_NAME: Option<&'static str> = - Some(bp_kusama::POLKADOT_FEE_MULTIPLIER_PARAMETER_NAME); - const TARGET_FEE_MULTIPLIER_PARAMETER_NAME: Option<&'static str> = - Some(bp_polkadot::KUSAMA_FEE_MULTIPLIER_PARAMETER_NAME); - - const AT_SOURCE_TRANSACTION_PAYMENT_PALLET_NAME: Option<&'static str> = - Some(bp_polkadot::TRANSACTION_PAYMENT_PALLET_NAME); - const AT_TARGET_TRANSACTION_PAYMENT_PALLET_NAME: Option<&'static str> = - Some(bp_kusama::TRANSACTION_PAYMENT_PALLET_NAME); - - type SourceChain = Polkadot; - type TargetChain = Kusama; - - type SourceTransactionSignScheme = Polkadot; - type TargetTransactionSignScheme = Kusama; - - type ReceiveMessagesProofCallBuilder = PolkadotMessagesToKusamaReceiveMessagesProofCallBuilder; - type ReceiveMessagesDeliveryProofCallBuilder = - PolkadotMessagesToKusamaReceiveMessagesDeliveryProofCallBuilder; - - type TargetToSourceChainConversionRateUpdateBuilder = - PolkadotMessagesToKusamaUpdateConversionRateCallBuilder; - - type RelayStrategy = MixStrategy; -} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/rialto.rs b/polkadot/bridges/relays/bin-substrate/src/chains/rialto.rs deleted file mode 100644 index 8f26a64a4e3..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/chains/rialto.rs +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. 
If not, see . - -//! Rialto chain specification for CLI. - -use crate::cli::{ - bridge, - encode_call::{self, Call, CliEncodeCall}, - encode_message, - send_message::{self, DispatchFeePayment}, - CliChain, -}; -use anyhow::anyhow; -use bp_message_dispatch::{CallOrigin, MessagePayload}; -use bp_runtime::EncodedOrDecodedCall; -use codec::Decode; -use frame_support::weights::{DispatchInfo, GetDispatchInfo}; -use relay_rialto_client::Rialto; -use sp_version::RuntimeVersion; - -impl CliEncodeCall for Rialto { - fn encode_call(call: &Call) -> anyhow::Result> { - Ok(match call { - Call::Raw { data } => Self::Call::decode(&mut &*data.0)?.into(), - Call::Remark { remark_payload, .. } => - rialto_runtime::Call::System(rialto_runtime::SystemCall::remark { - remark: remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(), - }) - .into(), - Call::Transfer { recipient, amount } => - rialto_runtime::Call::Balances(rialto_runtime::BalancesCall::transfer { - dest: recipient.raw_id().into(), - value: amount.0, - }) - .into(), - Call::BridgeSendMessage { lane, payload, fee, bridge_instance_index } => - match *bridge_instance_index { - bridge::RIALTO_TO_MILLAU_INDEX => { - let payload = Decode::decode(&mut &*payload.0)?; - rialto_runtime::Call::BridgeMillauMessages( - rialto_runtime::MessagesCall::send_message { - lane_id: lane.0, - payload, - delivery_and_dispatch_fee: fee.0, - }, - ) - .into() - }, - _ => anyhow::bail!( - "Unsupported target bridge pallet with instance index: {}", - bridge_instance_index - ), - }, - }) - } - - fn get_dispatch_info(call: &EncodedOrDecodedCall) -> anyhow::Result { - Ok(call.to_decoded()?.get_dispatch_info()) - } -} - -impl CliChain for Rialto { - const RUNTIME_VERSION: RuntimeVersion = rialto_runtime::VERSION; - - type KeyPair = sp_core::sr25519::Pair; - type MessagePayload = MessagePayload< - bp_rialto::AccountId, - bp_millau::AccountSigner, - bp_millau::Signature, - Vec, - >; - - fn ss58_format() -> u16 { - rialto_runtime::SS58Prefix::get() as u16 - } - - fn encode_message( - message: encode_message::MessagePayload, - ) -> anyhow::Result { - match message { - encode_message::MessagePayload::Raw { data } => MessagePayload::decode(&mut &*data.0) - .map_err(|e| anyhow!("Failed to decode Rialto's MessagePayload: {:?}", e)), - encode_message::MessagePayload::Call { mut call, mut sender, dispatch_weight } => { - type Source = Rialto; - type Target = relay_millau_client::Millau; - - sender.enforce_chain::(); - let spec_version = Target::RUNTIME_VERSION.spec_version; - let origin = CallOrigin::SourceAccount(sender.raw_id()); - encode_call::preprocess_call::( - &mut call, - bridge::RIALTO_TO_MILLAU_INDEX, - ); - let call = Target::encode_call(&call)?; - let dispatch_weight = dispatch_weight.map(Ok).unwrap_or_else(|| { - call.to_decoded().map(|call| call.get_dispatch_info().weight) - })?; - - Ok(send_message::message_payload( - spec_version, - dispatch_weight, - origin, - &call, - DispatchFeePayment::AtSourceChain, - )) - }, - } - } -} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/rialto_headers_to_millau.rs b/polkadot/bridges/relays/bin-substrate/src/chains/rialto_headers_to_millau.rs deleted file mode 100644 index a433f3562a7..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/chains/rialto_headers_to_millau.rs +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Rialto-to-Millau headers sync entrypoint. - -use substrate_relay_helper::finality_pipeline::{ - DirectSubmitFinalityProofCallBuilder, SubstrateFinalitySyncPipeline, -}; - -/// Description of Millau -> Rialto finalized headers bridge. -#[derive(Clone, Debug)] -pub struct RialtoFinalityToMillau; - -impl SubstrateFinalitySyncPipeline for RialtoFinalityToMillau { - type SourceChain = relay_rialto_client::Rialto; - type TargetChain = relay_millau_client::Millau; - - type SubmitFinalityProofCallBuilder = DirectSubmitFinalityProofCallBuilder< - Self, - millau_runtime::Runtime, - millau_runtime::RialtoGrandpaInstance, - >; - type TransactionSignScheme = relay_millau_client::Millau; -} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/rialto_messages_to_millau.rs b/polkadot/bridges/relays/bin-substrate/src/chains/rialto_messages_to_millau.rs deleted file mode 100644 index d34f4714644..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/chains/rialto_messages_to_millau.rs +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Rialto-to-Millau messages sync entrypoint. - -use messages_relay::relay_strategy::MixStrategy; -use relay_millau_client::Millau; -use relay_rialto_client::Rialto; -use substrate_relay_helper::messages_lane::{ - DirectReceiveMessagesDeliveryProofCallBuilder, DirectReceiveMessagesProofCallBuilder, - SubstrateMessageLane, -}; - -/// Description of Rialto -> Millau messages bridge. 
-#[derive(Clone, Debug)] -pub struct RialtoMessagesToMillau; -substrate_relay_helper::generate_direct_update_conversion_rate_call_builder!( - Rialto, - RialtoMessagesToMillauUpdateConversionRateCallBuilder, - rialto_runtime::Runtime, - rialto_runtime::WithMillauMessagesInstance, - rialto_runtime::millau_messages::RialtoToMillauMessagesParameter::MillauToRialtoConversionRate -); - -impl SubstrateMessageLane for RialtoMessagesToMillau { - const SOURCE_TO_TARGET_CONVERSION_RATE_PARAMETER_NAME: Option<&'static str> = - Some(bp_millau::RIALTO_TO_MILLAU_CONVERSION_RATE_PARAMETER_NAME); - const TARGET_TO_SOURCE_CONVERSION_RATE_PARAMETER_NAME: Option<&'static str> = - Some(bp_rialto::MILLAU_TO_RIALTO_CONVERSION_RATE_PARAMETER_NAME); - - const SOURCE_FEE_MULTIPLIER_PARAMETER_NAME: Option<&'static str> = None; - const TARGET_FEE_MULTIPLIER_PARAMETER_NAME: Option<&'static str> = None; - const AT_SOURCE_TRANSACTION_PAYMENT_PALLET_NAME: Option<&'static str> = None; - const AT_TARGET_TRANSACTION_PAYMENT_PALLET_NAME: Option<&'static str> = None; - - type SourceChain = Rialto; - type TargetChain = Millau; - - type SourceTransactionSignScheme = Rialto; - type TargetTransactionSignScheme = Millau; - - type ReceiveMessagesProofCallBuilder = DirectReceiveMessagesProofCallBuilder< - Self, - millau_runtime::Runtime, - millau_runtime::WithRialtoMessagesInstance, - >; - type ReceiveMessagesDeliveryProofCallBuilder = DirectReceiveMessagesDeliveryProofCallBuilder< - Self, - rialto_runtime::Runtime, - rialto_runtime::WithMillauMessagesInstance, - >; - - type TargetToSourceChainConversionRateUpdateBuilder = - RialtoMessagesToMillauUpdateConversionRateCallBuilder; - - type RelayStrategy = MixStrategy; -} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/rialto_parachain.rs b/polkadot/bridges/relays/bin-substrate/src/chains/rialto_parachain.rs deleted file mode 100644 index 0ed39faa543..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/chains/rialto_parachain.rs +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Rialto parachain specification for CLI. - -use crate::cli::{ - encode_call::{Call, CliEncodeCall}, - encode_message, CliChain, -}; -use bp_message_dispatch::MessagePayload; -use bp_runtime::EncodedOrDecodedCall; -use codec::Decode; -use frame_support::weights::{DispatchInfo, GetDispatchInfo}; -use relay_rialto_parachain_client::RialtoParachain; -use sp_version::RuntimeVersion; - -impl CliEncodeCall for RialtoParachain { - fn encode_call(call: &Call) -> anyhow::Result> { - Ok(match call { - Call::Raw { data } => Self::Call::decode(&mut &*data.0)?.into(), - Call::Remark { remark_payload, .. 
} => rialto_parachain_runtime::Call::System( - rialto_parachain_runtime::SystemCall::remark { - remark: remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(), - }, - ) - .into(), - Call::Transfer { recipient, amount } => rialto_parachain_runtime::Call::Balances( - rialto_parachain_runtime::BalancesCall::transfer { - dest: recipient.raw_id().into(), - value: amount.0, - }, - ) - .into(), - Call::BridgeSendMessage { .. } => { - anyhow::bail!("Bridge messages are not (yet) supported here",) - }, - }) - } - - fn get_dispatch_info(call: &EncodedOrDecodedCall) -> anyhow::Result { - Ok(call.to_decoded()?.get_dispatch_info()) - } -} - -impl CliChain for RialtoParachain { - const RUNTIME_VERSION: RuntimeVersion = rialto_parachain_runtime::VERSION; - - type KeyPair = sp_core::sr25519::Pair; - type MessagePayload = MessagePayload< - bp_rialto_parachain::AccountId, - bp_millau::AccountSigner, - bp_millau::Signature, - Vec, - >; - - fn ss58_format() -> u16 { - rialto_parachain_runtime::SS58Prefix::get() as u16 - } - - fn encode_message( - _message: encode_message::MessagePayload, - ) -> anyhow::Result { - anyhow::bail!("Not supported") - } -} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/rococo.rs b/polkadot/bridges/relays/bin-substrate/src/chains/rococo.rs deleted file mode 100644 index ceef4c1f532..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/chains/rococo.rs +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use anyhow::anyhow; -use bp_message_dispatch::{CallOrigin, MessagePayload}; -use bp_runtime::EncodedOrDecodedCall; -use codec::Decode; -use frame_support::weights::{DispatchClass, DispatchInfo, Pays, Weight}; -use relay_rococo_client::Rococo; -use sp_version::RuntimeVersion; - -use crate::cli::{ - bridge, - encode_call::{self, Call, CliEncodeCall}, - encode_message, - send_message::{self, DispatchFeePayment}, - CliChain, -}; - -/// Weight of the `system::remark` call at Rococo. -/// -/// This weight is larger (x2) than actual weight at current Rococo runtime to avoid unsuccessful -/// calls in the future. But since it is used only in tests (and on test chains), this is ok. -pub(crate) const SYSTEM_REMARK_CALL_WEIGHT: Weight = 2 * 1_345_000; - -impl CliEncodeCall for Rococo { - fn encode_call(call: &Call) -> anyhow::Result> { - Ok(match call { - Call::Raw { data } => EncodedOrDecodedCall::Encoded(data.0.clone()), - Call::Remark { remark_payload, .. 
} => relay_rococo_client::runtime::Call::System( - relay_rococo_client::runtime::SystemCall::remark( - remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(), - ), - ) - .into(), - Call::BridgeSendMessage { lane, payload, fee, bridge_instance_index } => - match *bridge_instance_index { - bridge::ROCOCO_TO_WOCOCO_INDEX => { - let payload = Decode::decode(&mut &*payload.0)?; - relay_rococo_client::runtime::Call::BridgeWococoMessages( - relay_rococo_client::runtime::BridgeWococoMessagesCall::send_message( - lane.0, payload, fee.0, - ), - ) - .into() - }, - _ => anyhow::bail!( - "Unsupported target bridge pallet with instance index: {}", - bridge_instance_index - ), - }, - _ => anyhow::bail!("The call is not supported"), - }) - } - - fn get_dispatch_info(call: &EncodedOrDecodedCall) -> anyhow::Result { - match *call { - EncodedOrDecodedCall::Decoded(relay_rococo_client::runtime::Call::System( - relay_rococo_client::runtime::SystemCall::remark(_), - )) => Ok(DispatchInfo { - weight: SYSTEM_REMARK_CALL_WEIGHT, - class: DispatchClass::Normal, - pays_fee: Pays::Yes, - }), - _ => anyhow::bail!("Unsupported Rococo call: {:?}", call), - } - } -} - -impl CliChain for Rococo { - const RUNTIME_VERSION: RuntimeVersion = bp_rococo::VERSION; - - type KeyPair = sp_core::sr25519::Pair; - type MessagePayload = MessagePayload< - bp_rococo::AccountId, - bp_wococo::AccountPublic, - bp_wococo::Signature, - Vec, - >; - - fn ss58_format() -> u16 { - 42 - } - - fn encode_message( - message: encode_message::MessagePayload, - ) -> anyhow::Result { - match message { - encode_message::MessagePayload::Raw { data } => MessagePayload::decode(&mut &*data.0) - .map_err(|e| anyhow!("Failed to decode Rococo's MessagePayload: {:?}", e)), - encode_message::MessagePayload::Call { mut call, mut sender, dispatch_weight } => { - type Source = Rococo; - type Target = relay_wococo_client::Wococo; - - sender.enforce_chain::(); - let spec_version = Target::RUNTIME_VERSION.spec_version; - let origin = CallOrigin::SourceAccount(sender.raw_id()); - encode_call::preprocess_call::( - &mut call, - bridge::ROCOCO_TO_WOCOCO_INDEX, - ); - let call = Target::encode_call(&call)?; - let dispatch_weight = dispatch_weight.map(Ok).unwrap_or_else(|| { - Err(anyhow::format_err!( - "Please specify dispatch weight of the encoded Wococo call" - )) - })?; - - Ok(send_message::message_payload( - spec_version, - dispatch_weight, - origin, - &call, - DispatchFeePayment::AtSourceChain, - )) - }, - } - } -} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/rococo_headers_to_wococo.rs b/polkadot/bridges/relays/bin-substrate/src/chains/rococo_headers_to_wococo.rs deleted file mode 100644 index bb66a7422d3..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/chains/rococo_headers_to_wococo.rs +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Rococo-to-Wococo headers sync entrypoint. - -use crate::chains::wococo_headers_to_rococo::MAXIMAL_BALANCE_DECREASE_PER_DAY; - -use async_trait::async_trait; -use relay_wococo_client::Wococo; -use substrate_relay_helper::{finality_pipeline::SubstrateFinalitySyncPipeline, TransactionParams}; - -/// Description of Rococo -> Wococo finalized headers bridge. -#[derive(Clone, Debug)] -pub struct RococoFinalityToWococo; -substrate_relay_helper::generate_mocked_submit_finality_proof_call_builder!( - RococoFinalityToWococo, - RococoFinalityToWococoCallBuilder, - relay_wococo_client::runtime::Call::BridgeGrandpaRococo, - relay_wococo_client::runtime::BridgeGrandpaRococoCall::submit_finality_proof -); - -#[async_trait] -impl SubstrateFinalitySyncPipeline for RococoFinalityToWococo { - type SourceChain = relay_rococo_client::Rococo; - type TargetChain = Wococo; - - type SubmitFinalityProofCallBuilder = RococoFinalityToWococoCallBuilder; - type TransactionSignScheme = Wococo; - - async fn start_relay_guards( - target_client: &relay_substrate_client::Client, - transaction_params: &TransactionParams, - enable_version_guard: bool, - ) -> relay_substrate_client::Result<()> { - substrate_relay_helper::finality_guards::start::( - target_client, - transaction_params, - enable_version_guard, - MAXIMAL_BALANCE_DECREASE_PER_DAY, - ) - .await - } -} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/rococo_messages_to_wococo.rs b/polkadot/bridges/relays/bin-substrate/src/chains/rococo_messages_to_wococo.rs deleted file mode 100644 index 4e67c87fa8c..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/chains/rococo_messages_to_wococo.rs +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Rococo-to-Wococo messages sync entrypoint. - -use frame_support::weights::Weight; -use messages_relay::relay_strategy::MixStrategy; -use relay_rococo_client::Rococo; -use relay_wococo_client::Wococo; -use substrate_relay_helper::messages_lane::SubstrateMessageLane; - -/// Description of Rococo -> Wococo messages bridge. 
-#[derive(Clone, Debug)] -pub struct RococoMessagesToWococo; -substrate_relay_helper::generate_mocked_receive_message_proof_call_builder!( - RococoMessagesToWococo, - RococoMessagesToWococoReceiveMessagesProofCallBuilder, - relay_wococo_client::runtime::Call::BridgeRococoMessages, - relay_wococo_client::runtime::BridgeRococoMessagesCall::receive_messages_proof -); -substrate_relay_helper::generate_mocked_receive_message_delivery_proof_call_builder!( - RococoMessagesToWococo, - RococoMessagesToWococoReceiveMessagesDeliveryProofCallBuilder, - relay_rococo_client::runtime::Call::BridgeWococoMessages, - relay_rococo_client::runtime::BridgeWococoMessagesCall::receive_messages_delivery_proof -); - -impl SubstrateMessageLane for RococoMessagesToWococo { - const SOURCE_TO_TARGET_CONVERSION_RATE_PARAMETER_NAME: Option<&'static str> = None; - const TARGET_TO_SOURCE_CONVERSION_RATE_PARAMETER_NAME: Option<&'static str> = None; - - const SOURCE_FEE_MULTIPLIER_PARAMETER_NAME: Option<&'static str> = None; - const TARGET_FEE_MULTIPLIER_PARAMETER_NAME: Option<&'static str> = None; - const AT_SOURCE_TRANSACTION_PAYMENT_PALLET_NAME: Option<&'static str> = None; - const AT_TARGET_TRANSACTION_PAYMENT_PALLET_NAME: Option<&'static str> = None; - - type SourceChain = Rococo; - type TargetChain = Wococo; - - type SourceTransactionSignScheme = Rococo; - type TargetTransactionSignScheme = Wococo; - - type ReceiveMessagesProofCallBuilder = RococoMessagesToWococoReceiveMessagesProofCallBuilder; - type ReceiveMessagesDeliveryProofCallBuilder = - RococoMessagesToWococoReceiveMessagesDeliveryProofCallBuilder; - - type TargetToSourceChainConversionRateUpdateBuilder = (); - - type RelayStrategy = MixStrategy; -} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/westend.rs b/polkadot/bridges/relays/bin-substrate/src/chains/westend.rs deleted file mode 100644 index 8d3b5db9ab3..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/chains/westend.rs +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Westend chain specification for CLI. 
- -use crate::cli::{encode_message, CliChain}; -use anyhow::anyhow; -use relay_westend_client::Westend; -use sp_version::RuntimeVersion; - -impl CliChain for Westend { - const RUNTIME_VERSION: RuntimeVersion = bp_westend::VERSION; - - type KeyPair = sp_core::sr25519::Pair; - type MessagePayload = (); - - fn ss58_format() -> u16 { - sp_core::crypto::Ss58AddressFormat::from( - sp_core::crypto::Ss58AddressFormatRegistry::SubstrateAccount, - ) - .into() - } - - fn encode_message( - _message: encode_message::MessagePayload, - ) -> anyhow::Result { - Err(anyhow!("Sending messages from Westend is not yet supported.")) - } -} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/westend_headers_to_millau.rs b/polkadot/bridges/relays/bin-substrate/src/chains/westend_headers_to_millau.rs deleted file mode 100644 index 2ec20a027ff..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/chains/westend_headers_to_millau.rs +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Westend-to-Millau headers sync entrypoint. - -use substrate_relay_helper::finality_pipeline::{ - DirectSubmitFinalityProofCallBuilder, SubstrateFinalitySyncPipeline, -}; - -/// Description of Westend -> Millau finalized headers bridge. -#[derive(Clone, Debug)] -pub struct WestendFinalityToMillau; - -impl SubstrateFinalitySyncPipeline for WestendFinalityToMillau { - type SourceChain = relay_westend_client::Westend; - type TargetChain = relay_millau_client::Millau; - - type SubmitFinalityProofCallBuilder = DirectSubmitFinalityProofCallBuilder< - Self, - millau_runtime::Runtime, - millau_runtime::WestendGrandpaInstance, - >; - type TransactionSignScheme = relay_millau_client::Millau; -} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/wococo.rs b/polkadot/bridges/relays/bin-substrate/src/chains/wococo.rs deleted file mode 100644 index 46dec2a3c90..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/chains/wococo.rs +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -use anyhow::anyhow; -use bp_message_dispatch::{CallOrigin, MessagePayload}; -use bp_runtime::EncodedOrDecodedCall; -use codec::Decode; -use frame_support::weights::{DispatchClass, DispatchInfo, Pays}; -use relay_wococo_client::Wococo; -use sp_version::RuntimeVersion; - -use crate::cli::{ - bridge, - encode_call::{self, Call, CliEncodeCall}, - encode_message, - send_message::{self, DispatchFeePayment}, - CliChain, -}; - -impl CliEncodeCall for Wococo { - fn encode_call(call: &Call) -> anyhow::Result> { - Ok(match call { - Call::Raw { data } => EncodedOrDecodedCall::Encoded(data.0.clone()), - Call::Remark { remark_payload, .. } => relay_wococo_client::runtime::Call::System( - relay_wococo_client::runtime::SystemCall::remark( - remark_payload.as_ref().map(|x| x.0.clone()).unwrap_or_default(), - ), - ) - .into(), - Call::BridgeSendMessage { lane, payload, fee, bridge_instance_index } => - match *bridge_instance_index { - bridge::WOCOCO_TO_ROCOCO_INDEX => { - let payload = Decode::decode(&mut &*payload.0)?; - relay_wococo_client::runtime::Call::BridgeRococoMessages( - relay_wococo_client::runtime::BridgeRococoMessagesCall::send_message( - lane.0, payload, fee.0, - ), - ) - .into() - }, - _ => anyhow::bail!( - "Unsupported target bridge pallet with instance index: {}", - bridge_instance_index - ), - }, - _ => anyhow::bail!("The call is not supported"), - }) - } - - fn get_dispatch_info(call: &EncodedOrDecodedCall) -> anyhow::Result { - match *call { - EncodedOrDecodedCall::Decoded(relay_wococo_client::runtime::Call::System( - relay_wococo_client::runtime::SystemCall::remark(_), - )) => Ok(DispatchInfo { - weight: crate::chains::rococo::SYSTEM_REMARK_CALL_WEIGHT, - class: DispatchClass::Normal, - pays_fee: Pays::Yes, - }), - _ => anyhow::bail!("Unsupported Wococo call: {:?}", call), - } - } -} - -impl CliChain for Wococo { - const RUNTIME_VERSION: RuntimeVersion = bp_wococo::VERSION; - - type KeyPair = sp_core::sr25519::Pair; - type MessagePayload = MessagePayload< - bp_wococo::AccountId, - bp_rococo::AccountPublic, - bp_rococo::Signature, - Vec, - >; - - fn ss58_format() -> u16 { - 42 - } - - fn encode_message( - message: encode_message::MessagePayload, - ) -> anyhow::Result { - match message { - encode_message::MessagePayload::Raw { data } => MessagePayload::decode(&mut &*data.0) - .map_err(|e| anyhow!("Failed to decode Wococo's MessagePayload: {:?}", e)), - encode_message::MessagePayload::Call { mut call, mut sender, dispatch_weight } => { - type Source = Wococo; - type Target = relay_rococo_client::Rococo; - - sender.enforce_chain::(); - let spec_version = Target::RUNTIME_VERSION.spec_version; - let origin = CallOrigin::SourceAccount(sender.raw_id()); - encode_call::preprocess_call::( - &mut call, - bridge::WOCOCO_TO_ROCOCO_INDEX, - ); - let call = Target::encode_call(&call)?; - let dispatch_weight = dispatch_weight.map(Ok).unwrap_or_else(|| { - Err(anyhow::format_err!( - "Please specify dispatch weight of the encoded Rococo call" - )) - })?; - - Ok(send_message::message_payload( - spec_version, - dispatch_weight, - origin, - &call, - DispatchFeePayment::AtSourceChain, - )) - }, - } - } -} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/wococo_headers_to_rococo.rs b/polkadot/bridges/relays/bin-substrate/src/chains/wococo_headers_to_rococo.rs deleted file mode 100644 index a7bff595188..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/chains/wococo_headers_to_rococo.rs +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. 
-// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Wococo-to-Rococo headers sync entrypoint. - -use async_trait::async_trait; -use relay_rococo_client::Rococo; -use substrate_relay_helper::{finality_pipeline::SubstrateFinalitySyncPipeline, TransactionParams}; - -/// Maximal saturating difference between `balance(now)` and `balance(now-24h)` to treat -/// relay as gone wild. -/// -/// See `maximal_balance_decrease_per_day_is_sane` test for details. -/// Note that this is in plancks, so this corresponds to `1500 UNITS`. -pub(crate) const MAXIMAL_BALANCE_DECREASE_PER_DAY: bp_rococo::Balance = 1_500_000_000_000_000; - -/// Description of Wococo -> Rococo finalized headers bridge. -#[derive(Clone, Debug)] -pub struct WococoFinalityToRococo; -substrate_relay_helper::generate_mocked_submit_finality_proof_call_builder!( - WococoFinalityToRococo, - WococoFinalityToRococoCallBuilder, - relay_rococo_client::runtime::Call::BridgeGrandpaWococo, - relay_rococo_client::runtime::BridgeGrandpaWococoCall::submit_finality_proof -); - -#[async_trait] -impl SubstrateFinalitySyncPipeline for WococoFinalityToRococo { - type SourceChain = relay_wococo_client::Wococo; - type TargetChain = Rococo; - - type SubmitFinalityProofCallBuilder = WococoFinalityToRococoCallBuilder; - type TransactionSignScheme = Rococo; - - async fn start_relay_guards( - target_client: &relay_substrate_client::Client, - transaction_params: &TransactionParams, - enable_version_guard: bool, - ) -> relay_substrate_client::Result<()> { - substrate_relay_helper::finality_guards::start::( - target_client, - transaction_params, - enable_version_guard, - MAXIMAL_BALANCE_DECREASE_PER_DAY, - ) - .await - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::chains::kusama_headers_to_polkadot::tests::compute_maximal_balance_decrease_per_day; - - #[test] - fn maximal_balance_decrease_per_day_is_sane() { - // we expect Wococo -> Rococo relay to be running in all-headers mode - let maximal_balance_decrease = compute_maximal_balance_decrease_per_day::< - bp_kusama::Balance, - bp_kusama::WeightToFee, - >(bp_wococo::DAYS); - assert!( - MAXIMAL_BALANCE_DECREASE_PER_DAY >= maximal_balance_decrease, - "Maximal expected loss per day {} is larger than hardcoded {}", - maximal_balance_decrease, - MAXIMAL_BALANCE_DECREASE_PER_DAY, - ); - } -} diff --git a/polkadot/bridges/relays/bin-substrate/src/chains/wococo_messages_to_rococo.rs b/polkadot/bridges/relays/bin-substrate/src/chains/wococo_messages_to_rococo.rs deleted file mode 100644 index 2c44803f2c0..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/chains/wococo_messages_to_rococo.rs +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Wococo-to-Rococo messages sync entrypoint. - -use frame_support::weights::Weight; - -use messages_relay::relay_strategy::MixStrategy; -use relay_rococo_client::Rococo; -use relay_wococo_client::Wococo; -use substrate_relay_helper::messages_lane::SubstrateMessageLane; - -/// Description of Wococo -> Rococo messages bridge. -#[derive(Clone, Debug)] -pub struct WococoMessagesToRococo; -substrate_relay_helper::generate_mocked_receive_message_proof_call_builder!( - WococoMessagesToRococo, - WococoMessagesToRococoReceiveMessagesProofCallBuilder, - relay_rococo_client::runtime::Call::BridgeWococoMessages, - relay_rococo_client::runtime::BridgeWococoMessagesCall::receive_messages_proof -); -substrate_relay_helper::generate_mocked_receive_message_delivery_proof_call_builder!( - WococoMessagesToRococo, - WococoMessagesToRococoReceiveMessagesDeliveryProofCallBuilder, - relay_wococo_client::runtime::Call::BridgeRococoMessages, - relay_wococo_client::runtime::BridgeRococoMessagesCall::receive_messages_delivery_proof -); - -impl SubstrateMessageLane for WococoMessagesToRococo { - const SOURCE_TO_TARGET_CONVERSION_RATE_PARAMETER_NAME: Option<&'static str> = None; - const TARGET_TO_SOURCE_CONVERSION_RATE_PARAMETER_NAME: Option<&'static str> = None; - - const SOURCE_FEE_MULTIPLIER_PARAMETER_NAME: Option<&'static str> = None; - const TARGET_FEE_MULTIPLIER_PARAMETER_NAME: Option<&'static str> = None; - const AT_SOURCE_TRANSACTION_PAYMENT_PALLET_NAME: Option<&'static str> = None; - const AT_TARGET_TRANSACTION_PAYMENT_PALLET_NAME: Option<&'static str> = None; - - type SourceChain = Wococo; - type TargetChain = Rococo; - - type SourceTransactionSignScheme = Wococo; - type TargetTransactionSignScheme = Rococo; - - type ReceiveMessagesProofCallBuilder = WococoMessagesToRococoReceiveMessagesProofCallBuilder; - type ReceiveMessagesDeliveryProofCallBuilder = - WococoMessagesToRococoReceiveMessagesDeliveryProofCallBuilder; - - type TargetToSourceChainConversionRateUpdateBuilder = (); - - type RelayStrategy = MixStrategy; -} diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/bridge.rs b/polkadot/bridges/relays/bin-substrate/src/cli/bridge.rs deleted file mode 100644 index 2eb836a84a7..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/cli/bridge.rs +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use strum::{EnumString, EnumVariantNames}; - -#[derive(Debug, PartialEq, Eq, EnumString, EnumVariantNames)] -#[strum(serialize_all = "kebab_case")] -/// Supported full bridges (headers + messages). -pub enum FullBridge { - MillauToRialto, - RialtoToMillau, - RococoToWococo, - WococoToRococo, - KusamaToPolkadot, - PolkadotToKusama, -} - -impl FullBridge { - /// Return instance index of the bridge pallet in source runtime. - pub fn bridge_instance_index(&self) -> u8 { - match self { - Self::MillauToRialto => MILLAU_TO_RIALTO_INDEX, - Self::RialtoToMillau => RIALTO_TO_MILLAU_INDEX, - Self::RococoToWococo => ROCOCO_TO_WOCOCO_INDEX, - Self::WococoToRococo => WOCOCO_TO_ROCOCO_INDEX, - Self::KusamaToPolkadot => KUSAMA_TO_POLKADOT_INDEX, - Self::PolkadotToKusama => POLKADOT_TO_KUSAMA_INDEX, - } - } -} - -pub const RIALTO_TO_MILLAU_INDEX: u8 = 0; -pub const MILLAU_TO_RIALTO_INDEX: u8 = 0; -pub const ROCOCO_TO_WOCOCO_INDEX: u8 = 0; -pub const WOCOCO_TO_ROCOCO_INDEX: u8 = 0; -pub const KUSAMA_TO_POLKADOT_INDEX: u8 = 0; -pub const POLKADOT_TO_KUSAMA_INDEX: u8 = 0; - -/// The macro allows executing bridge-specific code without going fully generic. -/// -/// It matches on the [`FullBridge`] enum, sets bridge-specific types or imports and injects -/// the `$generic` code at every variant. -#[macro_export] -macro_rules! select_full_bridge { - ($bridge: expr, $generic: tt) => { - match $bridge { - FullBridge::MillauToRialto => { - type Source = relay_millau_client::Millau; - #[allow(dead_code)] - type Target = relay_rialto_client::Rialto; - - // Derive-account - #[allow(unused_imports)] - use bp_rialto::derive_account_from_millau_id as derive_account; - - // Relay-messages - #[allow(unused_imports)] - use crate::chains::millau_messages_to_rialto::MillauMessagesToRialto as MessagesLane; - - // Send-message / Estimate-fee - #[allow(unused_imports)] - use bp_rialto::TO_RIALTO_ESTIMATE_MESSAGE_FEE_METHOD as ESTIMATE_MESSAGE_FEE_METHOD; - // Send-message - #[allow(unused_imports)] - use millau_runtime::millau_to_rialto_account_ownership_digest as account_ownership_digest; - - $generic - } - FullBridge::RialtoToMillau => { - type Source = relay_rialto_client::Rialto; - #[allow(dead_code)] - type Target = relay_millau_client::Millau; - - // Derive-account - #[allow(unused_imports)] - use bp_millau::derive_account_from_rialto_id as derive_account; - - // Relay-messages - #[allow(unused_imports)] - use crate::chains::rialto_messages_to_millau::RialtoMessagesToMillau as MessagesLane; - - // Send-message / Estimate-fee - #[allow(unused_imports)] - use bp_millau::TO_MILLAU_ESTIMATE_MESSAGE_FEE_METHOD as ESTIMATE_MESSAGE_FEE_METHOD; - - // Send-message - #[allow(unused_imports)] - use rialto_runtime::rialto_to_millau_account_ownership_digest as account_ownership_digest; - - $generic - } - FullBridge::RococoToWococo => { - type Source = relay_rococo_client::Rococo; - #[allow(dead_code)] - type Target = relay_wococo_client::Wococo; - - // Derive-account - #[allow(unused_imports)] - use bp_wococo::derive_account_from_rococo_id as derive_account; - - // Relay-messages - #[allow(unused_imports)] - use crate::chains::rococo_messages_to_wococo::RococoMessagesToWococo as MessagesLane; - - // Send-message / Estimate-fee - #[allow(unused_imports)] - use bp_wococo::TO_WOCOCO_ESTIMATE_MESSAGE_FEE_METHOD as ESTIMATE_MESSAGE_FEE_METHOD; - // 
Send-message - #[allow(unused_imports)] - use relay_rococo_client::runtime::rococo_to_wococo_account_ownership_digest as account_ownership_digest; - - $generic - } - FullBridge::WococoToRococo => { - type Source = relay_wococo_client::Wococo; - #[allow(dead_code)] - type Target = relay_rococo_client::Rococo; - - // Derive-account - #[allow(unused_imports)] - use bp_rococo::derive_account_from_wococo_id as derive_account; - - // Relay-messages - #[allow(unused_imports)] - use crate::chains::wococo_messages_to_rococo::WococoMessagesToRococo as MessagesLane; - - // Send-message / Estimate-fee - #[allow(unused_imports)] - use bp_rococo::TO_ROCOCO_ESTIMATE_MESSAGE_FEE_METHOD as ESTIMATE_MESSAGE_FEE_METHOD; - // Send-message - #[allow(unused_imports)] - use relay_wococo_client::runtime::wococo_to_rococo_account_ownership_digest as account_ownership_digest; - - $generic - } - FullBridge::KusamaToPolkadot => { - type Source = relay_kusama_client::Kusama; - #[allow(dead_code)] - type Target = relay_polkadot_client::Polkadot; - - // Derive-account - #[allow(unused_imports)] - use bp_polkadot::derive_account_from_kusama_id as derive_account; - - // Relay-messages - #[allow(unused_imports)] - use crate::chains::kusama_messages_to_polkadot::KusamaMessagesToPolkadot as MessagesLane; - - // Send-message / Estimate-fee - #[allow(unused_imports)] - use bp_polkadot::TO_POLKADOT_ESTIMATE_MESSAGE_FEE_METHOD as ESTIMATE_MESSAGE_FEE_METHOD; - // Send-message - #[allow(unused_imports)] - use relay_kusama_client::runtime::kusama_to_polkadot_account_ownership_digest as account_ownership_digest; - - $generic - } - FullBridge::PolkadotToKusama => { - type Source = relay_polkadot_client::Polkadot; - #[allow(dead_code)] - type Target = relay_kusama_client::Kusama; - - // Derive-account - #[allow(unused_imports)] - use bp_kusama::derive_account_from_polkadot_id as derive_account; - - // Relay-messages - #[allow(unused_imports)] - use crate::chains::polkadot_messages_to_kusama::PolkadotMessagesToKusama as MessagesLane; - - // Send-message / Estimate-fee - #[allow(unused_imports)] - use bp_kusama::TO_KUSAMA_ESTIMATE_MESSAGE_FEE_METHOD as ESTIMATE_MESSAGE_FEE_METHOD; - // Send-message - #[allow(unused_imports)] - use relay_polkadot_client::runtime::polkadot_to_kusama_account_ownership_digest as account_ownership_digest; - - $generic - } - } - }; -} diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/derive_account.rs b/polkadot/bridges/relays/bin-substrate/src/cli/derive_account.rs deleted file mode 100644 index 5b809eb69f2..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/cli/derive_account.rs +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
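For orientation, here is a minimal sketch of how `select_full_bridge!` is consumed: the caller hands it a `FullBridge` value and a code block, and the macro re-emits that block once per variant with `Source`, `Target` and the other aliases bound to the chain-specific types. The helper function below is hypothetical (not part of the deleted crate) and assumes `FullBridge`, the macro and `relay_substrate_client::Chain` are in scope so the `NAME` constants resolve.

use relay_substrate_client::Chain;

// Hypothetical helper, shown only to illustrate the expansion pattern.
fn print_bridge_endpoints(bridge: FullBridge) {
    select_full_bridge!(bridge, {
        // Inside the block, `Source` and `Target` are concrete chain types.
        println!("source: {}, target: {}", Source::NAME, Target::NAME);
    });
}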
- -use crate::{ - cli::{bridge::FullBridge, AccountId}, - select_full_bridge, -}; -use relay_substrate_client::Chain; -use structopt::StructOpt; -use strum::VariantNames; - -/// Given a source chain `AccountId`, derive the corresponding `AccountId` for the target chain. -/// -/// The (derived) target chain `AccountId` is going to be used as dispatch origin of the call -/// that has been sent over the bridge. -/// This account can also be used to receive target-chain funds (or other form of ownership), -/// since messages sent over the bridge will be able to spend these. -#[derive(StructOpt)] -pub struct DeriveAccount { - /// A bridge instance to initialize. - #[structopt(possible_values = FullBridge::VARIANTS, case_insensitive = true)] - bridge: FullBridge, - /// Source-chain address to derive Target-chain address from. - account: AccountId, -} - -impl DeriveAccount { - /// Parse CLI arguments and derive account. - /// - /// Returns both the Source account in correct SS58 format and the derived account. - fn derive_account(&self) -> (AccountId, AccountId) { - select_full_bridge!(self.bridge, { - let mut account = self.account.clone(); - account.enforce_chain::(); - let acc = bp_runtime::SourceAccount::Account(account.raw_id()); - let id = derive_account(acc); - let derived_account = AccountId::from_raw::(id); - (account, derived_account) - }) - } - - /// Run the command. - pub async fn run(self) -> anyhow::Result<()> { - select_full_bridge!(self.bridge, { - let (account, derived_account) = self.derive_account(); - println!("Source address:\n{} ({})", account, Source::NAME); - println!("->Corresponding (derived) address:\n{} ({})", derived_account, Target::NAME,); - - Ok(()) - }) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - fn derive_account_cli(bridge: &str, account: &str) -> (AccountId, AccountId) { - DeriveAccount::from_iter(vec!["derive-account", bridge, account]).derive_account() - } - - #[test] - fn should_derive_accounts_correctly() { - // given - let rialto = "5sauUXUfPjmwxSgmb3tZ5d6yx24eZX4wWJ2JtVUBaQqFbvEU"; - let millau = "752paRyW1EGfq9YLTSSqcSJ5hqnBDidBmaftGhBo8fy6ypW9"; - - // when - let (rialto_parsed, rialto_derived) = derive_account_cli("rialto-to-millau", rialto); - let (millau_parsed, millau_derived) = derive_account_cli("millau-to-rialto", millau); - let (millau2_parsed, millau2_derived) = derive_account_cli("millau-to-rialto", rialto); - - // then - assert_eq!(format!("{}", rialto_parsed), rialto); - assert_eq!(format!("{}", millau_parsed), millau); - assert_eq!(format!("{}", millau2_parsed), millau); - - assert_eq!( - format!("{}", rialto_derived), - "74GNQjmkcfstRftSQPJgMREchqHM56EvAUXRc266cZ1NYVW5" - ); - assert_eq!( - format!("{}", millau_derived), - "5rERgaT1Z8nM3et2epA5i1VtEBfp5wkhwHtVE8HK7BRbjAH2" - ); - assert_eq!(millau_derived, millau2_derived); - } -} diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/encode_call.rs b/polkadot/bridges/relays/bin-substrate/src/cli/encode_call.rs deleted file mode 100644 index e288e2c13d6..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/cli/encode_call.rs +++ /dev/null @@ -1,354 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::{ - cli::{ - bridge::FullBridge, AccountId, Balance, CliChain, ExplicitOrMaximal, HexBytes, HexLaneId, - }, - select_full_bridge, -}; -use bp_runtime::EncodedOrDecodedCall; -use frame_support::weights::DispatchInfo; -use relay_substrate_client::Chain; -use structopt::StructOpt; -use strum::VariantNames; - -/// Encode source chain runtime call. -#[derive(StructOpt, Debug)] -pub struct EncodeCall { - /// A bridge instance to encode call for. - #[structopt(possible_values = FullBridge::VARIANTS, case_insensitive = true)] - bridge: FullBridge, - #[structopt(flatten)] - call: Call, -} - -/// All possible messages that may be delivered to generic Substrate chain. -/// -/// Note this enum may be used in the context of both Source (as part of `encode-call`) -/// and Target chain (as part of `encode-message/send-message`). -#[derive(StructOpt, Debug, PartialEq, Eq)] -pub enum Call { - /// Raw bytes for the message - Raw { - /// Raw, SCALE-encoded message - data: HexBytes, - }, - /// Make an on-chain remark (comment). - Remark { - /// Explicit remark payload. - #[structopt(long, conflicts_with("remark-size"))] - remark_payload: Option, - /// Remark size. If not passed, small UTF8-encoded string is generated by relay as remark. - #[structopt(long, conflicts_with("remark-payload"))] - remark_size: Option>, - }, - /// Transfer the specified `amount` of native tokens to a particular `recipient`. - Transfer { - /// Address of an account to receive the transfer. - #[structopt(long)] - recipient: AccountId, - /// Amount of target tokens to send in target chain base currency units. - #[structopt(long)] - amount: Balance, - }, - /// A call to the specific Bridge Messages pallet to queue message to be sent over a bridge. - BridgeSendMessage { - /// An index of the bridge instance which represents the expected target chain. - #[structopt(skip = 255)] - bridge_instance_index: u8, - /// Hex-encoded lane id that should be served by the relay. Defaults to `00000000`. - #[structopt(long, default_value = "00000000")] - lane: HexLaneId, - /// Raw SCALE-encoded Message Payload to submit to the messages pallet. - /// - /// This can be obtained by encoding call for the target chain. - #[structopt(long)] - payload: HexBytes, - /// Declared delivery and dispatch fee in base source-chain currency units. - #[structopt(long)] - fee: Balance, - }, -} - -pub trait CliEncodeCall: Chain { - /// Encode a CLI call. - fn encode_call(call: &Call) -> anyhow::Result>; - - /// Get dispatch info for the call. 
- fn get_dispatch_info(call: &EncodedOrDecodedCall) -> anyhow::Result; -} - -impl EncodeCall { - fn encode(&mut self) -> anyhow::Result { - select_full_bridge!(self.bridge, { - preprocess_call::(&mut self.call, self.bridge.bridge_instance_index()); - let call = Source::encode_call(&self.call)?; - - let encoded = HexBytes::encode(&call); - - log::info!(target: "bridge", "Generated {} call: {:#?}", Source::NAME, call); - log::info!(target: "bridge", "Weight of {} call: {}", Source::NAME, Source::get_dispatch_info(&call) - .map(|dispatch_info| format!("{}", dispatch_info.weight)) - .unwrap_or_else(|_| "".to_string()) - ); - log::info!(target: "bridge", "Encoded {} call: {:?}", Source::NAME, encoded); - - Ok(encoded) - }) - } - - /// Run the command. - pub async fn run(mut self) -> anyhow::Result<()> { - println!("{:?}", self.encode()?); - Ok(()) - } -} - -/// Prepare the call to be passed to [`CliEncodeCall::encode_call`]. -/// -/// This function will fill in all optional and missing pieces and will make sure that -/// values are converted to bridge-specific ones. -/// -/// Most importantly, the method will fill-in [`bridge_instance_index`] parameter for -/// target-chain specific calls. -pub(crate) fn preprocess_call( - call: &mut Call, - bridge_instance: u8, -) { - match *call { - Call::Raw { .. } => {}, - Call::Remark { ref remark_size, ref mut remark_payload } => - if remark_payload.is_none() { - *remark_payload = Some(HexBytes(generate_remark_payload( - remark_size, - compute_maximal_message_arguments_size( - Source::max_extrinsic_size(), - Target::max_extrinsic_size(), - ), - ))); - }, - Call::Transfer { ref mut recipient, .. } => { - recipient.enforce_chain::(); - }, - Call::BridgeSendMessage { ref mut bridge_instance_index, .. } => { - *bridge_instance_index = bridge_instance; - }, - }; -} - -fn generate_remark_payload( - remark_size: &Option>, - maximal_allowed_size: u32, -) -> Vec { - match remark_size { - Some(ExplicitOrMaximal::Explicit(remark_size)) => vec![0; *remark_size], - Some(ExplicitOrMaximal::Maximal) => vec![0; maximal_allowed_size as _], - None => format!( - "Unix time: {}", - std::time::SystemTime::now() - .duration_since(std::time::SystemTime::UNIX_EPOCH) - .unwrap_or_default() - .as_secs(), - ) - .as_bytes() - .to_vec(), - } -} - -pub(crate) fn compute_maximal_message_arguments_size( - maximal_source_extrinsic_size: u32, - maximal_target_extrinsic_size: u32, -) -> u32 { - // assume that both signed extensions and other arguments fit 1KB - let service_tx_bytes_on_source_chain = 1024; - let maximal_source_extrinsic_size = - maximal_source_extrinsic_size - service_tx_bytes_on_source_chain; - let maximal_call_size = bridge_runtime_common::messages::target::maximal_incoming_message_size( - maximal_target_extrinsic_size, - ); - let maximal_call_size = if maximal_call_size > maximal_source_extrinsic_size { - maximal_source_extrinsic_size - } else { - maximal_call_size - }; - - // bytes in Call encoding that are used to encode everything except arguments - let service_bytes = 1 + 1 + 4; - maximal_call_size - service_bytes -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::cli::send_message::SendMessage; - - #[test] - fn should_encode_transfer_call() { - // given - let mut encode_call = EncodeCall::from_iter(vec![ - "encode-call", - "rialto-to-millau", - "transfer", - "--amount", - "12345", - "--recipient", - "5sauUXUfPjmwxSgmb3tZ5d6yx24eZX4wWJ2JtVUBaQqFbvEU", - ]); - - // when - let hex = encode_call.encode().unwrap(); - - // then - assert_eq!( - format!("{:?}", 
hex), - "0x040000d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27de5c0" - ); - } - - #[test] - fn should_encode_remark_with_default_payload() { - // given - let mut encode_call = - EncodeCall::from_iter(vec!["encode-call", "rialto-to-millau", "remark"]); - - // when - let hex = encode_call.encode().unwrap(); - - // then - assert!(format!("{:?}", hex).starts_with("0x000154556e69782074696d653a")); - } - - #[test] - fn should_encode_remark_with_explicit_payload() { - // given - let mut encode_call = EncodeCall::from_iter(vec![ - "encode-call", - "rialto-to-millau", - "remark", - "--remark-payload", - "1234", - ]); - - // when - let hex = encode_call.encode().unwrap(); - - // then - assert_eq!(format!("{:?}", hex), "0x0001081234"); - } - - #[test] - fn should_encode_remark_with_size() { - // given - let mut encode_call = EncodeCall::from_iter(vec![ - "encode-call", - "rialto-to-millau", - "remark", - "--remark-size", - "12", - ]); - - // when - let hex = encode_call.encode().unwrap(); - - // then - assert_eq!(format!("{:?}", hex), "0x000130000000000000000000000000"); - } - - #[test] - fn should_disallow_both_payload_and_size() { - // when - let err = EncodeCall::from_iter_safe(vec![ - "encode-call", - "rialto-to-millau", - "remark", - "--remark-payload", - "1234", - "--remark-size", - "12", - ]) - .unwrap_err(); - - // then - assert_eq!(err.kind, structopt::clap::ErrorKind::ArgumentConflict); - - let info = err.info.unwrap(); - assert!( - info.contains(&"remark-payload".to_string()) | - info.contains(&"remark-size".to_string()) - ) - } - - #[test] - fn should_encode_raw_call() { - // given - let mut encode_call = EncodeCall::from_iter(vec![ - "encode-call", - "rialto-to-millau", - "raw", - "040000d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27de5c0", - ]); - - // when - let hex = encode_call.encode().unwrap(); - - // then - assert_eq!( - format!("{:?}", hex), - "0x040000d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27de5c0" - ); - } - - #[async_std::test] - async fn should_encode_bridge_send_message_call() { - // given - let encode_message = SendMessage::from_iter(vec![ - "send-message", - "millau-to-rialto", - "--source-port", - "10946", - "--source-signer", - "//Alice", - "--target-signer", - "//Alice", - "--origin", - "Target", - "remark", - ]) - .encode_payload() - .await - .unwrap(); - - let mut encode_call = EncodeCall::from_iter(vec![ - "encode-call", - "rialto-to-millau", - "bridge-send-message", - "--fee", - "12345", - "--payload", - format!("{:}", &HexBytes::encode(&encode_message)).as_str(), - ]); - - // when - let call_hex = encode_call.encode().unwrap(); - - // then - assert!(format!("{:?}", call_hex).starts_with( - "0x0f030000000001000000000000000000000001d43593c715fdd31c61141abd04a99fd6822c8558854cc\ - de39a5684e7a56da27d01d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d01" - )) - } -} diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/encode_message.rs b/polkadot/bridges/relays/bin-substrate/src/cli/encode_message.rs deleted file mode 100644 index 677fc29ef15..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/cli/encode_message.rs +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
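As a quick illustration of the remark-sizing helpers in `encode_call.rs` above, a test-style sketch follows; the extrinsic-size limits are invented for the example, whereas the real call site (`preprocess_call`) uses `Source::max_extrinsic_size()` and `Target::max_extrinsic_size()`.

#[test]
fn maximal_remark_fills_computed_budget() {
    // Assumed limits, for illustration only.
    let max_args = compute_maximal_message_arguments_size(
        4096, // pretend source-chain maximal extrinsic size
        2048, // pretend target-chain maximal extrinsic size
    );
    // `--remark-size max` asks for a zero-filled payload of exactly this length.
    let payload = generate_remark_payload(&Some(ExplicitOrMaximal::Maximal), max_args);
    assert_eq!(payload.len(), max_args as usize);
}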
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::{ - cli::{bridge::FullBridge, AccountId, CliChain, HexBytes}, - select_full_bridge, -}; -use frame_support::weights::Weight; -use structopt::StructOpt; -use strum::VariantNames; - -/// Generic message payload. -#[derive(StructOpt, Debug, PartialEq, Eq)] -pub enum MessagePayload { - /// Raw, SCALE-encoded `MessagePayload`. - Raw { - /// Hex-encoded SCALE data. - data: HexBytes, - }, - /// Construct message to send over the bridge. - Call { - /// Message details. - #[structopt(flatten)] - call: crate::cli::encode_call::Call, - /// SS58 encoded Source account that will send the payload. - #[structopt(long)] - sender: AccountId, - /// Weight of the call. - /// - /// It must be specified if the chain runtime is not bundled with the relay, or if - /// you want to override bundled weight. - #[structopt(long)] - dispatch_weight: Option, - }, -} - -/// A `MessagePayload` to encode. -#[derive(StructOpt)] -pub struct EncodeMessage { - /// A bridge instance to initialize. - #[structopt(possible_values = FullBridge::VARIANTS, case_insensitive = true)] - bridge: FullBridge, - #[structopt(flatten)] - payload: MessagePayload, -} - -impl EncodeMessage { - /// Run the command. - pub fn encode(self) -> anyhow::Result { - select_full_bridge!(self.bridge, { - let payload = - Source::encode_message(self.payload).map_err(|e| anyhow::format_err!("{}", e))?; - Ok(HexBytes::encode(&payload)) - }) - } - - /// Run the command. 
- pub async fn run(self) -> anyhow::Result<()> { - let payload = self.encode()?; - println!("{:?}", payload); - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use sp_core::crypto::Ss58Codec; - - #[test] - fn should_encode_raw_message() { - // given - let msg = "01000000e88514000000000002d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d003c040130000000000000000000000000"; - let encode_message = - EncodeMessage::from_iter(vec!["encode-message", "rialto-to-millau", "raw", msg]); - - // when - let hex = encode_message.encode().unwrap(); - - // then - assert_eq!(format!("{:?}", hex), format!("0x{}", msg)); - } - - #[test] - fn should_encode_remark_with_size() { - // given - let sender = sp_keyring::AccountKeyring::Alice.to_account_id().to_ss58check(); - let encode_message = EncodeMessage::from_iter(vec![ - "encode-message", - "rialto-to-millau", - "call", - "--sender", - &sender, - "--dispatch-weight", - "42", - "remark", - "--remark-size", - "12", - ]); - - // when - let hex = encode_message.encode().unwrap(); - - // then - assert_eq!(format!("{:?}", hex), "0x010000002a0000000000000002d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d003c000130000000000000000000000000"); - } -} diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/estimate_fee.rs b/polkadot/bridges/relays/bin-substrate/src/cli/estimate_fee.rs deleted file mode 100644 index bab625314e8..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/cli/estimate_fee.rs +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::{ - cli::{ - bridge::FullBridge, relay_headers_and_messages::CONVERSION_RATE_ALLOWED_DIFFERENCE_RATIO, - Balance, CliChain, HexBytes, HexLaneId, SourceConnectionParams, - }, - select_full_bridge, -}; -use bp_runtime::BalanceOf; -use codec::{Decode, Encode}; -use relay_substrate_client::Chain; -use sp_runtime::FixedU128; -use structopt::StructOpt; -use strum::VariantNames; -use substrate_relay_helper::helpers::tokens_conversion_rate_from_metrics; - -/// Estimate Delivery & Dispatch Fee command. -#[derive(StructOpt, Debug, PartialEq)] -pub struct EstimateFee { - /// A bridge instance to encode call for. - #[structopt(possible_values = FullBridge::VARIANTS, case_insensitive = true)] - bridge: FullBridge, - #[structopt(flatten)] - source: SourceConnectionParams, - /// Hex-encoded id of lane that will be delivering the message. - #[structopt(long, default_value = "00000000")] - lane: HexLaneId, - /// A way to override conversion rate between bridge tokens. - /// - /// If not specified, conversion rate from runtime storage is used. It may be obsolete and - /// your message won't be relayed. - #[structopt(long)] - conversion_rate_override: Option, - /// Payload to send over the bridge. 
- #[structopt(flatten)] - payload: crate::cli::encode_message::MessagePayload, -} - -/// A way to override conversion rate between bridge tokens. -#[derive(Debug, Clone, Copy, PartialEq)] -pub enum ConversionRateOverride { - /// The actual conversion rate is computed in the same way how rate metric works. - Metric, - /// The actual conversion rate is specified explicitly. - Explicit(f64), -} - -impl std::str::FromStr for ConversionRateOverride { - type Err = String; - - fn from_str(s: &str) -> Result { - if s.to_lowercase() == "metric" { - return Ok(ConversionRateOverride::Metric) - } - - f64::from_str(s) - .map(ConversionRateOverride::Explicit) - .map_err(|e| format!("Failed to parse '{:?}'. Expected 'metric' or explicit value", e)) - } -} - -impl EstimateFee { - /// Run the command. - pub async fn run(self) -> anyhow::Result<()> { - let Self { source, bridge, lane, conversion_rate_override, payload } = self; - - select_full_bridge!(bridge, { - let source_client = source.to_client::().await?; - let lane = lane.into(); - let payload = - Source::encode_message(payload).map_err(|e| anyhow::format_err!("{:?}", e))?; - - let fee = estimate_message_delivery_and_dispatch_fee::( - &source_client, - conversion_rate_override, - ESTIMATE_MESSAGE_FEE_METHOD, - lane, - payload, - ) - .await?; - - log::info!(target: "bridge", "Fee: {:?}", Balance(fee as _)); - println!("{}", fee); - Ok(()) - }) - } -} - -/// The caller may provide target to source tokens conversion rate override to use in fee -/// computation. -pub(crate) async fn estimate_message_delivery_and_dispatch_fee< - Source: Chain, - Target: Chain, - P: Clone + Encode, ->( - client: &relay_substrate_client::Client, - conversion_rate_override: Option, - estimate_fee_method: &str, - lane: bp_messages::LaneId, - payload: P, -) -> anyhow::Result> { - // actual conversion rate CAN be lesser than the rate stored in the runtime. So we may try to - // pay lesser fee for the message delivery. But in this case, message may be rejected by the - // lane. So we MUST use the larger of two fees - one computed with stored fee and the one - // computed with actual fee. - - let conversion_rate_override = - match (conversion_rate_override, Source::TOKEN_ID, Target::TOKEN_ID) { - (Some(ConversionRateOverride::Explicit(v)), _, _) => { - let conversion_rate_override = FixedU128::from_float(v); - log::info!( - target: "bridge", - "{} -> {} conversion rate override: {:?} (explicit)", - Target::NAME, - Source::NAME, - conversion_rate_override.to_float(), - ); - Some(conversion_rate_override) - }, - ( - Some(ConversionRateOverride::Metric), - Some(source_token_id), - Some(target_token_id), - ) => { - let conversion_rate_override = - tokens_conversion_rate_from_metrics(target_token_id, source_token_id).await?; - // So we have current actual conversion rate and rate that is stored in the runtime. - // And we may simply choose the maximal of these. But what if right now there's - // rate update transaction on the way, that is updating rate to 10 seconds old - // actual rate, which is bigger than the current rate? Then our message will be - // rejected. - // - // So let's increase the actual rate by the same value that the conversion rate - // updater is using. 
- let increased_conversion_rate_override = FixedU128::from_float( - conversion_rate_override * (1.0 + CONVERSION_RATE_ALLOWED_DIFFERENCE_RATIO), - ); - log::info!( - target: "bridge", - "{} -> {} conversion rate override: {} (value from metric - {})", - Target::NAME, - Source::NAME, - increased_conversion_rate_override.to_float(), - conversion_rate_override, - ); - Some(increased_conversion_rate_override) - }, - _ => None, - }; - - let without_override = do_estimate_message_delivery_and_dispatch_fee( - client, - estimate_fee_method, - lane, - payload.clone(), - None, - ) - .await?; - let with_override = do_estimate_message_delivery_and_dispatch_fee( - client, - estimate_fee_method, - lane, - payload.clone(), - conversion_rate_override, - ) - .await?; - let maximal_fee = std::cmp::max(without_override, with_override); - - log::info!( - target: "bridge", - "Estimated message fee: {:?} = max of {:?} (without rate override) and {:?} (with override to {:?})", - maximal_fee, - without_override, - with_override, - conversion_rate_override, - ); - - Ok(maximal_fee) -} - -/// Estimate message delivery and dispatch fee with given conversion rate override. -async fn do_estimate_message_delivery_and_dispatch_fee( - client: &relay_substrate_client::Client, - estimate_fee_method: &str, - lane: bp_messages::LaneId, - payload: P, - conversion_rate_override: Option, -) -> anyhow::Result> { - let encoded_response = client - .state_call( - estimate_fee_method.into(), - (lane, payload, conversion_rate_override).encode().into(), - None, - ) - .await?; - let decoded_response: Option> = Decode::decode(&mut &encoded_response.0[..]) - .map_err(relay_substrate_client::Error::ResponseParseFailed)?; - let fee = decoded_response.ok_or_else(|| { - anyhow::format_err!("Unable to decode fee from: {:?}", HexBytes(encoded_response.to_vec())) - })?; - Ok(fee) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::cli::{encode_call, RuntimeVersionType, SourceRuntimeVersionParams}; - use sp_core::crypto::Ss58Codec; - - #[test] - fn should_parse_cli_options() { - // given - let alice = sp_keyring::AccountKeyring::Alice.to_account_id().to_ss58check(); - - // when - let res = EstimateFee::from_iter(vec![ - "estimate_fee", - "rialto-to-millau", - "--source-port", - "1234", - "--conversion-rate-override", - "42.5", - "call", - "--sender", - &alice, - "--dispatch-weight", - "42", - "remark", - "--remark-payload", - "1234", - ]); - - // then - assert_eq!( - res, - EstimateFee { - bridge: FullBridge::RialtoToMillau, - lane: HexLaneId([0, 0, 0, 0]), - conversion_rate_override: Some(ConversionRateOverride::Explicit(42.5)), - source: SourceConnectionParams { - source_host: "127.0.0.1".into(), - source_port: 1234, - source_secure: false, - source_runtime_version: SourceRuntimeVersionParams { - source_version_mode: RuntimeVersionType::Bundle, - source_spec_version: None, - source_transaction_version: None, - } - }, - payload: crate::cli::encode_message::MessagePayload::Call { - sender: alice.parse().unwrap(), - call: encode_call::Call::Remark { - remark_payload: Some(HexBytes(vec![0x12, 0x34])), - remark_size: None, - }, - dispatch_weight: Some(42), - } - } - ); - } -} diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/init_bridge.rs b/polkadot/bridges/relays/bin-substrate/src/cli/init_bridge.rs deleted file mode 100644 index a0129ce9baa..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/cli/init_bridge.rs +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. 
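To make the `--conversion-rate-override` syntax from `estimate_fee.rs` above concrete, here is a small sketch mirroring that file's `FromStr` implementation; the numeric value is arbitrary.

#[test]
fn conversion_rate_override_accepts_metric_or_explicit_value() {
    // "metric" (case-insensitive) derives the rate from the relay metrics.
    assert_eq!("metric".parse::<ConversionRateOverride>(), Ok(ConversionRateOverride::Metric));
    // Any parsable float is used as an explicit rate.
    assert_eq!("42.5".parse::<ConversionRateOverride>(), Ok(ConversionRateOverride::Explicit(42.5)));
}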
-// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::cli::{SourceConnectionParams, TargetConnectionParams, TargetSigningParams}; -use bp_header_chain::InitializationData; -use bp_runtime::Chain as ChainBase; -use codec::Encode; -use relay_substrate_client::{Chain, SignParam, TransactionSignScheme, UnsignedTransaction}; -use sp_core::{Bytes, Pair}; -use structopt::StructOpt; -use strum::{EnumString, EnumVariantNames, VariantNames}; - -/// Initialize bridge pallet. -#[derive(StructOpt)] -pub struct InitBridge { - /// A bridge instance to initialize. - #[structopt(possible_values = InitBridgeName::VARIANTS, case_insensitive = true)] - bridge: InitBridgeName, - #[structopt(flatten)] - source: SourceConnectionParams, - #[structopt(flatten)] - target: TargetConnectionParams, - #[structopt(flatten)] - target_sign: TargetSigningParams, -} - -#[derive(Debug, EnumString, EnumVariantNames)] -#[strum(serialize_all = "kebab_case")] -/// Bridge to initialize. -pub enum InitBridgeName { - MillauToRialto, - RialtoToMillau, - WestendToMillau, - RococoToWococo, - WococoToRococo, - KusamaToPolkadot, - PolkadotToKusama, -} - -macro_rules! select_bridge { - ($bridge: expr, $generic: tt) => { - match $bridge { - InitBridgeName::MillauToRialto => { - type Source = relay_millau_client::Millau; - type Target = relay_rialto_client::Rialto; - - fn encode_init_bridge( - init_data: InitializationData<::Header>, - ) -> ::Call { - rialto_runtime::SudoCall::sudo { - call: Box::new( - rialto_runtime::BridgeGrandpaMillauCall::initialize { init_data } - .into(), - ), - } - .into() - } - - $generic - }, - InitBridgeName::RialtoToMillau => { - type Source = relay_rialto_client::Rialto; - type Target = relay_millau_client::Millau; - - fn encode_init_bridge( - init_data: InitializationData<::Header>, - ) -> ::Call { - let initialize_call = millau_runtime::BridgeGrandpaCall::< - millau_runtime::Runtime, - millau_runtime::RialtoGrandpaInstance, - >::initialize { - init_data, - }; - millau_runtime::SudoCall::sudo { call: Box::new(initialize_call.into()) }.into() - } - - $generic - }, - InitBridgeName::WestendToMillau => { - type Source = relay_westend_client::Westend; - type Target = relay_millau_client::Millau; - - fn encode_init_bridge( - init_data: InitializationData<::Header>, - ) -> ::Call { - // at Westend -> Millau initialization we're not using sudo, because otherwise - // our deployments may fail, because we need to initialize both Rialto -> Millau - // and Westend -> Millau bridge. 
=> since there's single possible sudo account, - // one of transaction may fail with duplicate nonce error - millau_runtime::BridgeGrandpaCall::< - millau_runtime::Runtime, - millau_runtime::WestendGrandpaInstance, - >::initialize { - init_data, - } - .into() - } - - $generic - }, - InitBridgeName::RococoToWococo => { - type Source = relay_rococo_client::Rococo; - type Target = relay_wococo_client::Wococo; - - fn encode_init_bridge( - init_data: InitializationData<::Header>, - ) -> ::Call { - relay_wococo_client::runtime::Call::BridgeGrandpaRococo( - relay_wococo_client::runtime::BridgeGrandpaRococoCall::initialize( - init_data, - ), - ) - } - - $generic - }, - InitBridgeName::WococoToRococo => { - type Source = relay_wococo_client::Wococo; - type Target = relay_rococo_client::Rococo; - - fn encode_init_bridge( - init_data: InitializationData<::Header>, - ) -> ::Call { - relay_rococo_client::runtime::Call::BridgeGrandpaWococo( - relay_rococo_client::runtime::BridgeGrandpaWococoCall::initialize( - init_data, - ), - ) - } - - $generic - }, - InitBridgeName::KusamaToPolkadot => { - type Source = relay_kusama_client::Kusama; - type Target = relay_polkadot_client::Polkadot; - - fn encode_init_bridge( - init_data: InitializationData<::Header>, - ) -> ::Call { - relay_polkadot_client::runtime::Call::BridgeKusamaGrandpa( - relay_polkadot_client::runtime::BridgeKusamaGrandpaCall::initialize( - init_data, - ), - ) - } - - $generic - }, - InitBridgeName::PolkadotToKusama => { - type Source = relay_polkadot_client::Polkadot; - type Target = relay_kusama_client::Kusama; - - fn encode_init_bridge( - init_data: InitializationData<::Header>, - ) -> ::Call { - relay_kusama_client::runtime::Call::BridgePolkadotGrandpa( - relay_kusama_client::runtime::BridgePolkadotGrandpaCall::initialize( - init_data, - ), - ) - } - - $generic - }, - } - }; -} - -impl InitBridge { - /// Run the command. - pub async fn run(self) -> anyhow::Result<()> { - select_bridge!(self.bridge, { - let source_client = self.source.to_client::().await?; - let target_client = self.target.to_client::().await?; - let target_sign = self.target_sign.to_keypair::()?; - - let (spec_version, transaction_version) = - target_client.simple_runtime_version().await?; - substrate_relay_helper::headers_initialize::initialize( - source_client, - target_client.clone(), - target_sign.public().into(), - move |transaction_nonce, initialization_data| { - Ok(Bytes( - Target::sign_transaction(SignParam { - spec_version, - transaction_version, - genesis_hash: *target_client.genesis_hash(), - signer: target_sign, - era: relay_substrate_client::TransactionEra::immortal(), - unsigned: UnsignedTransaction::new( - encode_init_bridge(initialization_data).into(), - transaction_nonce, - ), - })? - .encode(), - )) - }, - ) - .await; - - Ok(()) - }) - } -} diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/mod.rs b/polkadot/bridges/relays/bin-substrate/src/cli/mod.rs deleted file mode 100644 index d931915a9bc..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/cli/mod.rs +++ /dev/null @@ -1,749 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
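For a feel of how the `init-bridge` sub-command defined above is driven, a hypothetical invocation in the style of this crate's CLI tests is sketched below; the ports and signer SURI are placeholders, and the flag names follow from the StructOpt field names.

use structopt::StructOpt;

async fn init_millau_to_rialto_example() -> anyhow::Result<()> {
    let init_bridge = InitBridge::from_iter(vec![
        "init-bridge",
        "millau-to-rialto",
        "--source-port", "9944",      // Millau node WS port (placeholder)
        "--target-port", "9945",      // Rialto node WS port (placeholder)
        "--target-signer", "//Alice", // SURI of the account submitting `initialize`
    ]);
    // Fetches the source chain's current finalized header and authority set and
    // submits the `initialize` call (wrapped in `sudo` for Rialto) to the target.
    init_bridge.run().await
}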
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Deal with CLI args of substrate-to-substrate relay. - -use codec::{Decode, Encode}; -use relay_substrate_client::ChainRuntimeVersion; -use sp_runtime::app_crypto::Ss58Codec; -use structopt::{clap::arg_enum, StructOpt}; -use strum::{EnumString, EnumVariantNames}; - -use bp_messages::LaneId; - -pub(crate) mod bridge; -pub(crate) mod encode_call; -pub(crate) mod encode_message; -pub(crate) mod estimate_fee; -pub(crate) mod send_message; - -mod derive_account; -mod init_bridge; -mod register_parachain; -mod reinit_bridge; -mod relay_headers; -mod relay_headers_and_messages; -mod relay_messages; -mod resubmit_transactions; -mod swap_tokens; - -/// Parse relay CLI args. -pub fn parse_args() -> Command { - Command::from_args() -} - -/// Substrate-to-Substrate bridge utilities. -#[derive(StructOpt)] -#[structopt(about = "Substrate-to-Substrate relay")] -pub enum Command { - /// Start headers relay between two chains. - /// - /// The on-chain bridge component should have been already initialized with - /// `init-bridge` sub-command. - RelayHeaders(relay_headers::RelayHeaders), - /// Start messages relay between two chains. - /// - /// Ties up to `Messages` pallets on both chains and starts relaying messages. - /// Requires the header relay to be already running. - RelayMessages(relay_messages::RelayMessages), - /// Start headers and messages relay between two Substrate chains. - /// - /// This high-level relay internally starts four low-level relays: two `RelayHeaders` - /// and two `RelayMessages` relays. Headers are only relayed when they are required by - /// the message relays - i.e. when there are messages or confirmations that needs to be - /// relayed between chains. - RelayHeadersAndMessages(relay_headers_and_messages::RelayHeadersAndMessages), - /// Initialize on-chain bridge pallet with current header data. - /// - /// Sends initialization transaction to bootstrap the bridge with current finalized block data. - InitBridge(init_bridge::InitBridge), - /// Reinitialize on-chain bridge pallet with current header data. - /// - /// Sends all missing mandatory headers to bootstrap the bridge with current finalized block - /// data. - ReinitBridge(reinit_bridge::ReinitBridge), - /// Send custom message over the bridge. - /// - /// Allows interacting with the bridge by sending messages over `Messages` component. - /// The message is being sent to the source chain, delivered to the target chain and dispatched - /// there. - SendMessage(send_message::SendMessage), - /// Generate SCALE-encoded `Call` for the chosen network. - /// - /// The call can be used either as message payload or can be wrapped into a transaction - /// and executed on the chain directly. - EncodeCall(encode_call::EncodeCall), - /// Generate SCALE-encoded `MessagePayload` object that can be sent over selected bridge. - /// - /// The `MessagePayload` can be then fed to `Messages::send_message` function and sent over - /// the bridge. - EncodeMessage(encode_message::EncodeMessage), - /// Estimate Delivery and Dispatch Fee required for message submission to messages pallet. 
- EstimateFee(estimate_fee::EstimateFee), - /// Given a source chain `AccountId`, derive the corresponding `AccountId` for the target - /// chain. - DeriveAccount(derive_account::DeriveAccount), - /// Resubmit transactions with increased tip if they are stalled. - ResubmitTransactions(resubmit_transactions::ResubmitTransactions), - /// Swap tokens using token-swap bridge. - SwapTokens(swap_tokens::SwapTokens), - /// Register parachain. - RegisterParachain(register_parachain::RegisterParachain), -} - -impl Command { - // Initialize logger depending on the command. - fn init_logger(&self) { - use relay_utils::initialize::{initialize_logger, initialize_relay}; - - match self { - Self::RelayHeaders(_) | - Self::RelayMessages(_) | - Self::RelayHeadersAndMessages(_) | - Self::InitBridge(_) => { - initialize_relay(); - }, - _ => { - initialize_logger(false); - }, - } - } - - /// Run the command. - pub async fn run(self) -> anyhow::Result<()> { - self.init_logger(); - match self { - Self::RelayHeaders(arg) => arg.run().await?, - Self::RelayMessages(arg) => arg.run().await?, - Self::RelayHeadersAndMessages(arg) => arg.run().await?, - Self::InitBridge(arg) => arg.run().await?, - Self::ReinitBridge(arg) => arg.run().await?, - Self::SendMessage(arg) => arg.run().await?, - Self::EncodeCall(arg) => arg.run().await?, - Self::EncodeMessage(arg) => arg.run().await?, - Self::EstimateFee(arg) => arg.run().await?, - Self::DeriveAccount(arg) => arg.run().await?, - Self::ResubmitTransactions(arg) => arg.run().await?, - Self::SwapTokens(arg) => arg.run().await?, - Self::RegisterParachain(arg) => arg.run().await?, - } - Ok(()) - } -} - -arg_enum! { - #[derive(Debug)] - /// The origin to use when dispatching the message on the target chain. - /// - /// - `Target` uses account existing on the target chain (requires target private key). - /// - `Origin` uses account derived from the source-chain account. - pub enum Origins { - Target, - Source, - } -} - -/// Generic balance type. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct Balance(pub u128); - -impl std::fmt::Display for Balance { - fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - use num_format::{Locale, ToFormattedString}; - write!(fmt, "{}", self.0.to_formatted_string(&Locale::en)) - } -} - -impl std::str::FromStr for Balance { - type Err = ::Err; - - fn from_str(s: &str) -> Result { - Ok(Self(s.parse()?)) - } -} - -impl Balance { - /// Cast balance to `u64` type, panicking if it's too large. - pub fn cast(&self) -> u64 { - self.0.try_into().expect("Balance is too high for this chain.") - } -} - -/// Generic account id with custom parser. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct AccountId { - account: sp_runtime::AccountId32, - ss58_format: sp_core::crypto::Ss58AddressFormat, -} - -impl std::fmt::Display for AccountId { - fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(fmt, "{}", self.account.to_ss58check_with_version(self.ss58_format)) - } -} - -impl std::str::FromStr for AccountId { - type Err = String; - - fn from_str(s: &str) -> Result { - let (account, ss58_format) = sp_runtime::AccountId32::from_ss58check_with_version(s) - .map_err(|err| format!("Unable to decode SS58 address: {:?}", err))?; - Ok(Self { account, ss58_format }) - } -} - -const SS58_FORMAT_PROOF: &str = "u16 -> Ss58Format is infallible; qed"; - -impl AccountId { - /// Create new SS58-formatted address from raw account id. 
- pub fn from_raw(account: sp_runtime::AccountId32) -> Self { - Self { account, ss58_format: T::ss58_format().try_into().expect(SS58_FORMAT_PROOF) } - } - - /// Enforces formatting account to be for given [`CliChain`] type. - /// - /// This will change the `ss58format` of the account to match the requested one. - /// Note that a warning will be produced in case the current format does not match - /// the requested one, but the conversion always succeeds. - pub fn enforce_chain(&mut self) { - let original = self.clone(); - self.ss58_format = T::ss58_format().try_into().expect(SS58_FORMAT_PROOF); - log::debug!("{} SS58 format: {} (RAW: {})", self, self.ss58_format, self.account); - if original.ss58_format != self.ss58_format { - log::warn!( - target: "bridge", - "Address {} does not seem to match {}'s SS58 format (got: {}, expected: {}).\nConverted to: {}", - original, - T::NAME, - original.ss58_format, - self.ss58_format, - self, - ) - } - } - - /// Returns the raw (no SS58-prefixed) account id. - pub fn raw_id(&self) -> sp_runtime::AccountId32 { - self.account.clone() - } -} - -/// Bridge-supported network definition. -/// -/// Used to abstract away CLI commands. -pub trait CliChain: relay_substrate_client::Chain { - /// Current version of the chain runtime, known to relay. - const RUNTIME_VERSION: sp_version::RuntimeVersion; - - /// Crypto KeyPair type used to send messages. - /// - /// In case of chains supporting multiple cryptos, pick one used by the CLI. - type KeyPair: sp_core::crypto::Pair; - - /// Bridge Message Payload type. - /// - /// TODO [#854] This should be removed in favor of target-specifc types. - type MessagePayload; - - /// Numeric value of SS58 format. - fn ss58_format() -> u16; - - /// Construct message payload to be sent over the bridge. - fn encode_message( - message: crate::cli::encode_message::MessagePayload, - ) -> anyhow::Result; -} - -/// Lane id. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct HexLaneId(pub LaneId); - -impl From for LaneId { - fn from(lane_id: HexLaneId) -> LaneId { - lane_id.0 - } -} - -impl std::str::FromStr for HexLaneId { - type Err = hex::FromHexError; - - fn from_str(s: &str) -> Result { - let mut lane_id = LaneId::default(); - hex::decode_to_slice(s, &mut lane_id)?; - Ok(HexLaneId(lane_id)) - } -} - -/// Nicer formatting for raw bytes vectors. -#[derive(Default, Encode, Decode, PartialEq, Eq)] -pub struct HexBytes(pub Vec); - -impl std::str::FromStr for HexBytes { - type Err = hex::FromHexError; - - fn from_str(s: &str) -> Result { - Ok(Self(hex::decode(s)?)) - } -} - -impl std::fmt::Debug for HexBytes { - fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(fmt, "0x{}", self) - } -} - -impl std::fmt::Display for HexBytes { - fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(fmt, "{}", hex::encode(&self.0)) - } -} - -impl HexBytes { - /// Encode given object and wrap into nicely formatted bytes. - pub fn encode(t: &T) -> Self { - Self(t.encode()) - } -} - -/// Prometheus metrics params. -#[derive(StructOpt)] -pub struct PrometheusParams { - /// Do not expose a Prometheus metric endpoint. - #[structopt(long)] - pub no_prometheus: bool, - /// Expose Prometheus endpoint at given interface. - #[structopt(long, default_value = "127.0.0.1")] - pub prometheus_host: String, - /// Expose Prometheus endpoint at given port. 
- #[structopt(long, default_value = "9616")] - pub prometheus_port: u16, -} - -impl From for relay_utils::metrics::MetricsParams { - fn from(cli_params: PrometheusParams) -> relay_utils::metrics::MetricsParams { - if !cli_params.no_prometheus { - Some(relay_utils::metrics::MetricsAddress { - host: cli_params.prometheus_host, - port: cli_params.prometheus_port, - }) - .into() - } else { - None.into() - } - } -} - -/// Either explicit or maximal allowed value. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum ExplicitOrMaximal { - /// User has explicitly specified argument value. - Explicit(V), - /// Maximal allowed value for this argument. - Maximal, -} - -impl std::str::FromStr for ExplicitOrMaximal -where - V::Err: std::fmt::Debug, -{ - type Err = String; - - fn from_str(s: &str) -> Result { - if s.to_lowercase() == "max" { - return Ok(ExplicitOrMaximal::Maximal) - } - - V::from_str(s) - .map(ExplicitOrMaximal::Explicit) - .map_err(|e| format!("Failed to parse '{:?}'. Expected 'max' or explicit value", e)) - } -} - -#[doc = "Runtime version params."] -#[derive(StructOpt, Debug, PartialEq, Eq, Clone, Copy, EnumString, EnumVariantNames)] -pub enum RuntimeVersionType { - /// Auto query version from chain - Auto, - /// Custom `spec_version` and `transaction_version` - Custom, - /// Read version from bundle dependencies directly. - Bundle, -} - -/// Create chain-specific set of configuration objects: connection parameters, -/// signing parameters and bridge initialization parameters. -#[macro_export] -macro_rules! declare_chain_options { - ($chain:ident, $chain_prefix:ident) => { - paste::item! { - #[doc = $chain " connection params."] - #[derive(StructOpt, Debug, PartialEq, Eq, Clone)] - pub struct [<$chain ConnectionParams>] { - #[doc = "Connect to " $chain " node at given host."] - #[structopt(long, default_value = "127.0.0.1")] - pub [<$chain_prefix _host>]: String, - #[doc = "Connect to " $chain " node websocket server at given port."] - #[structopt(long, default_value = "9944")] - pub [<$chain_prefix _port>]: u16, - #[doc = "Use secure websocket connection."] - #[structopt(long)] - pub [<$chain_prefix _secure>]: bool, - #[doc = "Custom runtime version"] - #[structopt(flatten)] - pub [<$chain_prefix _runtime_version>]: [<$chain RuntimeVersionParams>], - } - - #[doc = $chain " runtime version params."] - #[derive(StructOpt, Debug, PartialEq, Eq, Clone, Copy)] - pub struct [<$chain RuntimeVersionParams>] { - #[doc = "The type of runtime version for chain " $chain] - #[structopt(long, default_value = "Bundle")] - pub [<$chain_prefix _version_mode>]: RuntimeVersionType, - #[doc = "The custom sepc_version for chain " $chain] - #[structopt(long)] - pub [<$chain_prefix _spec_version>]: Option, - #[doc = "The custom transaction_version for chain " $chain] - #[structopt(long)] - pub [<$chain_prefix _transaction_version>]: Option, - } - - #[doc = $chain " signing params."] - #[derive(StructOpt, Debug, PartialEq, Eq, Clone)] - pub struct [<$chain SigningParams>] { - #[doc = "The SURI of secret key to use when transactions are submitted to the " $chain " node."] - #[structopt(long)] - pub [<$chain_prefix _signer>]: Option, - #[doc = "The password for the SURI of secret key to use when transactions are submitted to the " $chain " node."] - #[structopt(long)] - pub [<$chain_prefix _signer_password>]: Option, - - #[doc = "Path to the file, that contains SURI of secret key to use when transactions are submitted to the " $chain " node. 
Can be overridden with " $chain_prefix "_signer option."] - #[structopt(long)] - pub [<$chain_prefix _signer_file>]: Option, - #[doc = "Path to the file, that password for the SURI of secret key to use when transactions are submitted to the " $chain " node. Can be overridden with " $chain_prefix "_signer_password option."] - #[structopt(long)] - pub [<$chain_prefix _signer_password_file>]: Option, - - #[doc = "Transactions mortality period, in blocks. MUST be a power of two in [4; 65536] range. MAY NOT be larger than `BlockHashCount` parameter of the chain system module."] - #[structopt(long)] - pub [<$chain_prefix _transactions_mortality>]: Option, - } - - #[doc = "Parameters required to sign transaction on behalf of owner of the messages pallet at " $chain "."] - #[derive(StructOpt, Debug, PartialEq, Eq)] - pub struct [<$chain MessagesPalletOwnerSigningParams>] { - #[doc = "The SURI of secret key to use when transactions are submitted to the " $chain " node."] - #[structopt(long)] - pub [<$chain_prefix _messages_pallet_owner>]: Option, - #[doc = "The password for the SURI of secret key to use when transactions are submitted to the " $chain " node."] - #[structopt(long)] - pub [<$chain_prefix _messages_pallet_owner_password>]: Option, - } - - impl [<$chain SigningParams>] { - /// Return transactions mortality. - #[allow(dead_code)] - pub fn transactions_mortality(&self) -> anyhow::Result> { - self.[<$chain_prefix _transactions_mortality>] - .map(|transactions_mortality| { - if !(4..=65536).contains(&transactions_mortality) - || !transactions_mortality.is_power_of_two() - { - Err(anyhow::format_err!( - "Transactions mortality {} is not a power of two in a [4; 65536] range", - transactions_mortality, - )) - } else { - Ok(transactions_mortality) - } - }) - .transpose() - } - - /// Parse signing params into chain-specific KeyPair. - #[allow(dead_code)] - pub fn to_keypair(&self) -> anyhow::Result { - let suri = match (self.[<$chain_prefix _signer>].as_ref(), self.[<$chain_prefix _signer_file>].as_ref()) { - (Some(suri), _) => suri.to_owned(), - (None, Some(suri_file)) => std::fs::read_to_string(suri_file) - .map_err(|err| anyhow::format_err!( - "Failed to read SURI from file {:?}: {}", - suri_file, - err, - ))?, - (None, None) => return Err(anyhow::format_err!( - "One of options must be specified: '{}' or '{}'", - stringify!([<$chain_prefix _signer>]), - stringify!([<$chain_prefix _signer_file>]), - )), - }; - - let suri_password = match ( - self.[<$chain_prefix _signer_password>].as_ref(), - self.[<$chain_prefix _signer_password_file>].as_ref(), - ) { - (Some(suri_password), _) => Some(suri_password.to_owned()), - (None, Some(suri_password_file)) => std::fs::read_to_string(suri_password_file) - .map(Some) - .map_err(|err| anyhow::format_err!( - "Failed to read SURI password from file {:?}: {}", - suri_password_file, - err, - ))?, - _ => None, - }; - - use sp_core::crypto::Pair; - - Chain::KeyPair::from_string( - &suri, - suri_password.as_deref() - ).map_err(|e| anyhow::format_err!("{:?}", e)) - } - } - - #[allow(dead_code)] - impl [<$chain MessagesPalletOwnerSigningParams>] { - /// Parse signing params into chain-specific KeyPair. 
- pub fn to_keypair(&self) -> anyhow::Result> { - use sp_core::crypto::Pair; - - let [<$chain_prefix _messages_pallet_owner>] = match self.[<$chain_prefix _messages_pallet_owner>] { - Some(ref messages_pallet_owner) => messages_pallet_owner, - None => return Ok(None), - }; - Chain::KeyPair::from_string( - [<$chain_prefix _messages_pallet_owner>], - self.[<$chain_prefix _messages_pallet_owner_password>].as_deref() - ).map_err(|e| anyhow::format_err!("{:?}", e)).map(Some) - } - } - - impl [<$chain ConnectionParams>] { - /// Returns `true` if version guard can be started. - /// - /// There's no reason to run version guard when version mode is set to `Auto`. It can - /// lead to relay shutdown when chain is upgraded, even though we have explicitly - /// said that we don't want to shutdown. - #[allow(dead_code)] - pub fn can_start_version_guard(&self) -> bool { - self.[<$chain_prefix _runtime_version>].[<$chain_prefix _version_mode>] != RuntimeVersionType::Auto - } - - /// Convert connection params into Substrate client. - pub async fn to_client( - &self, - ) -> anyhow::Result> { - let chain_runtime_version = self - .[<$chain_prefix _runtime_version>] - .into_runtime_version(Some(Chain::RUNTIME_VERSION))?; - Ok(relay_substrate_client::Client::new(relay_substrate_client::ConnectionParams { - host: self.[<$chain_prefix _host>].clone(), - port: self.[<$chain_prefix _port>], - secure: self.[<$chain_prefix _secure>], - chain_runtime_version, - }) - .await - ) - } - - /// Return selected `chain_spec` version. - /// - /// This function only connects to the node if version mode is set to `Auto`. - #[allow(dead_code)] - pub async fn selected_chain_spec_version( - &self, - ) -> anyhow::Result { - let chain_runtime_version = self - .[<$chain_prefix _runtime_version>] - .into_runtime_version(Some(Chain::RUNTIME_VERSION))?; - Ok(match chain_runtime_version { - ChainRuntimeVersion::Auto => self - .to_client::() - .await? - .simple_runtime_version() - .await? - .0, - ChainRuntimeVersion::Custom(spec_version, _) => spec_version, - }) - } - } - - impl [<$chain RuntimeVersionParams>] { - /// Converts self into `ChainRuntimeVersion`. 
- pub fn into_runtime_version( - self, - bundle_runtime_version: Option, - ) -> anyhow::Result { - Ok(match self.[<$chain_prefix _version_mode>] { - RuntimeVersionType::Auto => ChainRuntimeVersion::Auto, - RuntimeVersionType::Custom => { - let except_spec_version = self.[<$chain_prefix _spec_version>] - .ok_or_else(|| anyhow::Error::msg(format!("The {}-spec-version is required when choose custom mode", stringify!($chain_prefix))))?; - let except_transaction_version = self.[<$chain_prefix _transaction_version>] - .ok_or_else(|| anyhow::Error::msg(format!("The {}-transaction-version is required when choose custom mode", stringify!($chain_prefix))))?; - ChainRuntimeVersion::Custom( - except_spec_version, - except_transaction_version - ) - }, - RuntimeVersionType::Bundle => match bundle_runtime_version { - Some(runtime_version) => ChainRuntimeVersion::Custom( - runtime_version.spec_version, - runtime_version.transaction_version - ), - None => ChainRuntimeVersion::Auto - }, - }) - } - } - } - }; -} - -declare_chain_options!(Source, source); -declare_chain_options!(Target, target); -declare_chain_options!(Relaychain, relaychain); -declare_chain_options!(Parachain, parachain); - -#[cfg(test)] -mod tests { - use std::str::FromStr; - - use sp_core::Pair; - - use super::*; - - #[test] - fn should_format_addresses_with_ss58_format() { - // given - let rialto1 = "5sauUXUfPjmwxSgmb3tZ5d6yx24eZX4wWJ2JtVUBaQqFbvEU"; - let rialto2 = "5rERgaT1Z8nM3et2epA5i1VtEBfp5wkhwHtVE8HK7BRbjAH2"; - let millau1 = "752paRyW1EGfq9YLTSSqcSJ5hqnBDidBmaftGhBo8fy6ypW9"; - let millau2 = "74GNQjmkcfstRftSQPJgMREchqHM56EvAUXRc266cZ1NYVW5"; - - let expected = vec![rialto1, rialto2, millau1, millau2]; - - // when - let parsed = expected.iter().map(|s| AccountId::from_str(s).unwrap()).collect::>(); - - let actual = parsed.iter().map(|a| format!("{}", a)).collect::>(); - - assert_eq!(actual, expected) - } - - #[test] - fn hex_bytes_display_matches_from_str_for_clap() { - // given - let hex = HexBytes(vec![1, 2, 3, 4]); - let display = format!("{}", hex); - - // when - let hex2: HexBytes = display.parse().unwrap(); - - // then - assert_eq!(hex.0, hex2.0); - } - - #[test] - fn reads_suri_from_file() { - const ALICE: &str = "//Alice"; - const BOB: &str = "//Bob"; - const ALICE_PASSWORD: &str = "alice_password"; - const BOB_PASSWORD: &str = "bob_password"; - - let alice = sp_core::sr25519::Pair::from_string(ALICE, Some(ALICE_PASSWORD)).unwrap(); - let bob = sp_core::sr25519::Pair::from_string(BOB, Some(BOB_PASSWORD)).unwrap(); - let bob_with_alice_password = - sp_core::sr25519::Pair::from_string(BOB, Some(ALICE_PASSWORD)).unwrap(); - - let temp_dir = tempfile::tempdir().unwrap(); - let mut suri_file_path = temp_dir.path().to_path_buf(); - let mut password_file_path = temp_dir.path().to_path_buf(); - suri_file_path.push("suri"); - password_file_path.push("password"); - std::fs::write(&suri_file_path, BOB.as_bytes()).unwrap(); - std::fs::write(&password_file_path, BOB_PASSWORD.as_bytes()).unwrap(); - - // when both seed and password are read from file - assert_eq!( - TargetSigningParams { - target_signer: Some(ALICE.into()), - target_signer_password: Some(ALICE_PASSWORD.into()), - - target_signer_file: None, - target_signer_password_file: None, - - target_transactions_mortality: None, - } - .to_keypair::() - .map(|p| p.public()) - .map_err(drop), - Ok(alice.public()), - ); - - // when both seed and password are read from file - assert_eq!( - TargetSigningParams { - target_signer: None, - target_signer_password: None, - - 
target_signer_file: Some(suri_file_path.clone()), - target_signer_password_file: Some(password_file_path.clone()), - - target_transactions_mortality: None, - } - .to_keypair::() - .map(|p| p.public()) - .map_err(drop), - Ok(bob.public()), - ); - - // when password are is overriden by cli option - assert_eq!( - TargetSigningParams { - target_signer: None, - target_signer_password: Some(ALICE_PASSWORD.into()), - - target_signer_file: Some(suri_file_path.clone()), - target_signer_password_file: Some(password_file_path.clone()), - - target_transactions_mortality: None, - } - .to_keypair::() - .map(|p| p.public()) - .map_err(drop), - Ok(bob_with_alice_password.public()), - ); - - // when both seed and password are overriden by cli options - assert_eq!( - TargetSigningParams { - target_signer: Some(ALICE.into()), - target_signer_password: Some(ALICE_PASSWORD.into()), - - target_signer_file: Some(suri_file_path), - target_signer_password_file: Some(password_file_path), - - target_transactions_mortality: None, - } - .to_keypair::() - .map(|p| p.public()) - .map_err(drop), - Ok(alice.public()), - ); - } -} diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/register_parachain.rs b/polkadot/bridges/relays/bin-substrate/src/cli/register_parachain.rs deleted file mode 100644 index c761a5dd1a6..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/cli/register_parachain.rs +++ /dev/null @@ -1,370 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::cli::{ - swap_tokens::wait_until_transaction_is_finalized, Balance, ParachainConnectionParams, - RelaychainConnectionParams, RelaychainSigningParams, -}; - -use codec::Encode; -use frame_support::Twox64Concat; -use num_traits::Zero; -use polkadot_parachain::primitives::{ - HeadData as ParaHeadData, Id as ParaId, ValidationCode as ParaValidationCode, -}; -use polkadot_runtime_common::{ - paras_registrar::Call as ParaRegistrarCall, slots::Call as ParaSlotsCall, -}; -use polkadot_runtime_parachains::paras::ParaLifecycle; -use relay_substrate_client::{ - AccountIdOf, CallOf, Chain, Client, SignParam, TransactionSignScheme, UnsignedTransaction, -}; -use rialto_runtime::SudoCall; -use sp_core::{ - storage::{well_known_keys::CODE, StorageKey}, - Bytes, Pair, -}; -use structopt::StructOpt; -use strum::{EnumString, EnumVariantNames, VariantNames}; - -/// Name of the `NextFreeParaId` value in the `polkadot_runtime_common::paras_registrar` pallet. -const NEXT_FREE_PARA_ID_STORAGE_NAME: &str = "NextFreeParaId"; -/// Name of the `ParaLifecycles` map in the `polkadot_runtime_parachains::paras` pallet. -const PARAS_LIFECYCLES_STORAGE_NAME: &str = "ParaLifecycles"; - -/// Register parachain. -#[derive(StructOpt, Debug, PartialEq)] -pub struct RegisterParachain { - /// A parachain to register. 
- #[structopt(possible_values = Parachain::VARIANTS, case_insensitive = true)] - parachain: Parachain, - /// Parachain deposit. - #[structopt(long, default_value = "0")] - deposit: Balance, - /// Lease begin. - #[structopt(long, default_value = "0")] - lease_begin: u32, - /// Lease end. - #[structopt(long, default_value = "256")] - lease_end: u32, - #[structopt(flatten)] - relay_connection: RelaychainConnectionParams, - #[structopt(flatten)] - relay_sign: RelaychainSigningParams, - #[structopt(flatten)] - para_connection: ParachainConnectionParams, -} - -/// Parachain to register. -#[derive(Debug, EnumString, EnumVariantNames, PartialEq)] -#[strum(serialize_all = "kebab_case")] -pub enum Parachain { - RialtoParachain, -} - -macro_rules! select_bridge { - ($bridge: expr, $generic: tt) => { - match $bridge { - Parachain::RialtoParachain => { - type Relaychain = relay_rialto_client::Rialto; - type Parachain = relay_rialto_parachain_client::RialtoParachain; - - use bp_rialto::{PARAS_PALLET_NAME, PARAS_REGISTRAR_PALLET_NAME}; - - $generic - }, - } - }; -} - -impl RegisterParachain { - /// Run the command. - pub async fn run(self) -> anyhow::Result<()> { - select_bridge!(self.parachain, { - let relay_client = self.relay_connection.to_client::().await?; - let relay_sign = self.relay_sign.to_keypair::()?; - let para_client = self.para_connection.to_client::().await?; - - // hopefully we're the only actor that is registering parachain right now - // => read next parachain id - let para_id_key = bp_runtime::storage_value_final_key( - PARAS_REGISTRAR_PALLET_NAME.as_bytes(), - NEXT_FREE_PARA_ID_STORAGE_NAME.as_bytes(), - ); - let para_id: ParaId = relay_client - .storage_value(StorageKey(para_id_key.to_vec()), None) - .await? - .unwrap_or(polkadot_primitives::v2::LOWEST_PUBLIC_ID) - .max(polkadot_primitives::v2::LOWEST_PUBLIC_ID); - log::info!(target: "bridge", "Going to reserve parachain id: {:?}", para_id); - - // step 1: reserve a parachain id - let relay_genesis_hash = *relay_client.genesis_hash(); - let relay_sudo_account: AccountIdOf = relay_sign.public().into(); - let reserve_parachain_id_call: CallOf = - ParaRegistrarCall::reserve {}.into(); - let reserve_parachain_signer = relay_sign.clone(); - let (spec_version, transaction_version) = relay_client.simple_runtime_version().await?; - wait_until_transaction_is_finalized::( - relay_client - .submit_and_watch_signed_extrinsic( - relay_sudo_account.clone(), - move |_, transaction_nonce| { - Ok(Bytes( - Relaychain::sign_transaction(SignParam { - spec_version, - transaction_version, - genesis_hash: relay_genesis_hash, - signer: reserve_parachain_signer, - era: relay_substrate_client::TransactionEra::immortal(), - unsigned: UnsignedTransaction::new( - reserve_parachain_id_call.into(), - transaction_nonce, - ), - })? - .encode(), - )) - }, - ) - .await?, - ) - .await?; - log::info!(target: "bridge", "Reserved parachain id: {:?}", para_id); - - // step 2: register parathread - let para_genesis_header = para_client.header_by_number(Zero::zero()).await?; - let para_code = para_client - .raw_storage_value(StorageKey(CODE.to_vec()), Some(para_genesis_header.hash())) - .await? - .ok_or_else(|| { - anyhow::format_err!("Cannot fetch validation code of {}", Parachain::NAME) - })? 
- .0; - log::info!( - target: "bridge", - "Going to register parachain {:?}: genesis len = {} code len = {}", - para_id, - para_genesis_header.encode().len(), - para_code.len(), - ); - let register_parathread_call: CallOf = ParaRegistrarCall::register { - id: para_id, - genesis_head: ParaHeadData(para_genesis_header.encode()), - validation_code: ParaValidationCode(para_code), - } - .into(); - let register_parathread_signer = relay_sign.clone(); - wait_until_transaction_is_finalized::( - relay_client - .submit_and_watch_signed_extrinsic( - relay_sudo_account.clone(), - move |_, transaction_nonce| { - Ok(Bytes( - Relaychain::sign_transaction(SignParam { - spec_version, - transaction_version, - genesis_hash: relay_genesis_hash, - signer: register_parathread_signer, - era: relay_substrate_client::TransactionEra::immortal(), - unsigned: UnsignedTransaction::new( - register_parathread_call.into(), - transaction_nonce, - ), - })? - .encode(), - )) - }, - ) - .await?, - ) - .await?; - log::info!(target: "bridge", "Registered parachain: {:?}. Waiting for onboarding", para_id); - - // wait until parathread is onboarded - let para_state_key = bp_runtime::storage_map_final_key::( - PARAS_PALLET_NAME, - PARAS_LIFECYCLES_STORAGE_NAME, - ¶_id.encode(), - ); - wait_para_state( - &relay_client, - ¶_state_key.0, - &[ParaLifecycle::Onboarding, ParaLifecycle::Parathread], - ParaLifecycle::Parathread, - ) - .await?; - - // step 3: force parachain leases - let lease_begin = self.lease_begin; - let lease_end = self.lease_end; - let para_deposit = self.deposit.cast().into(); - log::info!( - target: "bridge", - "Going to force leases of parachain {:?}: [{}; {}]", - para_id, - lease_begin, - lease_end, - ); - let force_lease_call: CallOf = SudoCall::sudo { - call: Box::new( - ParaSlotsCall::force_lease { - para: para_id, - leaser: relay_sudo_account.clone(), - amount: para_deposit, - period_begin: lease_begin, - period_count: lease_end.saturating_sub(lease_begin).saturating_add(1), - } - .into(), - ), - } - .into(); - let force_lease_signer = relay_sign.clone(); - relay_client - .submit_signed_extrinsic(relay_sudo_account.clone(), move |_, transaction_nonce| { - Ok(Bytes( - Relaychain::sign_transaction(SignParam { - spec_version, - transaction_version, - genesis_hash: relay_genesis_hash, - signer: force_lease_signer, - era: relay_substrate_client::TransactionEra::immortal(), - unsigned: UnsignedTransaction::new( - force_lease_call.into(), - transaction_nonce, - ), - })? - .encode(), - )) - }) - .await?; - log::info!(target: "bridge", "Registered parachain leases: {:?}. Waiting for onboarding", para_id); - - // wait until parachain is onboarded - wait_para_state( - &relay_client, - ¶_state_key.0, - &[ - ParaLifecycle::Onboarding, - ParaLifecycle::UpgradingParathread, - ParaLifecycle::Parathread, - ], - ParaLifecycle::Parachain, - ) - .await?; - - Ok(()) - }) - } -} - -/// Wait until parachain state is changed. -async fn wait_para_state( - relay_client: &Client, - para_state_key: &[u8], - from_states: &[ParaLifecycle], - to_state: ParaLifecycle, -) -> anyhow::Result<()> { - loop { - let para_state: ParaLifecycle = relay_client - .storage_value(StorageKey(para_state_key.to_vec()), None) - .await? 
- .ok_or_else(|| { - anyhow::format_err!( - "Cannot fetch next free parachain lifecycle from the runtime storage of {}", - Relaychain::NAME, - ) - })?; - if para_state == to_state { - log::info!(target: "bridge", "Parachain state is now: {:?}", to_state); - return Ok(()) - } - if !from_states.contains(¶_state) { - return Err(anyhow::format_err!("Invalid parachain lifecycle: {:?}", para_state)) - } - - log::info!(target: "bridge", "Parachain state: {:?}. Waiting for {:?}", para_state, to_state); - async_std::task::sleep(Relaychain::AVERAGE_BLOCK_INTERVAL).await; - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::cli::{ - ParachainRuntimeVersionParams, RelaychainRuntimeVersionParams, RuntimeVersionType, - }; - - #[test] - fn register_rialto_parachain() { - let register_parachain = RegisterParachain::from_iter(vec![ - "register-parachain", - "rialto-parachain", - "--parachain-host", - "127.0.0.1", - "--parachain-port", - "11949", - "--relaychain-host", - "127.0.0.1", - "--relaychain-port", - "9944", - "--relaychain-signer", - "//Alice", - "--deposit", - "42", - "--lease-begin", - "100", - "--lease-end", - "200", - ]); - - assert_eq!( - register_parachain, - RegisterParachain { - parachain: Parachain::RialtoParachain, - deposit: Balance(42), - lease_begin: 100, - lease_end: 200, - relay_connection: RelaychainConnectionParams { - relaychain_host: "127.0.0.1".into(), - relaychain_port: 9944, - relaychain_secure: false, - relaychain_runtime_version: RelaychainRuntimeVersionParams { - relaychain_version_mode: RuntimeVersionType::Bundle, - relaychain_spec_version: None, - relaychain_transaction_version: None, - } - }, - relay_sign: RelaychainSigningParams { - relaychain_signer: Some("//Alice".into()), - relaychain_signer_password: None, - relaychain_signer_file: None, - relaychain_signer_password_file: None, - relaychain_transactions_mortality: None, - }, - para_connection: ParachainConnectionParams { - parachain_host: "127.0.0.1".into(), - parachain_port: 11949, - parachain_secure: false, - parachain_runtime_version: ParachainRuntimeVersionParams { - parachain_version_mode: RuntimeVersionType::Bundle, - parachain_spec_version: None, - parachain_transaction_version: None, - } - }, - } - ); - } -} diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/reinit_bridge.rs b/polkadot/bridges/relays/bin-substrate/src/cli/reinit_bridge.rs deleted file mode 100644 index 957d082d5f8..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/cli/reinit_bridge.rs +++ /dev/null @@ -1,552 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
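The `register-parachain` command above reads raw relay-chain storage (the `NextFreeParaId` value and the `ParaLifecycles` map) by computing final storage keys via `bp_runtime::storage_value_final_key` / `storage_map_final_key`. Below is a minimal sketch of how such keys are built, assuming the standard FRAME key layout; the helper names and the "Registrar" pallet prefix are illustrative assumptions, not taken from the deleted code.

    use codec::Encode;
    use sp_core::hashing::{twox_128, twox_64};

    /// Final key of a `StorageValue`: twox128(pallet_prefix) ++ twox128(storage_name).
    fn storage_value_key(pallet: &str, item: &str) -> Vec<u8> {
        let mut key = twox_128(pallet.as_bytes()).to_vec();
        key.extend_from_slice(&twox_128(item.as_bytes()));
        key
    }

    /// Final key of a `Twox64Concat` map entry: value key ++ twox64(scale(key)) ++ scale(key).
    fn twox_64_concat_map_key<K: Encode>(pallet: &str, item: &str, map_key: &K) -> Vec<u8> {
        let encoded = map_key.encode();
        let mut key = storage_value_key(pallet, item);
        key.extend_from_slice(&twox_64(&encoded));
        key.extend_from_slice(&encoded);
        key
    }

    // E.g. (pallet prefix assumed here): storage_value_key("Registrar", "NextFreeParaId")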
- -use crate::{ - chains::{ - kusama_headers_to_polkadot::KusamaFinalityToPolkadot, - polkadot_headers_to_kusama::PolkadotFinalityToKusama, - }, - cli::{ - swap_tokens::wait_until_transaction_is_finalized, SourceConnectionParams, - TargetConnectionParams, TargetSigningParams, - }, -}; -use bp_header_chain::justification::GrandpaJustification; -use bp_runtime::Chain; -use codec::Encode; -use finality_relay::{SourceClient, SourceHeader}; -use frame_support::weights::Weight; -use num_traits::One; -use pallet_bridge_grandpa::weights::WeightInfo; -use relay_substrate_client::{ - AccountIdOf, BlockNumberOf, Chain as _, Client, Error as SubstrateError, HeaderOf, SignParam, - SyncHeader, TransactionEra, TransactionSignScheme, UnsignedTransaction, -}; -use sp_core::{Bytes, Pair}; -use structopt::StructOpt; -use strum::{EnumString, EnumVariantNames, VariantNames}; -use substrate_relay_helper::{ - finality_pipeline::SubstrateFinalitySyncPipeline, finality_source::SubstrateFinalitySource, - finality_target::SubstrateFinalityTarget, messages_source::read_client_state, - TransactionParams, -}; - -/// Reinitialize bridge pallet. -#[derive(Debug, PartialEq, StructOpt)] -pub struct ReinitBridge { - /// A bridge instance to reinitialize. - #[structopt(possible_values = ReinitBridgeName::VARIANTS, case_insensitive = true)] - bridge: ReinitBridgeName, - #[structopt(flatten)] - source: SourceConnectionParams, - #[structopt(flatten)] - target: TargetConnectionParams, - #[structopt(flatten)] - target_sign: TargetSigningParams, -} - -#[derive(Debug, EnumString, EnumVariantNames, PartialEq)] -#[strum(serialize_all = "kebab_case")] -/// Bridge to initialize. -pub enum ReinitBridgeName { - KusamaToPolkadot, - PolkadotToKusama, -} - -macro_rules! select_bridge { - ($bridge: expr, $generic: tt) => { - match $bridge { - ReinitBridgeName::KusamaToPolkadot => { - use relay_polkadot_client::runtime; - - type Finality = KusamaFinalityToPolkadot; - type Call = runtime::Call; - - fn submit_finality_proof_call( - header_and_proof: HeaderAndProof, - ) -> runtime::Call { - runtime::Call::BridgeKusamaGrandpa( - runtime::BridgeKusamaGrandpaCall::submit_finality_proof( - Box::new(header_and_proof.0.into_inner()), - header_and_proof.1, - ), - ) - } - - fn set_pallet_operation_mode_call(operational: bool) -> runtime::Call { - runtime::Call::BridgeKusamaGrandpa( - runtime::BridgeKusamaGrandpaCall::set_operational(operational), - ) - } - - fn batch_all_call(calls: Vec) -> runtime::Call { - runtime::Call::Utility(runtime::UtilityCall::batch_all(calls)) - } - - $generic - }, - ReinitBridgeName::PolkadotToKusama => { - use relay_kusama_client::runtime; - - type Finality = PolkadotFinalityToKusama; - type Call = runtime::Call; - - fn submit_finality_proof_call( - header_and_proof: HeaderAndProof, - ) -> runtime::Call { - runtime::Call::BridgePolkadotGrandpa( - runtime::BridgePolkadotGrandpaCall::submit_finality_proof( - Box::new(header_and_proof.0.into_inner()), - header_and_proof.1, - ), - ) - } - - fn set_pallet_operation_mode_call(operational: bool) -> runtime::Call { - runtime::Call::BridgePolkadotGrandpa( - runtime::BridgePolkadotGrandpaCall::set_operational(operational), - ) - } - - fn batch_all_call(calls: Vec) -> runtime::Call { - runtime::Call::Utility(runtime::UtilityCall::batch_all(calls)) - } - - $generic - }, - } - }; -} - -impl ReinitBridge { - /// Run the command. 
- pub async fn run(self) -> anyhow::Result<()> { - select_bridge!(self.bridge, { - type Source = ::SourceChain; - type Target = ::TargetChain; - - let source_client = self.source.to_client::().await?; - let target_client = self.target.to_client::().await?; - let target_sign = self.target_sign.to_keypair::()?; - let transaction_params = TransactionParams { - signer: target_sign, - mortality: self.target_sign.target_transactions_mortality, - }; - - let finality_source = - SubstrateFinalitySource::::new(source_client.clone(), None); - let finality_target = SubstrateFinalityTarget::::new( - target_client.clone(), - transaction_params.clone(), - ); - - // this subcommand assumes that the pallet at the target chain is halted - ensure_pallet_operating_mode(&finality_target, false).await?; - - // we can't call `finality_target.best_finalized_source_block_id()`, because pallet is - // halted and the call will fail => just use what it uses internally - let current_number = - best_source_block_number_at_target::(&target_client).await?; - let target_number = finality_source.best_finalized_block_number().await?; - log::info!( - target: "bridge", - "Best finalized {} header: at {}: {}, at {}: {}", - Source::NAME, - Source::NAME, - target_number, - Target::NAME, - current_number, - ); - - // prepare list of mandatory headers from the range `(current_number; target_number]` - let headers_to_submit = find_mandatory_headers_in_range( - &finality_source, - (current_number + 1, target_number), - ) - .await?; - let latest_andatory_header_number = headers_to_submit.last().map(|(h, _)| h.number()); - log::info!( - target: "bridge", - "Missing {} mandatory {} headers at {}", - headers_to_submit.len(), - Source::NAME, - Target::NAME, - ); - - // split all mandatory headers into batches - let headers_batches = - make_mandatory_headers_batches::(headers_to_submit, |(_, proof)| { - // we don't have an access to the Kusama/Polkadot chain runtimes here, so we'll - // be using Millau weights. 
It isn't super-critical, unless real weights are - // magnitude higher or so - pallet_bridge_grandpa::weights::MillauWeight::::submit_finality_proof( - proof.commit.precommits.len().try_into().unwrap_or(u32::MAX), - proof.votes_ancestries.len().try_into().unwrap_or(u32::MAX), - ) - }); - log::info!( - target: "bridge", - "We're going to submit {} transactions to {} node", - headers_batches.len(), - Target::NAME, - ); - - // each batch is submitted as a separate transaction - let signer_account_id: AccountIdOf = transaction_params.signer.public().into(); - let genesis_hash = *target_client.genesis_hash(); - let (spec_version, transaction_version) = - target_client.simple_runtime_version().await?; - let last_batch_index = headers_batches.len() - 1; - for (i, headers_batch) in headers_batches.into_iter().enumerate() { - let is_last_batch = i == last_batch_index; - let expected_number = - headers_batch.last().expect("all batches are non-empty").0.number(); - let transaction_params = transaction_params.clone(); - log::info!( - target: "bridge", - "Going to submit transaction that updates best {} header at {} to {}", - Source::NAME, - Target::NAME, - expected_number, - ); - - // prepare `batch_all` call - let mut batch_calls = Vec::with_capacity(headers_batch.len() + 2); - // the first call is always resumes pallet operation - batch_calls.push(set_pallet_operation_mode_call(true)); - // followed by submit-finality-proofs calls - for header_and_proof in headers_batch { - batch_calls.push(submit_finality_proof_call(header_and_proof)); - } - // if it isn't the last batch, we shall halt pallet again - if !is_last_batch { - batch_calls.push(set_pallet_operation_mode_call(false)); - } - let submit_batch_call = batch_all_call(batch_calls); - - let batch_transaction_events = target_client - .submit_and_watch_signed_extrinsic( - signer_account_id.clone(), - move |best_block_id, transaction_nonce| { - Ok(Bytes( - Target::sign_transaction(SignParam { - spec_version, - transaction_version, - genesis_hash, - signer: transaction_params.signer.clone(), - era: TransactionEra::new( - best_block_id, - transaction_params.mortality, - ), - unsigned: UnsignedTransaction::new( - submit_batch_call.into(), - transaction_nonce, - ), - })? - .encode(), - )) - }, - ) - .await?; - wait_until_transaction_is_finalized::(batch_transaction_events).await?; - - // verify that the best finalized header at target has been updated - let current_number = - best_source_block_number_at_target::(&target_client).await?; - if current_number != expected_number { - return Err(anyhow::format_err!( - "Transaction has failed to update best {} header at {} to {}. It is {}", - Source::NAME, - Target::NAME, - expected_number, - current_number, - )) - } - - // verify that the pallet is still halted (or operational if it is the last batch) - ensure_pallet_operating_mode(&finality_target, is_last_batch).await?; - } - - if let Some(latest_andatory_header_number) = latest_andatory_header_number { - log::info!( - target: "bridge", - "Successfully updated best {} header at {} to {}. Pallet is now operational", - Source::NAME, - Target::NAME, - latest_andatory_header_number, - ); - } - - Ok(()) - }) - } -} - -/// Mandatory header and its finality proof. -type HeaderAndProof

<P> = (
-    SyncHeader<HeaderOf<<P as SubstrateFinalitySyncPipeline>::SourceChain>>,
-    GrandpaJustification<HeaderOf<<P as SubstrateFinalitySyncPipeline>::SourceChain>>,
-);
-/// Vector of mandatory headers and their finality proofs.
-type HeadersAndProofs<P> = Vec<HeaderAndProof<P>>;
-
-/// Returns best finalized source header number known to the bridge GRANDPA pallet at the target
-/// chain.
-///
-/// This function works even if bridge GRANDPA pallet at the target chain is halted.
-async fn best_source_block_number_at_target<P: SubstrateFinalitySyncPipeline>(
-    target_client: &Client<P::TargetChain>,
-) -> anyhow::Result<BlockNumberOf<P::SourceChain>> {
-    Ok(read_client_state::<P::TargetChain, P::SourceChain>(
-        target_client,
-        None,
-        P::SourceChain::BEST_FINALIZED_HEADER_ID_METHOD,
-    )
-    .await?
-    .best_finalized_peer_at_best_self
-    .0)
-}
-
-/// Verify that the bridge GRANDPA pallet at the target chain is either halted, or operational.
-async fn ensure_pallet_operating_mode<P: SubstrateFinalitySyncPipeline>(
-    finality_target: &SubstrateFinalityTarget<P>,
-    operational: bool,
-) -> anyhow::Result<()> {
-    match (operational, finality_target.ensure_pallet_active().await) {
-        (true, Ok(())) => Ok(()),
-        (false, Err(SubstrateError::BridgePalletIsHalted)) => Ok(()),
-        _ =>
-            return Err(anyhow::format_err!(
-                "Bridge GRANDPA pallet at {} is expected to be {}, but it isn't",
-                P::TargetChain::NAME,
-                if operational { "operational" } else { "halted" },
-            )),
-    }
-}
-
-/// Returns list of all mandatory headers in given range.
-async fn find_mandatory_headers_in_range<P: SubstrateFinalitySyncPipeline>(
-    finality_source: &SubstrateFinalitySource<P>,
-    range: (BlockNumberOf<P::SourceChain>, BlockNumberOf<P::SourceChain>),
-) -> anyhow::Result<HeadersAndProofs<P>> {
-    let mut mandatory_headers = Vec::new();
-    let mut current = range.0;
-    while current <= range.1 {
-        let (header, proof) = finality_source.header_and_finality_proof(current).await?;
-        if header.is_mandatory() {
-            match proof {
-                Some(proof) => mandatory_headers.push((header, proof)),
-                None =>
-                    return Err(anyhow::format_err!(
-                        "Missing GRANDPA justification for {} header {}",
-                        P::SourceChain::NAME,
-                        current,
-                    )),
-            }
-        }
-
-        current += One::one();
-    }
-
-    Ok(mandatory_headers)
-}
-
-/// Given list of mandatory headers, prepare batches of headers, so that every batch may fit into
-/// single transaction.
-fn make_mandatory_headers_batches<
-    P: SubstrateFinalitySyncPipeline,
-    F: Fn(&HeaderAndProof<P>) -> Weight,
->(
-    mut headers_to_submit: HeadersAndProofs<P>
, - submit_header_weight: F, -) -> Vec> { - // now that we have all mandatory headers, let's prepare transactions - // (let's keep all our transactions below 2/3 of max tx size/weight to have some reserve - // for utility overhead + for halting transaction) - let maximal_tx_size = P::TargetChain::max_extrinsic_size() * 2 / 3; - let maximal_tx_weight = P::TargetChain::max_extrinsic_weight() * 2 / 3; - let mut current_batch_size: u32 = 0; - let mut current_batch_weight: Weight = 0; - let mut batches = Vec::new(); - let mut i = 0; - while i < headers_to_submit.len() { - let header_and_proof_size = - headers_to_submit[i].0.encode().len() + headers_to_submit[i].1.encode().len(); - let header_and_proof_weight = submit_header_weight(&headers_to_submit[i]); - - let new_batch_size = current_batch_size - .saturating_add(u32::try_from(header_and_proof_size).unwrap_or(u32::MAX)); - let new_batch_weight = current_batch_weight.saturating_add(header_and_proof_weight); - - let is_exceeding_tx_size = new_batch_size > maximal_tx_size; - let is_exceeding_tx_weight = new_batch_weight > maximal_tx_weight; - let is_new_batch_required = is_exceeding_tx_size || is_exceeding_tx_weight; - - if is_new_batch_required { - // if `i` is 0 and we're here, it is a weird situation: even single header submission is - // larger than we've planned for a bunch of headers. Let's be optimistic and hope that - // the tx will still succeed. - let spit_off_index = std::cmp::max(i, 1); - let remaining_headers_to_submit = headers_to_submit.split_off(spit_off_index); - batches.push(headers_to_submit); - - // we'll reiterate the same header again => so set `current_*` to zero - current_batch_size = 0; - current_batch_weight = 0; - headers_to_submit = remaining_headers_to_submit; - i = 0; - } else { - current_batch_size = new_batch_size; - current_batch_weight = new_batch_weight; - i += 1; - } - } - if !headers_to_submit.is_empty() { - batches.push(headers_to_submit); - } - batches -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::cli::{RuntimeVersionType, SourceRuntimeVersionParams, TargetRuntimeVersionParams}; - use bp_test_utils::{make_default_justification, test_header}; - use relay_polkadot_client::Polkadot; - use sp_runtime::{traits::Header as _, DigestItem}; - - fn make_header_and_justification( - i: u32, - size: u32, - ) -> (SyncHeader, GrandpaJustification) { - let size = size as usize; - let mut header: bp_kusama::Header = test_header(i); - let justification = make_default_justification(&header); - let actual_size = header.encode().len() + justification.encode().len(); - // additional digest means some additional bytes, so let's decrease `additional_digest_size` - // a bit - let additional_digest_size = size.saturating_sub(actual_size).saturating_sub(100); - header.digest_mut().push(DigestItem::Other(vec![0u8; additional_digest_size])); - let justification = make_default_justification(&header); - println!("{} {}", size, header.encode().len() + justification.encode().len()); - (header.into(), justification) - } - - #[test] - fn should_parse_cli_options() { - // when - let res = ReinitBridge::from_iter(vec![ - "reinit-bridge", - "kusama-to-polkadot", - "--source-host", - "127.0.0.1", - "--source-port", - "42", - "--target-host", - "127.0.0.1", - "--target-port", - "43", - "--target-signer", - "//Alice", - ]); - - // then - assert_eq!( - res, - ReinitBridge { - bridge: ReinitBridgeName::KusamaToPolkadot, - source: SourceConnectionParams { - source_host: "127.0.0.1".into(), - source_port: 42, - source_secure: false, - 
source_runtime_version: SourceRuntimeVersionParams { - source_version_mode: RuntimeVersionType::Bundle, - source_spec_version: None, - source_transaction_version: None, - } - }, - target: TargetConnectionParams { - target_host: "127.0.0.1".into(), - target_port: 43, - target_secure: false, - target_runtime_version: TargetRuntimeVersionParams { - target_version_mode: RuntimeVersionType::Bundle, - target_spec_version: None, - target_transaction_version: None, - } - }, - target_sign: TargetSigningParams { - target_signer: Some("//Alice".into()), - target_signer_password: None, - target_signer_file: None, - target_signer_password_file: None, - target_transactions_mortality: None, - }, - } - ); - } - - #[test] - fn make_mandatory_headers_batches_and_empty_headers() { - let batches = make_mandatory_headers_batches::(vec![], |_| 0); - assert!(batches.is_empty()); - } - - #[test] - fn make_mandatory_headers_batches_with_single_batch() { - let headers_to_submit = - vec![make_header_and_justification(10, Polkadot::max_extrinsic_size() / 3)]; - let batches = - make_mandatory_headers_batches::(headers_to_submit, |_| 0); - assert_eq!(batches.into_iter().map(|x| x.len()).collect::>(), vec![1],); - } - - #[test] - fn make_mandatory_headers_batches_group_by_size() { - let headers_to_submit = vec![ - make_header_and_justification(10, Polkadot::max_extrinsic_size() / 3), - make_header_and_justification(20, Polkadot::max_extrinsic_size() / 3), - make_header_and_justification(30, Polkadot::max_extrinsic_size() * 2 / 3), - make_header_and_justification(40, Polkadot::max_extrinsic_size()), - ]; - let batches = - make_mandatory_headers_batches::(headers_to_submit, |_| 0); - assert_eq!(batches.into_iter().map(|x| x.len()).collect::>(), vec![2, 1, 1],); - } - - #[test] - fn make_mandatory_headers_batches_group_by_weight() { - let headers_to_submit = vec![ - make_header_and_justification(10, 0), - make_header_and_justification(20, 0), - make_header_and_justification(30, 0), - make_header_and_justification(40, 0), - ]; - let batches = make_mandatory_headers_batches::( - headers_to_submit, - |(header, _)| { - if header.number() == 10 || header.number() == 20 { - Polkadot::max_extrinsic_weight() / 3 - } else if header.number() == 30 { - Polkadot::max_extrinsic_weight() * 2 / 3 - } else { - Polkadot::max_extrinsic_weight() - } - }, - ); - assert_eq!(batches.into_iter().map(|x| x.len()).collect::>(), vec![2, 1, 1],); - } -} diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/relay_headers.rs b/polkadot/bridges/relays/bin-substrate/src/cli/relay_headers.rs deleted file mode 100644 index 45034aba4b5..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/cli/relay_headers.rs +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
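The `make_mandatory_headers_batches` helper above groups headers greedily so that every submitted transaction stays below two thirds of the target chain's extrinsic limits. A simplified sketch of that rule, using plain integer sizes and weights instead of the relay's generic header/justification pairs; the function name and tuple encoding are illustrative only:

    /// Greedily split (size, weight) items into batches that stay below 2/3 of the limits.
    fn greedy_batches(items: &[(u32, u64)], max_size: u32, max_weight: u64) -> Vec<Vec<(u32, u64)>> {
        // Keep some reserve for the batching overhead and the extra halting/resuming calls.
        let size_limit = max_size * 2 / 3;
        let weight_limit = max_weight * 2 / 3;

        let mut batches = Vec::new();
        let mut batch: Vec<(u32, u64)> = Vec::new();
        let (mut size, mut weight) = (0u32, 0u64);
        for &(item_size, item_weight) in items {
            let exceeds_limits = size.saturating_add(item_size) > size_limit
                || weight.saturating_add(item_weight) > weight_limit;
            // Start a new batch when the limits would be exceeded, but never emit an empty batch:
            // a single oversized item is still submitted alone, hoping it fits the real limits.
            if exceeds_limits && !batch.is_empty() {
                batches.push(std::mem::take(&mut batch));
                size = 0;
                weight = 0;
            }
            size = size.saturating_add(item_size);
            weight = weight.saturating_add(item_weight);
            batch.push((item_size, item_weight));
        }
        if !batch.is_empty() {
            batches.push(batch);
        }
        batches
    }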
- -use structopt::StructOpt; -use strum::{EnumString, EnumVariantNames, VariantNames}; - -use relay_utils::metrics::{GlobalMetrics, StandaloneMetric}; -use substrate_relay_helper::finality_pipeline::SubstrateFinalitySyncPipeline; - -use crate::cli::{ - PrometheusParams, SourceConnectionParams, TargetConnectionParams, TargetSigningParams, -}; - -/// Start headers relayer process. -#[derive(StructOpt)] -pub struct RelayHeaders { - /// A bridge instance to relay headers for. - #[structopt(possible_values = RelayHeadersBridge::VARIANTS, case_insensitive = true)] - bridge: RelayHeadersBridge, - /// If passed, only mandatory headers (headers that are changing the GRANDPA authorities set) - /// are relayed. - #[structopt(long)] - only_mandatory_headers: bool, - #[structopt(flatten)] - source: SourceConnectionParams, - #[structopt(flatten)] - target: TargetConnectionParams, - #[structopt(flatten)] - target_sign: TargetSigningParams, - #[structopt(flatten)] - prometheus_params: PrometheusParams, -} - -#[derive(Debug, EnumString, EnumVariantNames)] -#[strum(serialize_all = "kebab_case")] -/// Headers relay bridge. -pub enum RelayHeadersBridge { - MillauToRialto, - RialtoToMillau, - WestendToMillau, - RococoToWococo, - WococoToRococo, - KusamaToPolkadot, - PolkadotToKusama, -} - -macro_rules! select_bridge { - ($bridge: expr, $generic: tt) => { - match $bridge { - RelayHeadersBridge::MillauToRialto => { - type Source = relay_millau_client::Millau; - type Target = relay_rialto_client::Rialto; - type Finality = crate::chains::millau_headers_to_rialto::MillauFinalityToRialto; - - $generic - }, - RelayHeadersBridge::RialtoToMillau => { - type Source = relay_rialto_client::Rialto; - type Target = relay_millau_client::Millau; - type Finality = crate::chains::rialto_headers_to_millau::RialtoFinalityToMillau; - - $generic - }, - RelayHeadersBridge::WestendToMillau => { - type Source = relay_westend_client::Westend; - type Target = relay_millau_client::Millau; - type Finality = crate::chains::westend_headers_to_millau::WestendFinalityToMillau; - - $generic - }, - RelayHeadersBridge::RococoToWococo => { - type Source = relay_rococo_client::Rococo; - type Target = relay_wococo_client::Wococo; - type Finality = crate::chains::rococo_headers_to_wococo::RococoFinalityToWococo; - - $generic - }, - RelayHeadersBridge::WococoToRococo => { - type Source = relay_wococo_client::Wococo; - type Target = relay_rococo_client::Rococo; - type Finality = crate::chains::wococo_headers_to_rococo::WococoFinalityToRococo; - - $generic - }, - RelayHeadersBridge::KusamaToPolkadot => { - type Source = relay_kusama_client::Kusama; - type Target = relay_polkadot_client::Polkadot; - type Finality = crate::chains::kusama_headers_to_polkadot::KusamaFinalityToPolkadot; - - $generic - }, - RelayHeadersBridge::PolkadotToKusama => { - type Source = relay_polkadot_client::Polkadot; - type Target = relay_kusama_client::Kusama; - type Finality = crate::chains::polkadot_headers_to_kusama::PolkadotFinalityToKusama; - - $generic - }, - } - }; -} - -impl RelayHeaders { - /// Run the command. 
- pub async fn run(self) -> anyhow::Result<()> { - select_bridge!(self.bridge, { - let source_client = self.source.to_client::().await?; - let target_client = self.target.to_client::().await?; - let target_transactions_mortality = self.target_sign.target_transactions_mortality; - let target_sign = self.target_sign.to_keypair::()?; - - let metrics_params: relay_utils::metrics::MetricsParams = self.prometheus_params.into(); - GlobalMetrics::new()?.register_and_spawn(&metrics_params.registry)?; - - let target_transactions_params = substrate_relay_helper::TransactionParams { - signer: target_sign, - mortality: target_transactions_mortality, - }; - Finality::start_relay_guards( - &target_client, - &target_transactions_params, - self.target.can_start_version_guard(), - ) - .await?; - - substrate_relay_helper::finality_pipeline::run::( - source_client, - target_client, - self.only_mandatory_headers, - target_transactions_params, - metrics_params, - ) - .await - }) - } -} diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/relay_headers_and_messages.rs b/polkadot/bridges/relays/bin-substrate/src/cli/relay_headers_and_messages.rs deleted file mode 100644 index 4ff6ee0947c..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/cli/relay_headers_and_messages.rs +++ /dev/null @@ -1,576 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Complex headers+messages relays support. -//! -//! To add new complex relay between `ChainA` and `ChainB`, you must: -//! -//! 1) ensure that there's a `declare_chain_options!(...)` for both chains; -//! 2) add `declare_bridge_options!(...)` for the bridge; -//! 3) add bridge support to the `select_bridge! { ... }` macro. - -use futures::{FutureExt, TryFutureExt}; -use structopt::StructOpt; -use strum::VariantNames; - -use codec::Encode; -use messages_relay::relay_strategy::MixStrategy; -use relay_substrate_client::{ - AccountIdOf, CallOf, Chain, ChainRuntimeVersion, Client, SignParam, TransactionSignScheme, - UnsignedTransaction, -}; -use relay_utils::metrics::MetricsParams; -use sp_core::{Bytes, Pair}; -use substrate_relay_helper::{ - finality_pipeline::SubstrateFinalitySyncPipeline, messages_lane::MessagesRelayParams, - on_demand_headers::OnDemandHeadersRelay, TransactionParams, -}; - -use crate::{ - cli::{relay_messages::RelayerMode, CliChain, HexLaneId, PrometheusParams, RuntimeVersionType}, - declare_chain_options, -}; - -/// Maximal allowed conversion rate error ratio (abs(real - stored) / stored) that we allow. -/// -/// If it is zero, then transaction will be submitted every time we see difference between -/// stored and real conversion rates. If it is large enough (e.g. > than 10 percents, which is 0.1), -/// then rational relayers may stop relaying messages because they were submitted using -/// lesser conversion rate. 
-pub(crate) const CONVERSION_RATE_ALLOWED_DIFFERENCE_RATIO: f64 = 0.05; - -/// Start headers+messages relayer process. -#[derive(StructOpt)] -pub enum RelayHeadersAndMessages { - MillauRialto(MillauRialtoHeadersAndMessages), - RococoWococo(RococoWococoHeadersAndMessages), - KusamaPolkadot(KusamaPolkadotHeadersAndMessages), -} - -/// Parameters that have the same names across all bridges. -#[derive(StructOpt)] -pub struct HeadersAndMessagesSharedParams { - /// Hex-encoded lane identifiers that should be served by the complex relay. - #[structopt(long, default_value = "00000000")] - lane: Vec, - #[structopt(long, possible_values = RelayerMode::VARIANTS, case_insensitive = true, default_value = "rational")] - relayer_mode: RelayerMode, - /// Create relayers fund accounts on both chains, if it does not exists yet. - #[structopt(long)] - create_relayers_fund_accounts: bool, - /// If passed, only mandatory headers (headers that are changing the GRANDPA authorities set) - /// are relayed. - #[structopt(long)] - only_mandatory_headers: bool, - #[structopt(flatten)] - prometheus_params: PrometheusParams, -} - -// The reason behind this macro is that 'normal' relays are using source and target chains -// terminology, which is unusable for both-way relays (if you're relaying headers from Rialto to -// Millau and from Millau to Rialto, then which chain is source?). -macro_rules! declare_bridge_options { - ($chain1:ident, $chain2:ident) => { - paste::item! { - #[doc = $chain1 " and " $chain2 " headers+messages relay params."] - #[derive(StructOpt)] - pub struct [<$chain1 $chain2 HeadersAndMessages>] { - #[structopt(flatten)] - shared: HeadersAndMessagesSharedParams, - #[structopt(flatten)] - left: [<$chain1 ConnectionParams>], - #[structopt(flatten)] - left_sign: [<$chain1 SigningParams>], - #[structopt(flatten)] - left_messages_pallet_owner: [<$chain1 MessagesPalletOwnerSigningParams>], - #[structopt(flatten)] - right: [<$chain2 ConnectionParams>], - #[structopt(flatten)] - right_sign: [<$chain2 SigningParams>], - #[structopt(flatten)] - right_messages_pallet_owner: [<$chain2 MessagesPalletOwnerSigningParams>], - } - - impl From for [<$chain1 $chain2 HeadersAndMessages>] { - fn from(relay_params: RelayHeadersAndMessages) -> [<$chain1 $chain2 HeadersAndMessages>] { - match relay_params { - RelayHeadersAndMessages::[<$chain1 $chain2>](params) => params, - _ => unreachable!(), - } - } - } - } - }; -} - -macro_rules! 
select_bridge { - ($bridge: expr, $generic: tt) => { - match $bridge { - RelayHeadersAndMessages::MillauRialto(_) => { - type Params = MillauRialtoHeadersAndMessages; - - type Left = relay_millau_client::Millau; - type Right = relay_rialto_client::Rialto; - - type LeftToRightFinality = - crate::chains::millau_headers_to_rialto::MillauFinalityToRialto; - type RightToLeftFinality = - crate::chains::rialto_headers_to_millau::RialtoFinalityToMillau; - - type LeftAccountIdConverter = bp_millau::AccountIdConverter; - type RightAccountIdConverter = bp_rialto::AccountIdConverter; - - use crate::chains::{ - millau_messages_to_rialto::MillauMessagesToRialto as LeftToRightMessageLane, - rialto_messages_to_millau::RialtoMessagesToMillau as RightToLeftMessageLane, - }; - - async fn left_create_account( - _left_client: Client, - _left_sign: ::AccountKeyPair, - _account_id: AccountIdOf, - ) -> anyhow::Result<()> { - Err(anyhow::format_err!("Account creation is not supported by this bridge")) - } - - async fn right_create_account( - _right_client: Client, - _right_sign: ::AccountKeyPair, - _account_id: AccountIdOf, - ) -> anyhow::Result<()> { - Err(anyhow::format_err!("Account creation is not supported by this bridge")) - } - - $generic - }, - RelayHeadersAndMessages::RococoWococo(_) => { - type Params = RococoWococoHeadersAndMessages; - - type Left = relay_rococo_client::Rococo; - type Right = relay_wococo_client::Wococo; - - type LeftToRightFinality = - crate::chains::rococo_headers_to_wococo::RococoFinalityToWococo; - type RightToLeftFinality = - crate::chains::wococo_headers_to_rococo::WococoFinalityToRococo; - - type LeftAccountIdConverter = bp_rococo::AccountIdConverter; - type RightAccountIdConverter = bp_wococo::AccountIdConverter; - - use crate::chains::{ - rococo_messages_to_wococo::RococoMessagesToWococo as LeftToRightMessageLane, - wococo_messages_to_rococo::WococoMessagesToRococo as RightToLeftMessageLane, - }; - - async fn left_create_account( - left_client: Client, - left_sign: ::AccountKeyPair, - account_id: AccountIdOf, - ) -> anyhow::Result<()> { - submit_signed_extrinsic( - left_client, - left_sign, - relay_rococo_client::runtime::Call::Balances( - relay_rococo_client::runtime::BalancesCall::transfer( - bp_rococo::AccountAddress::Id(account_id), - bp_rococo::EXISTENTIAL_DEPOSIT.into(), - ), - ), - ) - .await - } - - async fn right_create_account( - right_client: Client, - right_sign: ::AccountKeyPair, - account_id: AccountIdOf, - ) -> anyhow::Result<()> { - submit_signed_extrinsic( - right_client, - right_sign, - relay_wococo_client::runtime::Call::Balances( - relay_wococo_client::runtime::BalancesCall::transfer( - bp_wococo::AccountAddress::Id(account_id), - bp_wococo::EXISTENTIAL_DEPOSIT.into(), - ), - ), - ) - .await - } - - $generic - }, - RelayHeadersAndMessages::KusamaPolkadot(_) => { - type Params = KusamaPolkadotHeadersAndMessages; - - type Left = relay_kusama_client::Kusama; - type Right = relay_polkadot_client::Polkadot; - - type LeftToRightFinality = - crate::chains::kusama_headers_to_polkadot::KusamaFinalityToPolkadot; - type RightToLeftFinality = - crate::chains::polkadot_headers_to_kusama::PolkadotFinalityToKusama; - - type LeftAccountIdConverter = bp_kusama::AccountIdConverter; - type RightAccountIdConverter = bp_polkadot::AccountIdConverter; - - use crate::chains::{ - kusama_messages_to_polkadot::KusamaMessagesToPolkadot as LeftToRightMessageLane, - polkadot_messages_to_kusama::PolkadotMessagesToKusama as RightToLeftMessageLane, - }; - - async fn left_create_account( - 
left_client: Client, - left_sign: ::AccountKeyPair, - account_id: AccountIdOf, - ) -> anyhow::Result<()> { - submit_signed_extrinsic( - left_client, - left_sign, - relay_kusama_client::runtime::Call::Balances( - relay_kusama_client::runtime::BalancesCall::transfer( - bp_kusama::AccountAddress::Id(account_id), - bp_kusama::EXISTENTIAL_DEPOSIT.into(), - ), - ), - ) - .await - } - - async fn right_create_account( - right_client: Client, - right_sign: ::AccountKeyPair, - account_id: AccountIdOf, - ) -> anyhow::Result<()> { - submit_signed_extrinsic( - right_client, - right_sign, - relay_polkadot_client::runtime::Call::Balances( - relay_polkadot_client::runtime::BalancesCall::transfer( - bp_polkadot::AccountAddress::Id(account_id), - bp_polkadot::EXISTENTIAL_DEPOSIT.into(), - ), - ), - ) - .await - } - - $generic - }, - } - }; -} - -// All supported chains. -declare_chain_options!(Millau, millau); -declare_chain_options!(Rialto, rialto); -declare_chain_options!(Rococo, rococo); -declare_chain_options!(Wococo, wococo); -declare_chain_options!(Kusama, kusama); -declare_chain_options!(Polkadot, polkadot); -// All supported bridges. -declare_bridge_options!(Millau, Rialto); -declare_bridge_options!(Rococo, Wococo); -declare_bridge_options!(Kusama, Polkadot); - -impl RelayHeadersAndMessages { - /// Run the command. - pub async fn run(self) -> anyhow::Result<()> { - select_bridge!(self, { - let params: Params = self.into(); - - let left_client = params.left.to_client::().await?; - let left_transactions_mortality = params.left_sign.transactions_mortality()?; - let left_sign = params.left_sign.to_keypair::()?; - let left_messages_pallet_owner = - params.left_messages_pallet_owner.to_keypair::()?; - let right_client = params.right.to_client::().await?; - let right_transactions_mortality = params.right_sign.transactions_mortality()?; - let right_sign = params.right_sign.to_keypair::()?; - let right_messages_pallet_owner = - params.right_messages_pallet_owner.to_keypair::()?; - - let lanes = params.shared.lane; - let relayer_mode = params.shared.relayer_mode.into(); - let relay_strategy = MixStrategy::new(relayer_mode); - - // create metrics registry and register standalone metrics - let metrics_params: MetricsParams = params.shared.prometheus_params.into(); - let metrics_params = relay_utils::relay_metrics(metrics_params).into_params(); - let left_to_right_metrics = - substrate_relay_helper::messages_metrics::standalone_metrics::< - LeftToRightMessageLane, - >(left_client.clone(), right_client.clone())?; - let right_to_left_metrics = left_to_right_metrics.clone().reverse(); - - // start conversion rate update loops for left/right chains - if let Some(left_messages_pallet_owner) = left_messages_pallet_owner.clone() { - let left_client = left_client.clone(); - let format_err = || { - anyhow::format_err!( - "Cannon run conversion rate updater: {} -> {}", - Right::NAME, - Left::NAME - ) - }; - substrate_relay_helper::conversion_rate_update::run_conversion_rate_update_loop::< - LeftToRightMessageLane, - Left, - >( - left_client.clone(), - TransactionParams { - signer: left_messages_pallet_owner.clone(), - mortality: left_transactions_mortality, - }, - left_to_right_metrics - .target_to_source_conversion_rate - .as_ref() - .ok_or_else(format_err)? - .shared_value_ref(), - left_to_right_metrics - .target_to_base_conversion_rate - .as_ref() - .ok_or_else(format_err)? - .shared_value_ref(), - left_to_right_metrics - .source_to_base_conversion_rate - .as_ref() - .ok_or_else(format_err)? 
- .shared_value_ref(), - CONVERSION_RATE_ALLOWED_DIFFERENCE_RATIO, - ); - } - if let Some(right_messages_pallet_owner) = right_messages_pallet_owner.clone() { - let right_client = right_client.clone(); - let format_err = || { - anyhow::format_err!( - "Cannon run conversion rate updater: {} -> {}", - Left::NAME, - Right::NAME - ) - }; - substrate_relay_helper::conversion_rate_update::run_conversion_rate_update_loop::< - RightToLeftMessageLane, - Right, - >( - right_client.clone(), - TransactionParams { - signer: right_messages_pallet_owner.clone(), - mortality: right_transactions_mortality, - }, - right_to_left_metrics - .target_to_source_conversion_rate - .as_ref() - .ok_or_else(format_err)? - .shared_value_ref(), - right_to_left_metrics - .target_to_base_conversion_rate - .as_ref() - .ok_or_else(format_err)? - .shared_value_ref(), - right_to_left_metrics - .source_to_base_conversion_rate - .as_ref() - .ok_or_else(format_err)? - .shared_value_ref(), - CONVERSION_RATE_ALLOWED_DIFFERENCE_RATIO, - ); - } - - // optionally, create relayers fund account - if params.shared.create_relayers_fund_accounts { - let relayer_fund_acount_id = pallet_bridge_messages::relayer_fund_account_id::< - AccountIdOf, - LeftAccountIdConverter, - >(); - let relayers_fund_account_balance = - left_client.free_native_balance(relayer_fund_acount_id.clone()).await; - if let Err(relay_substrate_client::Error::AccountDoesNotExist) = - relayers_fund_account_balance - { - log::info!(target: "bridge", "Going to create relayers fund account at {}.", Left::NAME); - left_create_account( - left_client.clone(), - left_sign.clone(), - relayer_fund_acount_id, - ) - .await?; - } - - let relayer_fund_acount_id = pallet_bridge_messages::relayer_fund_account_id::< - AccountIdOf, - RightAccountIdConverter, - >(); - let relayers_fund_account_balance = - right_client.free_native_balance(relayer_fund_acount_id.clone()).await; - if let Err(relay_substrate_client::Error::AccountDoesNotExist) = - relayers_fund_account_balance - { - log::info!(target: "bridge", "Going to create relayers fund account at {}.", Right::NAME); - right_create_account( - right_client.clone(), - right_sign.clone(), - relayer_fund_acount_id, - ) - .await?; - } - } - - // add balance-related metrics - let metrics_params = - substrate_relay_helper::messages_metrics::add_relay_balances_metrics( - left_client.clone(), - metrics_params, - Some(left_sign.public().into()), - left_messages_pallet_owner.map(|kp| kp.public().into()), - ) - .await?; - let metrics_params = - substrate_relay_helper::messages_metrics::add_relay_balances_metrics( - right_client.clone(), - metrics_params, - Some(right_sign.public().into()), - right_messages_pallet_owner.map(|kp| kp.public().into()), - ) - .await?; - - // start on-demand header relays - let left_to_right_transaction_params = TransactionParams { - mortality: right_transactions_mortality, - signer: right_sign.clone(), - }; - let right_to_left_transaction_params = TransactionParams { - mortality: left_transactions_mortality, - signer: left_sign.clone(), - }; - LeftToRightFinality::start_relay_guards( - &right_client, - &left_to_right_transaction_params, - params.right.can_start_version_guard(), - ) - .await?; - RightToLeftFinality::start_relay_guards( - &left_client, - &right_to_left_transaction_params, - params.left.can_start_version_guard(), - ) - .await?; - let left_to_right_on_demand_headers = OnDemandHeadersRelay::new::( - left_client.clone(), - right_client.clone(), - left_to_right_transaction_params, - 
params.shared.only_mandatory_headers, - ); - let right_to_left_on_demand_headers = OnDemandHeadersRelay::new::( - right_client.clone(), - left_client.clone(), - right_to_left_transaction_params, - params.shared.only_mandatory_headers, - ); - - // Need 2x capacity since we consider both directions for each lane - let mut message_relays = Vec::with_capacity(lanes.len() * 2); - for lane in lanes { - let lane = lane.into(); - let left_to_right_messages = substrate_relay_helper::messages_lane::run::< - LeftToRightMessageLane, - >(MessagesRelayParams { - source_client: left_client.clone(), - source_transaction_params: TransactionParams { - signer: left_sign.clone(), - mortality: left_transactions_mortality, - }, - target_client: right_client.clone(), - target_transaction_params: TransactionParams { - signer: right_sign.clone(), - mortality: right_transactions_mortality, - }, - source_to_target_headers_relay: Some(left_to_right_on_demand_headers.clone()), - target_to_source_headers_relay: Some(right_to_left_on_demand_headers.clone()), - lane_id: lane, - metrics_params: metrics_params.clone().disable(), - standalone_metrics: Some(left_to_right_metrics.clone()), - relay_strategy: relay_strategy.clone(), - }) - .map_err(|e| anyhow::format_err!("{}", e)) - .boxed(); - let right_to_left_messages = substrate_relay_helper::messages_lane::run::< - RightToLeftMessageLane, - >(MessagesRelayParams { - source_client: right_client.clone(), - source_transaction_params: TransactionParams { - signer: right_sign.clone(), - mortality: right_transactions_mortality, - }, - target_client: left_client.clone(), - target_transaction_params: TransactionParams { - signer: left_sign.clone(), - mortality: left_transactions_mortality, - }, - source_to_target_headers_relay: Some(right_to_left_on_demand_headers.clone()), - target_to_source_headers_relay: Some(left_to_right_on_demand_headers.clone()), - lane_id: lane, - metrics_params: metrics_params.clone().disable(), - standalone_metrics: Some(right_to_left_metrics.clone()), - relay_strategy: relay_strategy.clone(), - }) - .map_err(|e| anyhow::format_err!("{}", e)) - .boxed(); - - message_relays.push(left_to_right_messages); - message_relays.push(right_to_left_messages); - } - - relay_utils::relay_metrics(metrics_params) - .expose() - .await - .map_err(|e| anyhow::format_err!("{}", e))?; - - futures::future::select_all(message_relays).await.0 - }) - } -} - -/// Sign and submit transaction with given call to the chain. -async fn submit_signed_extrinsic>( - client: Client, - sign: C::AccountKeyPair, - call: CallOf, -) -> anyhow::Result<()> -where - AccountIdOf: From<<::AccountKeyPair as Pair>::Public>, - CallOf: Send, -{ - let genesis_hash = *client.genesis_hash(); - let (spec_version, transaction_version) = client.simple_runtime_version().await?; - client - .submit_signed_extrinsic(sign.public().into(), move |_, transaction_nonce| { - Ok(Bytes( - C::sign_transaction(SignParam { - spec_version, - transaction_version, - genesis_hash, - signer: sign, - era: relay_substrate_client::TransactionEra::immortal(), - unsigned: UnsignedTransaction::new(call.into(), transaction_nonce), - })? 
- .encode(), - )) - }) - .await - .map(drop) - .map_err(|e| anyhow::format_err!("{}", e)) -} diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/relay_messages.rs b/polkadot/bridges/relays/bin-substrate/src/cli/relay_messages.rs deleted file mode 100644 index 45087fad5eb..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/cli/relay_messages.rs +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use structopt::StructOpt; -use strum::{EnumString, EnumVariantNames, VariantNames}; - -use messages_relay::relay_strategy::MixStrategy; -use substrate_relay_helper::{messages_lane::MessagesRelayParams, TransactionParams}; - -use crate::{ - cli::{ - bridge::FullBridge, HexLaneId, PrometheusParams, SourceConnectionParams, - SourceSigningParams, TargetConnectionParams, TargetSigningParams, - }, - select_full_bridge, -}; - -/// Relayer operating mode. -#[derive(Debug, EnumString, EnumVariantNames, Clone, Copy, PartialEq)] -#[strum(serialize_all = "kebab_case")] -pub enum RelayerMode { - /// The relayer doesn't care about rewards. - Altruistic, - /// The relayer will deliver all messages and confirmations as long as he's not losing any - /// funds. - Rational, -} - -impl From for messages_relay::message_lane_loop::RelayerMode { - fn from(mode: RelayerMode) -> Self { - match mode { - RelayerMode::Altruistic => Self::Altruistic, - RelayerMode::Rational => Self::Rational, - } - } -} - -/// Start messages relayer process. -#[derive(StructOpt)] -pub struct RelayMessages { - /// A bridge instance to relay messages for. - #[structopt(possible_values = FullBridge::VARIANTS, case_insensitive = true)] - bridge: FullBridge, - /// Hex-encoded lane id that should be served by the relay. Defaults to `00000000`. - #[structopt(long, default_value = "00000000")] - lane: HexLaneId, - #[structopt(long, possible_values = RelayerMode::VARIANTS, case_insensitive = true, default_value = "rational")] - relayer_mode: RelayerMode, - #[structopt(flatten)] - source: SourceConnectionParams, - #[structopt(flatten)] - source_sign: SourceSigningParams, - #[structopt(flatten)] - target: TargetConnectionParams, - #[structopt(flatten)] - target_sign: TargetSigningParams, - #[structopt(flatten)] - prometheus_params: PrometheusParams, -} - -impl RelayMessages { - /// Run the command. 
- pub async fn run(self) -> anyhow::Result<()> { - select_full_bridge!(self.bridge, { - let source_client = self.source.to_client::().await?; - let source_sign = self.source_sign.to_keypair::()?; - let source_transactions_mortality = self.source_sign.transactions_mortality()?; - let target_client = self.target.to_client::().await?; - let target_sign = self.target_sign.to_keypair::()?; - let target_transactions_mortality = self.target_sign.transactions_mortality()?; - let relayer_mode = self.relayer_mode.into(); - let relay_strategy = MixStrategy::new(relayer_mode); - - substrate_relay_helper::messages_lane::run::(MessagesRelayParams { - source_client, - source_transaction_params: TransactionParams { - signer: source_sign, - mortality: source_transactions_mortality, - }, - target_client, - target_transaction_params: TransactionParams { - signer: target_sign, - mortality: target_transactions_mortality, - }, - source_to_target_headers_relay: None, - target_to_source_headers_relay: None, - lane_id: self.lane.into(), - metrics_params: self.prometheus_params.into(), - standalone_metrics: None, - relay_strategy, - }) - .await - .map_err(|e| anyhow::format_err!("{}", e)) - }) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn should_use_rational_relayer_mode_by_default() { - assert_eq!( - RelayMessages::from_iter(vec![ - "relay-messages", - "rialto-to-millau", - "--source-port=0", - "--source-signer=//Alice", - "--target-port=0", - "--target-signer=//Alice", - "--lane=00000000", - ]) - .relayer_mode, - RelayerMode::Rational, - ); - } - - #[test] - fn should_accept_altruistic_relayer_mode() { - assert_eq!( - RelayMessages::from_iter(vec![ - "relay-messages", - "rialto-to-millau", - "--source-port=0", - "--source-signer=//Alice", - "--target-port=0", - "--target-signer=//Alice", - "--lane=00000000", - "--relayer-mode=altruistic", - ]) - .relayer_mode, - RelayerMode::Altruistic, - ); - } -} diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/resubmit_transactions.rs b/polkadot/bridges/relays/bin-substrate/src/cli/resubmit_transactions.rs deleted file mode 100644 index f92c035082c..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/cli/resubmit_transactions.rs +++ /dev/null @@ -1,574 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
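The `relay-messages` command deleted above exposes a `--relayer-mode` flag whose `altruistic` / `rational` values are mapped one-to-one onto the message-lane loop's mode. Below is a minimal, std-only sketch of that mapping under illustrative names (the real code derives the parsing via `strum`, so the enum and parser here are hypothetical stand-ins):

#[derive(Debug, Clone, Copy, PartialEq)]
enum CliRelayerMode {
    Altruistic,
    Rational,
}

// Kebab-case, case-insensitive parsing, mirroring the removed command's
// `#[strum(serialize_all = "kebab_case")]` / `case_insensitive = true` setup.
fn parse_relayer_mode(raw: &str) -> Option<CliRelayerMode> {
    match raw.to_ascii_lowercase().as_str() {
        "altruistic" => Some(CliRelayerMode::Altruistic),
        "rational" => Some(CliRelayerMode::Rational),
        _ => None,
    }
}

fn main() {
    // `rational` is the flag's default, which the deleted tests also assert.
    assert_eq!(parse_relayer_mode("Rational"), Some(CliRelayerMode::Rational));
    assert_eq!(parse_relayer_mode("ALTRUISTIC"), Some(CliRelayerMode::Altruistic));
}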
- -use crate::cli::{Balance, TargetConnectionParams, TargetSigningParams}; - -use codec::{Decode, Encode}; -use num_traits::{One, Zero}; -use relay_substrate_client::{ - BlockWithJustification, Chain, Client, Error as SubstrateError, HeaderIdOf, HeaderOf, - SignParam, TransactionSignScheme, -}; -use relay_utils::{FailedClient, HeaderId}; -use sp_core::Bytes; -use sp_runtime::{ - traits::{Hash, Header as HeaderT}, - transaction_validity::TransactionPriority, -}; -use structopt::StructOpt; -use strum::{EnumString, EnumVariantNames, VariantNames}; -use substrate_relay_helper::TransactionParams; - -/// Start resubmit transactions process. -#[derive(StructOpt)] -pub struct ResubmitTransactions { - /// A bridge instance to relay headers for. - #[structopt(possible_values = RelayChain::VARIANTS, case_insensitive = true)] - chain: RelayChain, - #[structopt(flatten)] - target: TargetConnectionParams, - #[structopt(flatten)] - target_sign: TargetSigningParams, - /// Number of blocks we see before considering queued transaction as stalled. - #[structopt(long, default_value = "5")] - stalled_blocks: u32, - /// Tip limit. We'll never submit transaction with larger tip. - #[structopt(long)] - tip_limit: Balance, - /// Tip increase step. We'll be checking updated transaction priority by increasing its tip by - /// this step. - #[structopt(long)] - tip_step: Balance, - /// Priority selection strategy. - #[structopt(subcommand)] - strategy: PrioritySelectionStrategy, -} - -/// Chain, which transactions we're going to track && resubmit. -#[derive(Debug, EnumString, EnumVariantNames)] -#[strum(serialize_all = "kebab_case")] -pub enum RelayChain { - Millau, - Kusama, - Polkadot, -} - -/// Strategy to use for priority selection. -#[derive(StructOpt, Debug, PartialEq, Eq, Clone, Copy)] -pub enum PrioritySelectionStrategy { - /// Strategy selects tip that changes transaction priority to be better than priority of - /// the first transaction of previous block. - /// - /// It only makes sense to use this strategy for Millau transactions. Millau has transactions - /// that are close to block limits, so if there are any other queued transactions, 'large' - /// transaction won't fit the block && will be postponed. To avoid this, we change its priority - /// to some large value, making it best transaction => it'll be 'mined' first. - MakeItBestTransaction, - /// Strategy selects tip that changes transaction priority to be better than priority of - /// selected queued transaction. - /// - /// When we first see stalled transaction, we make it better than worst 1/4 of queued - /// transactions. If it is still stalled, we'll make it better than 1/3 of queued transactions, - /// ... - MakeItBetterThanQueuedTransaction, -} - -macro_rules! select_bridge { - ($bridge: expr, $generic: tt) => { - match $bridge { - RelayChain::Millau => { - type Target = relay_millau_client::Millau; - type TargetSign = relay_millau_client::Millau; - - $generic - }, - RelayChain::Kusama => { - type Target = relay_kusama_client::Kusama; - type TargetSign = relay_kusama_client::Kusama; - - $generic - }, - RelayChain::Polkadot => { - type Target = relay_polkadot_client::Polkadot; - type TargetSign = relay_polkadot_client::Polkadot; - - $generic - }, - } - }; -} - -impl ResubmitTransactions { - /// Run the command. 
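The `--stalled-blocks`, `--tip-step` and `--tip-limit` knobs above feed the resubmission logic further down in this file: once a transaction is considered stalled, its tip is raised step by step until its priority clears a target value or the next step would exceed the limit. A std-only sketch of that loop follows, with a toy priority function standing in for the runtime's transaction validation (every name here is illustrative, not the crate's API):

type Balance = u128;
type Priority = u64;

// Raise the tip by `tip_step` until `priority_of(tip)` reaches the target or
// the next step would exceed `tip_limit`; returns whether the tip changed.
fn bump_tip(
    mut tip: Balance,
    tip_step: Balance,
    tip_limit: Balance,
    target_priority: Priority,
    priority_of: impl Fn(Balance) -> Priority,
) -> (bool, Balance) {
    let old_tip = tip;
    while priority_of(tip) < target_priority {
        let next_tip = tip + tip_step;
        if next_tip > tip_limit {
            break;
        }
        tip = next_tip;
    }
    (old_tip != tip, tip)
}

fn main() {
    // Toy model: every token of tip buys one unit of priority.
    let priority_of = |tip: Balance| tip as Priority;
    assert_eq!(bump_tip(0, 100, 1000, 350, priority_of), (true, 400));
    // The limit is respected even if the target priority is unreachable.
    assert_eq!(bump_tip(0, 100, 1000, 5000, priority_of), (true, 1000));
}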
- pub async fn run(self) -> anyhow::Result<()> { - select_bridge!(self.chain, { - let relay_loop_name = format!("ResubmitTransactions{}", Target::NAME); - let client = self.target.to_client::().await?; - let transaction_params = TransactionParams { - signer: self.target_sign.to_keypair::()?, - mortality: self.target_sign.target_transactions_mortality, - }; - - relay_utils::relay_loop((), client) - .run(relay_loop_name, move |_, client, _| { - run_until_connection_lost::( - client, - transaction_params.clone(), - Context { - strategy: self.strategy, - best_header: HeaderOf::::new( - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - ), - transaction: None, - resubmitted: 0, - stalled_for: Zero::zero(), - stalled_for_limit: self.stalled_blocks as _, - tip_step: self.tip_step.cast() as _, - tip_limit: self.tip_limit.cast() as _, - }, - ) - }) - .await - .map_err(Into::into) - }) - } -} - -impl PrioritySelectionStrategy { - /// Select target priority. - async fn select_target_priority>( - &self, - client: &Client, - context: &Context, - ) -> Result, SubstrateError> { - match *self { - PrioritySelectionStrategy::MakeItBestTransaction => - read_previous_block_best_priority::(client, context).await, - PrioritySelectionStrategy::MakeItBetterThanQueuedTransaction => - select_priority_from_queue::(client, context).await, - } - } -} - -#[derive(Debug)] -struct Context { - /// Priority selection strategy. - strategy: PrioritySelectionStrategy, - /// Best known block header. - best_header: C::Header, - /// Hash of the (potentially) stalled transaction. - transaction: Option, - /// How many times we have resubmitted this `transaction`? - resubmitted: u32, - /// This transaction is in pool for `stalled_for` wakeup intervals. - stalled_for: C::BlockNumber, - /// When `stalled_for` reaching this limit, transaction is considered stalled. - stalled_for_limit: C::BlockNumber, - /// Tip step interval. - tip_step: C::Balance, - /// Maximal tip. - tip_limit: C::Balance, -} - -impl Context { - /// Return true if transaction has stalled. - fn is_stalled(&self) -> bool { - self.stalled_for >= self.stalled_for_limit - } - - /// Notice resubmitted transaction. - fn notice_resubmitted_transaction(mut self, transaction: C::Hash) -> Self { - self.transaction = Some(transaction); - self.stalled_for = Zero::zero(); - self.resubmitted += 1; - self - } - - /// Notice transaction from the transaction pool. - fn notice_transaction(mut self, transaction: C::Hash) -> Self { - if self.transaction == Some(transaction) { - self.stalled_for += One::one(); - } else { - self.transaction = Some(transaction); - self.stalled_for = One::one(); - self.resubmitted = 0; - } - self - } -} - -/// Run resubmit transactions loop. -async fn run_until_connection_lost>( - client: Client, - transaction_params: TransactionParams, - mut context: Context, -) -> Result<(), FailedClient> { - loop { - async_std::task::sleep(C::AVERAGE_BLOCK_INTERVAL).await; - - let result = - run_loop_iteration::(client.clone(), transaction_params.clone(), context).await; - context = match result { - Ok(context) => context, - Err(error) => { - log::error!( - target: "bridge", - "Resubmit {} transactions loop has failed with error: {:?}", - C::NAME, - error, - ); - return Err(FailedClient::Target) - }, - }; - } -} - -/// Run single loop iteration. 
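The `Context` type above tracks whether the queued transaction has stalled: seeing the same transaction hash on consecutive wake-ups bumps `stalled_for`, a different hash resets the counter, and a resubmission clears it again. A minimal std-only model of that bookkeeping (hypothetical names), matching the behaviour exercised by the `context_works` test later in this file:

#[derive(Default)]
struct StallTracker {
    transaction: Option<[u8; 32]>,
    stalled_for: u32,
    resubmitted: u32,
    stalled_for_limit: u32,
}

impl StallTracker {
    // Same hash as last time => one more stalled interval; new hash => reset.
    fn notice_transaction(&mut self, hash: [u8; 32]) {
        if self.transaction == Some(hash) {
            self.stalled_for += 1;
        } else {
            self.transaction = Some(hash);
            self.stalled_for = 1;
            self.resubmitted = 0;
        }
    }

    // After resubmitting we start counting from scratch for the new hash.
    fn notice_resubmitted(&mut self, hash: [u8; 32]) {
        self.transaction = Some(hash);
        self.stalled_for = 0;
        self.resubmitted += 1;
    }

    fn is_stalled(&self) -> bool {
        self.stalled_for >= self.stalled_for_limit
    }
}

fn main() {
    let mut tracker = StallTracker { stalled_for_limit: 3, ..Default::default() };
    tracker.notice_transaction([0; 32]);
    tracker.notice_transaction([0; 32]);
    assert!(!tracker.is_stalled());
    tracker.notice_transaction([0; 32]);
    assert!(tracker.is_stalled()); // third sighting in a row => stalled
    tracker.notice_resubmitted([1; 32]);
    assert_eq!((tracker.stalled_for, tracker.resubmitted), (0, 1));
}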
-async fn run_loop_iteration>( - client: Client, - transaction_params: TransactionParams, - mut context: Context, -) -> Result, SubstrateError> { - // correct best header is required for all other actions - context.best_header = client.best_header().await?; - - // check if there's queued transaction, signed by given author - let original_transaction = - match lookup_signer_transaction::(&client, &transaction_params.signer).await? { - Some(original_transaction) => original_transaction, - None => { - log::trace!(target: "bridge", "No {} transactions from required signer in the txpool", C::NAME); - return Ok(context) - }, - }; - let original_transaction_hash = C::Hasher::hash(&original_transaction.encode()); - let context = context.notice_transaction(original_transaction_hash); - - // if transaction hasn't been mined for `stalled_blocks`, we'll need to resubmit it - if !context.is_stalled() { - log::trace!( - target: "bridge", - "{} transaction {:?} is not yet stalled ({:?}/{:?})", - C::NAME, - context.transaction, - context.stalled_for, - context.stalled_for_limit, - ); - return Ok(context) - } - - // select priority for updated transaction - let target_priority = - match context.strategy.select_target_priority::(&client, &context).await? { - Some(target_priority) => target_priority, - None => { - log::trace!(target: "bridge", "Failed to select target priority"); - return Ok(context) - }, - }; - - // update transaction tip - let (is_updated, updated_transaction) = update_transaction_tip::( - &client, - &transaction_params, - HeaderId(*context.best_header.number(), context.best_header.hash()), - original_transaction, - context.tip_step, - context.tip_limit, - target_priority, - ) - .await?; - - if !is_updated { - log::trace!(target: "bridge", "{} transaction tip can not be updated. Reached limit?", C::NAME); - return Ok(context) - } - - let updated_transaction = updated_transaction.encode(); - let updated_transaction_hash = C::Hasher::hash(&updated_transaction); - client.submit_unsigned_extrinsic(Bytes(updated_transaction)).await?; - - log::info!( - target: "bridge", - "Replaced {} transaction {} with {} in txpool", - C::NAME, - original_transaction_hash, - updated_transaction_hash, - ); - - Ok(context.notice_resubmitted_transaction(updated_transaction_hash)) -} - -/// Search transaction pool for transaction, signed by given key pair. -async fn lookup_signer_transaction>( - client: &Client, - key_pair: &S::AccountKeyPair, -) -> Result, SubstrateError> { - let pending_transactions = client.pending_extrinsics().await?; - for pending_transaction in pending_transactions { - let pending_transaction = S::SignedTransaction::decode(&mut &pending_transaction.0[..]) - .map_err(SubstrateError::ResponseParseFailed)?; - if !S::is_signed_by(key_pair, &pending_transaction) { - continue - } - - return Ok(Some(pending_transaction)) - } - - Ok(None) -} - -/// Read priority of best signed transaction of previous block. -async fn read_previous_block_best_priority>( - client: &Client, - context: &Context, -) -> Result, SubstrateError> { - let best_block = client.get_block(Some(context.best_header.hash())).await?; - let best_transaction = best_block - .extrinsics() - .iter() - .filter_map(|xt| S::SignedTransaction::decode(&mut &xt[..]).ok()) - .find(|xt| S::is_signed(xt)); - match best_transaction { - Some(best_transaction) => Ok(Some( - client - .validate_transaction(*context.best_header.parent_hash(), best_transaction) - .await?? 
- .priority, - )), - None => Ok(None), - } -} - -/// Select priority of some queued transaction. -async fn select_priority_from_queue>( - client: &Client, - context: &Context, -) -> Result, SubstrateError> { - // select transaction from the queue - let queued_transactions = client.pending_extrinsics().await?; - let selected_transaction = match select_transaction_from_queue(queued_transactions, context) { - Some(selected_transaction) => selected_transaction, - None => return Ok(None), - }; - - let selected_transaction = S::SignedTransaction::decode(&mut &selected_transaction[..]) - .map_err(SubstrateError::ResponseParseFailed)?; - let target_priority = client - .validate_transaction(context.best_header.hash(), selected_transaction) - .await?? - .priority; - Ok(Some(target_priority)) -} - -/// Select transaction with target priority from the vec of queued transactions. -fn select_transaction_from_queue( - mut queued_transactions: Vec, - context: &Context, -) -> Option { - if queued_transactions.is_empty() { - return None - } - - // the more times we resubmit transaction (`context.resubmitted`), the closer we move - // to the front of the transaction queue - let total_transactions = queued_transactions.len(); - let resubmitted_factor = context.resubmitted; - let divisor = - 1usize.saturating_add(1usize.checked_shl(resubmitted_factor).unwrap_or(usize::MAX)); - let transactions_to_skip = total_transactions / divisor; - - Some( - queued_transactions - .swap_remove(std::cmp::min(total_transactions - 1, transactions_to_skip)), - ) -} - -/// Try to find appropriate tip for transaction so that its priority is larger than given. -async fn update_transaction_tip>( - client: &Client, - transaction_params: &TransactionParams, - at_block: HeaderIdOf, - tx: S::SignedTransaction, - tip_step: C::Balance, - tip_limit: C::Balance, - target_priority: TransactionPriority, -) -> Result<(bool, S::SignedTransaction), SubstrateError> { - let stx = format!("{:?}", tx); - let mut current_priority = client.validate_transaction(at_block.1, tx.clone()).await??.priority; - let mut unsigned_tx = S::parse_transaction(tx).ok_or_else(|| { - SubstrateError::Custom(format!("Failed to parse {} transaction {}", C::NAME, stx,)) - })?; - let old_tip = unsigned_tx.tip; - - let (spec_version, transaction_version) = client.simple_runtime_version().await?; - while current_priority < target_priority { - let next_tip = unsigned_tx.tip + tip_step; - if next_tip > tip_limit { - break - } - - log::trace!( - target: "bridge", - "{} transaction priority with tip={:?}: {}. Target priority: {}", - C::NAME, - unsigned_tx.tip, - current_priority, - target_priority, - ); - - unsigned_tx.tip = next_tip; - current_priority = client - .validate_transaction( - at_block.1, - S::sign_transaction(SignParam { - spec_version, - transaction_version, - genesis_hash: *client.genesis_hash(), - signer: transaction_params.signer.clone(), - era: relay_substrate_client::TransactionEra::immortal(), - unsigned: unsigned_tx.clone(), - })?, - ) - .await?? 
- .priority; - } - - log::debug!( - target: "bridge", - "{} transaction tip has changed from {:?} to {:?}", - C::NAME, - old_tip, - unsigned_tx.tip, - ); - - Ok(( - old_tip != unsigned_tx.tip, - S::sign_transaction(SignParam { - spec_version, - transaction_version, - genesis_hash: *client.genesis_hash(), - signer: transaction_params.signer.clone(), - era: relay_substrate_client::TransactionEra::new( - at_block, - transaction_params.mortality, - ), - unsigned: unsigned_tx, - })?, - )) -} - -#[cfg(test)] -mod tests { - use super::*; - use bp_rialto::Hash; - use relay_rialto_client::Rialto; - - fn context() -> Context { - Context { - strategy: PrioritySelectionStrategy::MakeItBestTransaction, - best_header: HeaderOf::::new( - Default::default(), - Default::default(), - Default::default(), - Default::default(), - Default::default(), - ), - transaction: None, - resubmitted: 0, - stalled_for: Zero::zero(), - stalled_for_limit: 3, - tip_step: 100, - tip_limit: 1000, - } - } - - #[test] - fn context_works() { - let mut context = context(); - - // when transaction is noticed 2/3 times, it isn't stalled - context = context.notice_transaction(Default::default()); - assert!(!context.is_stalled()); - assert_eq!(context.stalled_for, 1); - assert_eq!(context.resubmitted, 0); - context = context.notice_transaction(Default::default()); - assert!(!context.is_stalled()); - assert_eq!(context.stalled_for, 2); - assert_eq!(context.resubmitted, 0); - - // when transaction is noticed for 3rd time in a row, it is considered stalled - context = context.notice_transaction(Default::default()); - assert!(context.is_stalled()); - assert_eq!(context.stalled_for, 3); - assert_eq!(context.resubmitted, 0); - - // and after we resubmit it, we forget previous transaction - context = context.notice_resubmitted_transaction(Hash::from([1; 32])); - assert_eq!(context.transaction, Some(Hash::from([1; 32]))); - assert_eq!(context.resubmitted, 1); - assert_eq!(context.stalled_for, 0); - } - - #[test] - fn select_transaction_from_queue_works_with_empty_queue() { - assert_eq!(select_transaction_from_queue(vec![], &context()), None); - } - - #[test] - fn select_transaction_from_queue_works() { - let mut context = context(); - let queued_transactions = vec![ - Bytes(vec![1]), - Bytes(vec![2]), - Bytes(vec![3]), - Bytes(vec![4]), - Bytes(vec![5]), - Bytes(vec![6]), - ]; - - // when we resubmit tx for the first time, 1/2 of queue is skipped - assert_eq!( - select_transaction_from_queue(queued_transactions.clone(), &context), - Some(Bytes(vec![4])), - ); - - // when we resubmit tx for the second time, 1/3 of queue is skipped - context = context.notice_resubmitted_transaction(Hash::from([1; 32])); - assert_eq!( - select_transaction_from_queue(queued_transactions.clone(), &context), - Some(Bytes(vec![3])), - ); - - // when we resubmit tx for the third time, 1/5 of queue is skipped - context = context.notice_resubmitted_transaction(Hash::from([2; 32])); - assert_eq!( - select_transaction_from_queue(queued_transactions.clone(), &context), - Some(Bytes(vec![2])), - ); - - // when we resubmit tx for the second time, 1/9 of queue is skipped - context = context.notice_resubmitted_transaction(Hash::from([3; 32])); - assert_eq!( - select_transaction_from_queue(queued_transactions, &context), - Some(Bytes(vec![1])), - ); - } -} diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/send_message.rs b/polkadot/bridges/relays/bin-substrate/src/cli/send_message.rs deleted file mode 100644 index ddb1ff59b5d..00000000000 --- 
a/polkadot/bridges/relays/bin-substrate/src/cli/send_message.rs +++ /dev/null @@ -1,444 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::cli::{ - bridge::FullBridge, - encode_call::{self, CliEncodeCall}, - estimate_fee::{estimate_message_delivery_and_dispatch_fee, ConversionRateOverride}, - Balance, ExplicitOrMaximal, HexBytes, HexLaneId, Origins, SourceConnectionParams, - SourceSigningParams, TargetConnectionParams, TargetSigningParams, -}; -use bp_message_dispatch::{CallOrigin, MessagePayload}; -use bp_runtime::Chain as _; -use codec::Encode; -use frame_support::weights::Weight; -use relay_substrate_client::{Chain, SignParam, TransactionSignScheme, UnsignedTransaction}; -use sp_core::{Bytes, Pair}; -use sp_runtime::{traits::IdentifyAccount, AccountId32, MultiSignature, MultiSigner}; -use std::fmt::Debug; -use structopt::StructOpt; -use strum::{EnumString, EnumVariantNames, VariantNames}; - -/// Relayer operating mode. -#[derive(Debug, EnumString, EnumVariantNames, Clone, Copy, PartialEq, Eq)] -#[strum(serialize_all = "kebab_case")] -pub enum DispatchFeePayment { - /// The dispatch fee is paid at the source chain. - AtSourceChain, - /// The dispatch fee is paid at the target chain. - AtTargetChain, -} - -impl From for bp_runtime::messages::DispatchFeePayment { - fn from(dispatch_fee_payment: DispatchFeePayment) -> Self { - match dispatch_fee_payment { - DispatchFeePayment::AtSourceChain => Self::AtSourceChain, - DispatchFeePayment::AtTargetChain => Self::AtTargetChain, - } - } -} - -/// Send bridge message. -#[derive(StructOpt)] -pub struct SendMessage { - /// A bridge instance to encode call for. - #[structopt(possible_values = FullBridge::VARIANTS, case_insensitive = true)] - bridge: FullBridge, - #[structopt(flatten)] - source: SourceConnectionParams, - #[structopt(flatten)] - source_sign: SourceSigningParams, - #[structopt(flatten)] - target_sign: TargetSigningParams, - /// Hex-encoded lane id. Defaults to `00000000`. - #[structopt(long, default_value = "00000000")] - lane: HexLaneId, - /// A way to override conversion rate between bridge tokens. - /// - /// If not specified, conversion rate from runtime storage is used. It may be obsolete and - /// your message won't be relayed. - #[structopt(long)] - conversion_rate_override: Option, - /// Where dispatch fee is paid? - #[structopt( - long, - possible_values = DispatchFeePayment::VARIANTS, - case_insensitive = true, - default_value = "at-source-chain", - )] - dispatch_fee_payment: DispatchFeePayment, - /// Dispatch weight of the message. If not passed, determined automatically. - #[structopt(long)] - dispatch_weight: Option>, - /// Delivery and dispatch fee in source chain base currency units. If not passed, determined - /// automatically. - #[structopt(long)] - fee: Option, - /// Message type. 
- #[structopt(subcommand)] - message: crate::cli::encode_call::Call, - /// The origin to use when dispatching the message on the target chain. Defaults to - /// `SourceAccount`. - #[structopt(long, possible_values = &Origins::variants(), default_value = "Source")] - origin: Origins, - - // Normally we don't need to connect to the target chain to send message. But for testing - // we may want to use **actual** `spec_version` of the target chain when composing a message. - // Then we'll need to read version from the target chain node. - #[structopt(flatten)] - target: TargetConnectionParams, -} - -impl SendMessage { - pub async fn encode_payload( - &mut self, - ) -> anyhow::Result>> { - crate::select_full_bridge!(self.bridge, { - let SendMessage { - source_sign, - target_sign, - ref mut message, - dispatch_fee_payment, - dispatch_weight, - origin, - bridge, - .. - } = self; - - let source_sign = source_sign.to_keypair::()?; - - encode_call::preprocess_call::(message, bridge.bridge_instance_index()); - let target_call = Target::encode_call(message)?; - let target_spec_version = self.target.selected_chain_spec_version::().await?; - - let payload = { - let target_call_weight = prepare_call_dispatch_weight( - dispatch_weight, - || { - Ok(ExplicitOrMaximal::Explicit( - Target::get_dispatch_info(&target_call)?.weight, - )) - }, - compute_maximal_message_dispatch_weight(Target::max_extrinsic_weight()), - )?; - let source_sender_public: MultiSigner = source_sign.public().into(); - let source_account_id = source_sender_public.into_account(); - - message_payload( - target_spec_version, - target_call_weight, - match origin { - Origins::Source => CallOrigin::SourceAccount(source_account_id), - Origins::Target => { - let target_sign = target_sign.to_keypair::()?; - let digest = account_ownership_digest( - &target_call, - source_account_id.clone(), - target_spec_version, - ); - let target_origin_public = target_sign.public(); - let digest_signature = target_sign.sign(&digest); - CallOrigin::TargetAccount( - source_account_id, - target_origin_public.into(), - digest_signature.into(), - ) - }, - }, - &target_call, - *dispatch_fee_payment, - ) - }; - Ok(payload) - }) - } - - /// Run the command. - pub async fn run(mut self) -> anyhow::Result<()> { - crate::select_full_bridge!(self.bridge, { - let payload = self.encode_payload().await?; - - let source_client = self.source.to_client::().await?; - let source_sign = self.source_sign.to_keypair::()?; - - let lane = self.lane.clone().into(); - let conversion_rate_override = self.conversion_rate_override; - let fee = match self.fee { - Some(fee) => fee, - None => Balance( - estimate_message_delivery_and_dispatch_fee::( - &source_client, - conversion_rate_override, - ESTIMATE_MESSAGE_FEE_METHOD, - lane, - payload.clone(), - ) - .await? 
as _, - ), - }; - let dispatch_weight = payload.weight; - let payload_len = payload.encode().len(); - let send_message_call = Source::encode_call(&encode_call::Call::BridgeSendMessage { - bridge_instance_index: self.bridge.bridge_instance_index(), - lane: self.lane, - payload: HexBytes::encode(&payload), - fee, - })?; - - let source_genesis_hash = *source_client.genesis_hash(); - let (spec_version, transaction_version) = - source_client.simple_runtime_version().await?; - let estimated_transaction_fee = source_client - .estimate_extrinsic_fee(Bytes( - Source::sign_transaction(SignParam { - spec_version, - transaction_version, - genesis_hash: source_genesis_hash, - signer: source_sign.clone(), - era: relay_substrate_client::TransactionEra::immortal(), - unsigned: UnsignedTransaction::new(send_message_call.clone(), 0), - })? - .encode(), - )) - .await?; - source_client - .submit_signed_extrinsic(source_sign.public().into(), move |_, transaction_nonce| { - let signed_source_call = Source::sign_transaction(SignParam { - spec_version, - transaction_version, - genesis_hash: source_genesis_hash, - signer: source_sign.clone(), - era: relay_substrate_client::TransactionEra::immortal(), - unsigned: UnsignedTransaction::new(send_message_call, transaction_nonce), - })? - .encode(); - - log::info!( - target: "bridge", - "Sending message to {}. Lane: {:?}. Size: {}. Dispatch weight: {}. Fee: {}", - Target::NAME, - lane, - payload_len, - dispatch_weight, - fee, - ); - log::info!( - target: "bridge", - "The source account ({:?}) balance will be reduced by (at most) {} (message fee) + {} (tx fee ) = {} {} tokens", - AccountId32::from(source_sign.public()), - fee.0, - estimated_transaction_fee.inclusion_fee(), - fee.0.saturating_add(estimated_transaction_fee.inclusion_fee() as _), - Source::NAME, - ); - log::info!( - target: "bridge", - "Signed {} Call: {:?}", - Source::NAME, - HexBytes::encode(&signed_source_call) - ); - - Ok(Bytes(signed_source_call)) - }) - .await?; - }); - - Ok(()) - } -} - -fn prepare_call_dispatch_weight( - user_specified_dispatch_weight: &Option>, - weight_from_pre_dispatch_call: impl Fn() -> anyhow::Result>, - maximal_allowed_weight: Weight, -) -> anyhow::Result { - match user_specified_dispatch_weight - .clone() - .map(Ok) - .unwrap_or_else(weight_from_pre_dispatch_call)? - { - ExplicitOrMaximal::Explicit(weight) => Ok(weight), - ExplicitOrMaximal::Maximal => Ok(maximal_allowed_weight), - } -} - -pub(crate) fn message_payload( - spec_version: u32, - weight: Weight, - origin: CallOrigin, - call: &impl Encode, - dispatch_fee_payment: DispatchFeePayment, -) -> MessagePayload> -where - SAccountId: Encode + Debug, - TPublic: Encode + Debug, - TSignature: Encode + Debug, -{ - // Display nicely formatted call. 
- let payload = MessagePayload { - spec_version, - weight, - origin, - dispatch_fee_payment: dispatch_fee_payment.into(), - call: HexBytes::encode(call), - }; - - log::info!(target: "bridge", "Created Message Payload: {:#?}", payload); - log::info!(target: "bridge", "Encoded Message Payload: {:?}", HexBytes::encode(&payload)); - - // re-pack to return `Vec` - let MessagePayload { spec_version, weight, origin, dispatch_fee_payment, call } = payload; - MessagePayload { spec_version, weight, origin, dispatch_fee_payment, call: call.0 } -} - -pub(crate) fn compute_maximal_message_dispatch_weight(maximal_extrinsic_weight: Weight) -> Weight { - bridge_runtime_common::messages::target::maximal_incoming_message_dispatch_weight( - maximal_extrinsic_weight, - ) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::cli::CliChain; - use hex_literal::hex; - - #[async_std::test] - async fn send_remark_rialto_to_millau() { - // given - let mut send_message = SendMessage::from_iter(vec![ - "send-message", - "rialto-to-millau", - "--source-port", - "1234", - "--source-signer", - "//Alice", - "--conversion-rate-override", - "0.75", - "remark", - "--remark-payload", - "1234", - ]); - - // when - let payload = send_message.encode_payload().await.unwrap(); - - // then - assert_eq!( - payload, - MessagePayload { - spec_version: relay_millau_client::Millau::RUNTIME_VERSION.spec_version, - weight: 0, - origin: CallOrigin::SourceAccount( - sp_keyring::AccountKeyring::Alice.to_account_id() - ), - dispatch_fee_payment: bp_runtime::messages::DispatchFeePayment::AtSourceChain, - call: hex!("0001081234").to_vec(), - } - ); - } - - #[async_std::test] - async fn send_remark_millau_to_rialto() { - // given - let mut send_message = SendMessage::from_iter(vec![ - "send-message", - "millau-to-rialto", - "--source-port", - "1234", - "--source-signer", - "//Alice", - "--origin", - "Target", - "--target-signer", - "//Bob", - "--conversion-rate-override", - "metric", - "remark", - "--remark-payload", - "1234", - ]); - - // when - let payload = send_message.encode_payload().await.unwrap(); - - // then - // Since signatures are randomized we extract it from here and only check the rest. 
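The `prepare_call_dispatch_weight` helper earlier in this file resolves the message's dispatch weight from three sources: an explicit user value wins, otherwise the weight is read from the call itself, and `Maximal` falls back to the largest weight an incoming message may consume. A std-only model of that resolution (the type and function names below are illustrative only):

type Weight = u64;

enum ExplicitOrMaximal<T> {
    Explicit(T),
    Maximal,
}

fn resolve_dispatch_weight(
    user_specified: Option<ExplicitOrMaximal<Weight>>,
    weight_of_call: impl Fn() -> ExplicitOrMaximal<Weight>,
    maximal_allowed: Weight,
) -> Weight {
    match user_specified.unwrap_or_else(weight_of_call) {
        ExplicitOrMaximal::Explicit(weight) => weight,
        ExplicitOrMaximal::Maximal => maximal_allowed,
    }
}

fn main() {
    // Pretend the pre-dispatch weight of the encoded call is 10_000.
    let from_call = || ExplicitOrMaximal::Explicit(10_000);
    assert_eq!(resolve_dispatch_weight(None, from_call, 1_000_000), 10_000);
    assert_eq!(
        resolve_dispatch_weight(Some(ExplicitOrMaximal::Maximal), from_call, 1_000_000),
        1_000_000,
    );
}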
- let signature = match payload.origin { - CallOrigin::TargetAccount(_, _, ref sig) => sig.clone(), - _ => panic!("Unexpected `CallOrigin`: {:?}", payload), - }; - assert_eq!( - payload, - MessagePayload { - spec_version: relay_millau_client::Millau::RUNTIME_VERSION.spec_version, - weight: 0, - origin: CallOrigin::TargetAccount( - sp_keyring::AccountKeyring::Alice.to_account_id(), - sp_keyring::AccountKeyring::Bob.into(), - signature, - ), - dispatch_fee_payment: bp_runtime::messages::DispatchFeePayment::AtSourceChain, - call: hex!("0001081234").to_vec(), - } - ); - } - - #[test] - fn accepts_send_message_command_without_target_sign_options() { - // given - let send_message = SendMessage::from_iter_safe(vec![ - "send-message", - "rialto-to-millau", - "--source-port", - "1234", - "--source-signer", - "//Alice", - "--origin", - "Target", - "remark", - "--remark-payload", - "1234", - ]); - - assert!(send_message.is_ok()); - } - - #[async_std::test] - async fn accepts_non_default_dispatch_fee_payment() { - // given - let mut send_message = SendMessage::from_iter(vec![ - "send-message", - "rialto-to-millau", - "--source-port", - "1234", - "--source-signer", - "//Alice", - "--dispatch-fee-payment", - "at-target-chain", - "remark", - ]); - - // when - let payload = send_message.encode_payload().await.unwrap(); - - // then - assert_eq!( - payload.dispatch_fee_payment, - bp_runtime::messages::DispatchFeePayment::AtTargetChain - ); - } -} diff --git a/polkadot/bridges/relays/bin-substrate/src/cli/swap_tokens.rs b/polkadot/bridges/relays/bin-substrate/src/cli/swap_tokens.rs deleted file mode 100644 index 0758deddfd1..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/cli/swap_tokens.rs +++ /dev/null @@ -1,869 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Tokens swap using token-swap bridge pallet. - -// TokenSwapBalances fields are never directly accessed, but the whole struct is printed -// to show token swap progress -#![allow(dead_code)] - -use codec::Encode; -use num_traits::One; -use rand::random; -use structopt::StructOpt; -use strum::{EnumString, EnumVariantNames, VariantNames}; - -use frame_support::dispatch::GetDispatchInfo; -use relay_substrate_client::{ - AccountIdOf, AccountPublicOf, BalanceOf, BlockNumberOf, CallOf, Chain, ChainWithBalances, - Client, Error as SubstrateError, HashOf, SignParam, SignatureOf, Subscription, - TransactionSignScheme, TransactionStatusOf, UnsignedTransaction, -}; -use sp_core::{blake2_256, storage::StorageKey, Bytes, Pair, U256}; -use sp_runtime::traits::{Convert, Header as HeaderT}; - -use crate::cli::{ - estimate_fee::ConversionRateOverride, Balance, CliChain, SourceConnectionParams, - SourceSigningParams, TargetConnectionParams, TargetSigningParams, -}; - -/// Swap tokens. 
-#[derive(StructOpt, Debug, PartialEq)] -pub struct SwapTokens { - /// A bridge instance to use in token swap. - #[structopt(possible_values = SwapTokensBridge::VARIANTS, case_insensitive = true)] - bridge: SwapTokensBridge, - - #[structopt(flatten)] - source: SourceConnectionParams, - #[structopt(flatten)] - source_sign: SourceSigningParams, - - #[structopt(flatten)] - target: TargetConnectionParams, - #[structopt(flatten)] - target_sign: TargetSigningParams, - - #[structopt(subcommand)] - swap_type: TokenSwapType, - /// Source chain balance that source signer wants to swap. - #[structopt(long)] - source_balance: Balance, - /// Target chain balance that target signer wants to swap. - #[structopt(long)] - target_balance: Balance, - /// A way to override conversion rate from target to source tokens. - /// - /// If not specified, conversion rate from runtime storage is used. It may be obsolete and - /// your message won't be relayed. - #[structopt(long)] - target_to_source_conversion_rate_override: Option, - /// A way to override conversion rate from source to target tokens. - /// - /// If not specified, conversion rate from runtime storage is used. It may be obsolete and - /// your message won't be relayed. - #[structopt(long)] - source_to_target_conversion_rate_override: Option, -} - -/// Token swap type. -#[derive(StructOpt, Debug, PartialEq, Eq, Clone)] -pub enum TokenSwapType { - /// The `target_sign` is temporary and only have funds for single swap. - NoLock, - /// This swap type prevents `source_signer` from restarting the swap after it has been - /// completed. - LockUntilBlock { - /// Number of blocks before the swap expires. - #[structopt(long)] - blocks_before_expire: u32, - /// Unique swap nonce. - #[structopt(long)] - swap_nonce: Option, - }, -} - -/// Swap tokens bridge. -#[derive(Debug, EnumString, EnumVariantNames, PartialEq)] -#[strum(serialize_all = "kebab_case")] -pub enum SwapTokensBridge { - /// Use token-swap pallet deployed at Millau to swap tokens with Rialto. - MillauToRialto, -} - -macro_rules! select_bridge { - ($bridge: expr, $generic: tt) => { - match $bridge { - SwapTokensBridge::MillauToRialto => { - type Source = relay_millau_client::Millau; - type Target = relay_rialto_client::Rialto; - const SOURCE_SPEC_VERSION: u32 = millau_runtime::VERSION.spec_version; - const TARGET_SPEC_VERSION: u32 = rialto_runtime::VERSION.spec_version; - - type FromSwapToThisAccountIdConverter = bp_rialto::AccountIdConverter; - - use bp_millau::{ - derive_account_from_rialto_id as derive_source_account_from_target_account, - TO_MILLAU_ESTIMATE_MESSAGE_FEE_METHOD as ESTIMATE_TARGET_TO_SOURCE_MESSAGE_FEE_METHOD, - WITH_RIALTO_TOKEN_SWAP_PALLET_NAME as TOKEN_SWAP_PALLET_NAME, - }; - use bp_rialto::{ - derive_account_from_millau_id as derive_target_account_from_source_account, - TO_RIALTO_ESTIMATE_MESSAGE_FEE_METHOD as ESTIMATE_SOURCE_TO_TARGET_MESSAGE_FEE_METHOD, - }; - - const SOURCE_CHAIN_ID: bp_runtime::ChainId = bp_runtime::MILLAU_CHAIN_ID; - const TARGET_CHAIN_ID: bp_runtime::ChainId = bp_runtime::RIALTO_CHAIN_ID; - - const SOURCE_TO_TARGET_LANE_ID: bp_messages::LaneId = *b"swap"; - const TARGET_TO_SOURCE_LANE_ID: bp_messages::LaneId = [0, 0, 0, 0]; - - $generic - }, - } - }; -} - -impl SwapTokens { - /// Run the command. 
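Both this file and the resubmit-transactions command above rely on the same `select_bridge!` trick: one macro arm per bridge binds the concrete chain types (and per-bridge constants) as local aliases, then expands a single shared block against them. A compact, self-contained illustration of the pattern with stand-in chain types (everything below is illustrative, not the real relay crates):

enum Bridge {
    MillauToRialto,
}

struct Millau;
struct Rialto;

trait Named {
    const NAME: &'static str;
}
impl Named for Millau {
    const NAME: &'static str = "Millau";
}
impl Named for Rialto {
    const NAME: &'static str = "Rialto";
}

// Each arm introduces `Source` / `Target` aliases and then expands the same
// generic block, so the block is written once but monomorphized per bridge.
macro_rules! select_bridge {
    ($bridge:expr, $generic:tt) => {
        match $bridge {
            Bridge::MillauToRialto => {
                type Source = Millau;
                type Target = Rialto;
                $generic
            },
        }
    };
}

fn main() {
    let bridge = Bridge::MillauToRialto;
    select_bridge!(bridge, {
        println!(
            "relaying from {} to {}",
            <Source as Named>::NAME,
            <Target as Named>::NAME
        );
    });
}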
- pub async fn run(self) -> anyhow::Result<()> { - select_bridge!(self.bridge, { - let source_client = self.source.to_client::().await?; - let source_sign = self.source_sign.to_keypair::()?; - let target_client = self.target.to_client::().await?; - let target_sign = self.target_sign.to_keypair::()?; - let target_to_source_conversion_rate_override = - self.target_to_source_conversion_rate_override; - let source_to_target_conversion_rate_override = - self.source_to_target_conversion_rate_override; - - // names of variables in this function are matching names used by the - // `pallet-bridge-token-swap` - - // prepare token swap intention - let token_swap = self - .prepare_token_swap::(&source_client, &source_sign, &target_sign) - .await?; - - // group all accounts that will be used later - let accounts = TokenSwapAccounts { - source_account_at_bridged_chain: derive_target_account_from_source_account( - bp_runtime::SourceAccount::Account( - token_swap.source_account_at_this_chain.clone(), - ), - ), - target_account_at_this_chain: derive_source_account_from_target_account( - bp_runtime::SourceAccount::Account( - token_swap.target_account_at_bridged_chain.clone(), - ), - ), - source_account_at_this_chain: token_swap.source_account_at_this_chain.clone(), - target_account_at_bridged_chain: token_swap.target_account_at_bridged_chain.clone(), - swap_account: FromSwapToThisAccountIdConverter::convert( - token_swap.using_encoded(blake2_256).into(), - ), - }; - - // account balances are used to demonstrate what's happening :) - let initial_balances = - read_account_balances(&accounts, &source_client, &target_client).await?; - - // before calling something that may fail, log what we're trying to do - log::info!(target: "bridge", "Starting swap: {:?}", token_swap); - log::info!(target: "bridge", "Swap accounts: {:?}", accounts); - log::info!(target: "bridge", "Initial account balances: {:?}", initial_balances); - - // - // Step 1: swap is created - // - - // prepare `Currency::transfer` call that will happen at the target chain - let bridged_currency_transfer: CallOf = pallet_balances::Call::transfer { - dest: accounts.source_account_at_bridged_chain.clone().into(), - value: token_swap.target_balance_at_bridged_chain, - } - .into(); - let bridged_currency_transfer_weight = - bridged_currency_transfer.get_dispatch_info().weight; - - // sign message - let bridged_chain_spec_version = TARGET_SPEC_VERSION; - let signature_payload = pallet_bridge_dispatch::account_ownership_digest( - &bridged_currency_transfer, - &accounts.swap_account, - &bridged_chain_spec_version, - SOURCE_CHAIN_ID, - TARGET_CHAIN_ID, - ); - let bridged_currency_transfer_signature: SignatureOf = - target_sign.sign(&signature_payload).into(); - - // prepare `create_swap` call - let target_public_at_bridged_chain: AccountPublicOf = - target_sign.public().into(); - let swap_delivery_and_dispatch_fee = - crate::cli::estimate_fee::estimate_message_delivery_and_dispatch_fee::< - Source, - Target, - _, - >( - &source_client, - target_to_source_conversion_rate_override.clone(), - ESTIMATE_SOURCE_TO_TARGET_MESSAGE_FEE_METHOD, - SOURCE_TO_TARGET_LANE_ID, - bp_message_dispatch::MessagePayload { - spec_version: TARGET_SPEC_VERSION, - weight: bridged_currency_transfer_weight, - origin: bp_message_dispatch::CallOrigin::TargetAccount( - accounts.swap_account.clone(), - target_public_at_bridged_chain.clone(), - bridged_currency_transfer_signature.clone(), - ), - dispatch_fee_payment: - bp_runtime::messages::DispatchFeePayment::AtTargetChain, - call: 
bridged_currency_transfer.encode(), - }, - ) - .await?; - let create_swap_call: CallOf = pallet_bridge_token_swap::Call::create_swap { - swap: token_swap.clone(), - swap_creation_params: Box::new(bp_token_swap::TokenSwapCreation { - target_public_at_bridged_chain, - swap_delivery_and_dispatch_fee, - bridged_chain_spec_version, - bridged_currency_transfer: bridged_currency_transfer.encode(), - bridged_currency_transfer_weight, - bridged_currency_transfer_signature, - }), - } - .into(); - - // start tokens swap - let source_genesis_hash = *source_client.genesis_hash(); - let create_swap_signer = source_sign.clone(); - let (spec_version, transaction_version) = - source_client.simple_runtime_version().await?; - let swap_created_at = wait_until_transaction_is_finalized::( - source_client - .submit_and_watch_signed_extrinsic( - accounts.source_account_at_this_chain.clone(), - move |_, transaction_nonce| { - Ok(Bytes( - Source::sign_transaction(SignParam { - spec_version, - transaction_version, - genesis_hash: source_genesis_hash, - signer: create_swap_signer, - era: relay_substrate_client::TransactionEra::immortal(), - unsigned: UnsignedTransaction::new( - create_swap_call.into(), - transaction_nonce, - ), - })? - .encode(), - )) - }, - ) - .await?, - ) - .await?; - - // read state of swap after it has been created - let token_swap_hash = token_swap.hash(); - let token_swap_storage_key = bp_token_swap::storage_keys::pending_swaps_key( - TOKEN_SWAP_PALLET_NAME, - token_swap_hash, - ); - match read_token_swap_state(&source_client, swap_created_at, &token_swap_storage_key) - .await? - { - Some(bp_token_swap::TokenSwapState::Started) => { - log::info!(target: "bridge", "Swap has been successfully started"); - let intermediate_balances = - read_account_balances(&accounts, &source_client, &target_client).await?; - log::info!(target: "bridge", "Intermediate balances: {:?}", intermediate_balances); - }, - Some(token_swap_state) => - return Err(anyhow::format_err!( - "Fresh token swap has unexpected state: {:?}", - token_swap_state, - )), - None => return Err(anyhow::format_err!("Failed to start token swap")), - }; - - // - // Step 2: message is being relayed to the target chain and dispathed there - // - - // wait until message is dispatched at the target chain and dispatch result delivered - // back to source chain - let token_swap_state = wait_until_token_swap_state_is_changed( - &source_client, - &token_swap_storage_key, - bp_token_swap::TokenSwapState::Started, - ) - .await?; - let is_transfer_succeeded = match token_swap_state { - Some(bp_token_swap::TokenSwapState::Started) => { - unreachable!("wait_until_token_swap_state_is_changed only returns if state is not Started; qed",) - }, - None => - return Err(anyhow::format_err!("Fresh token swap has disappeared unexpectedly")), - Some(bp_token_swap::TokenSwapState::Confirmed) => { - log::info!( - target: "bridge", - "Transfer has been successfully dispatched at the target chain. Swap can be claimed", - ); - true - }, - Some(bp_token_swap::TokenSwapState::Failed) => { - log::info!( - target: "bridge", - "Transfer has been dispatched with an error at the target chain. 
Swap can be canceled", - ); - false - }, - }; - - // by this time: (1) token swap account has been created and (2) if transfer has been - // successfully dispatched, both target chain balances have changed - let intermediate_balances = - read_account_balances(&accounts, &source_client, &target_client).await?; - log::info!(target: "bridge", "Intermediate balances: {:?}", intermediate_balances); - - // transfer has been dispatched, but we may need to wait until block where swap can be - // claimed/canceled - if let bp_token_swap::TokenSwapType::LockClaimUntilBlock( - ref last_available_block_number, - _, - ) = token_swap.swap_type - { - wait_until_swap_unlocked( - &source_client, - last_available_block_number + BlockNumberOf::::one(), - ) - .await?; - } - - // - // Step 3: we may now claim or cancel the swap - // - - if is_transfer_succeeded { - log::info!(target: "bridge", "Claiming the swap"); - - // prepare `claim_swap` message that will be sent over the bridge - let claim_swap_call: CallOf = - pallet_bridge_token_swap::Call::claim_swap { swap: token_swap }.into(); - let claim_swap_message = bp_message_dispatch::MessagePayload { - spec_version: SOURCE_SPEC_VERSION, - weight: claim_swap_call.get_dispatch_info().weight, - origin: bp_message_dispatch::CallOrigin::SourceAccount( - accounts.target_account_at_bridged_chain.clone(), - ), - dispatch_fee_payment: bp_runtime::messages::DispatchFeePayment::AtSourceChain, - call: claim_swap_call.encode(), - }; - let claim_swap_delivery_and_dispatch_fee = - crate::cli::estimate_fee::estimate_message_delivery_and_dispatch_fee::< - Target, - Source, - _, - >( - &target_client, - source_to_target_conversion_rate_override.clone(), - ESTIMATE_TARGET_TO_SOURCE_MESSAGE_FEE_METHOD, - TARGET_TO_SOURCE_LANE_ID, - claim_swap_message.clone(), - ) - .await?; - let send_message_call: CallOf = - pallet_bridge_messages::Call::send_message { - lane_id: TARGET_TO_SOURCE_LANE_ID, - payload: claim_swap_message, - delivery_and_dispatch_fee: claim_swap_delivery_and_dispatch_fee, - } - .into(); - - // send `claim_swap` message - let target_genesis_hash = *target_client.genesis_hash(); - let (spec_version, transaction_version) = - target_client.simple_runtime_version().await?; - let _ = wait_until_transaction_is_finalized::( - target_client - .submit_and_watch_signed_extrinsic( - accounts.target_account_at_bridged_chain.clone(), - move |_, transaction_nonce| { - Ok(Bytes( - Target::sign_transaction(SignParam { - spec_version, - transaction_version, - genesis_hash: target_genesis_hash, - signer: target_sign, - era: relay_substrate_client::TransactionEra::immortal(), - unsigned: UnsignedTransaction::new( - send_message_call.into(), - transaction_nonce, - ), - })? 
- .encode(), - )) - }, - ) - .await?, - ) - .await?; - - // wait until swap state is updated - let token_swap_state = wait_until_token_swap_state_is_changed( - &source_client, - &token_swap_storage_key, - bp_token_swap::TokenSwapState::Confirmed, - ) - .await?; - if token_swap_state != None { - return Err(anyhow::format_err!( - "Confirmed token swap state has been changed to {:?} unexpectedly", - token_swap_state - )) - } - } else { - log::info!(target: "bridge", "Cancelling the swap"); - let cancel_swap_call: CallOf = - pallet_bridge_token_swap::Call::cancel_swap { swap: token_swap.clone() }.into(); - let (spec_version, transaction_version) = - source_client.simple_runtime_version().await?; - let _ = wait_until_transaction_is_finalized::( - source_client - .submit_and_watch_signed_extrinsic( - accounts.source_account_at_this_chain.clone(), - move |_, transaction_nonce| { - Ok(Bytes( - Source::sign_transaction(SignParam { - spec_version, - transaction_version, - genesis_hash: source_genesis_hash, - signer: source_sign, - era: relay_substrate_client::TransactionEra::immortal(), - unsigned: UnsignedTransaction::new( - cancel_swap_call.into(), - transaction_nonce, - ), - })? - .encode(), - )) - }, - ) - .await?, - ) - .await?; - } - - // print final balances - let final_balances = - read_account_balances(&accounts, &source_client, &target_client).await?; - log::info!(target: "bridge", "Final account balances: {:?}", final_balances); - - Ok(()) - }) - } - - /// Prepare token swap intention. - async fn prepare_token_swap( - &self, - source_client: &Client, - source_sign: &Source::KeyPair, - target_sign: &Target::KeyPair, - ) -> anyhow::Result< - bp_token_swap::TokenSwap< - BlockNumberOf, - BalanceOf, - AccountIdOf, - BalanceOf, - AccountIdOf, - >, - > - where - AccountIdOf: From<::Public>, - AccountIdOf: From<::Public>, - BalanceOf: From, - BalanceOf: From, - { - // accounts that are directly controlled by participants - let source_account_at_this_chain: AccountIdOf = source_sign.public().into(); - let target_account_at_bridged_chain: AccountIdOf = target_sign.public().into(); - - // balances that we're going to swap - let source_balance_at_this_chain: BalanceOf = self.source_balance.cast().into(); - let target_balance_at_bridged_chain: BalanceOf = self.target_balance.cast().into(); - - // prepare token swap intention - Ok(bp_token_swap::TokenSwap { - swap_type: self.prepare_token_swap_type(source_client).await?, - source_balance_at_this_chain, - source_account_at_this_chain: source_account_at_this_chain.clone(), - target_balance_at_bridged_chain, - target_account_at_bridged_chain: target_account_at_bridged_chain.clone(), - }) - } - - /// Prepare token swap type. - async fn prepare_token_swap_type( - &self, - source_client: &Client, - ) -> anyhow::Result>> { - match self.swap_type { - TokenSwapType::NoLock => - Ok(bp_token_swap::TokenSwapType::TemporaryTargetAccountAtBridgedChain), - TokenSwapType::LockUntilBlock { blocks_before_expire, ref swap_nonce } => { - let blocks_before_expire: BlockNumberOf = blocks_before_expire.into(); - let current_source_block_number = *source_client.best_header().await?.number(); - Ok(bp_token_swap::TokenSwapType::LockClaimUntilBlock( - current_source_block_number + blocks_before_expire, - swap_nonce.unwrap_or_else(|| { - U256::from(random::()).overflowing_mul(U256::from(random::())).0 - }), - )) - }, - } - } -} - -/// Accounts that are participating in the swap. 
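`SwapTokens::run` above proceeds in three steps: create the swap (state `Started`), wait for the bridged transfer's dispatch result to be delivered back (`Confirmed` or `Failed`), and only then claim or cancel it. A tiny std-only state table capturing that decision (the names are illustrative; the real states live in `bp_token_swap`):

#[derive(Debug, Clone, Copy, PartialEq)]
enum SwapState {
    Started,
    Confirmed,
    Failed,
}

#[derive(Debug, PartialEq)]
enum NextAction {
    Claim,
    Cancel,
}

// What the source-chain participant does once the dispatch result is known.
fn next_action(state: SwapState) -> Option<NextAction> {
    match state {
        SwapState::Started => None, // still waiting for the dispatch result
        SwapState::Confirmed => Some(NextAction::Claim),
        SwapState::Failed => Some(NextAction::Cancel),
    }
}

fn main() {
    assert_eq!(next_action(SwapState::Started), None);
    assert_eq!(next_action(SwapState::Confirmed), Some(NextAction::Claim));
    assert_eq!(next_action(SwapState::Failed), Some(NextAction::Cancel));
}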
-#[derive(Debug)] -struct TokenSwapAccounts { - source_account_at_this_chain: ThisAccountId, - source_account_at_bridged_chain: BridgedAccountId, - target_account_at_bridged_chain: BridgedAccountId, - target_account_at_this_chain: ThisAccountId, - swap_account: ThisAccountId, -} - -/// Swap accounts balances. -#[derive(Debug)] -struct TokenSwapBalances { - source_account_at_this_chain_balance: Option, - source_account_at_bridged_chain_balance: Option, - target_account_at_bridged_chain_balance: Option, - target_account_at_this_chain_balance: Option, - swap_account_balance: Option, -} - -/// Read swap accounts balances. -async fn read_account_balances( - accounts: &TokenSwapAccounts, AccountIdOf>, - source_client: &Client, - target_client: &Client, -) -> anyhow::Result, BalanceOf>> { - Ok(TokenSwapBalances { - source_account_at_this_chain_balance: read_account_balance( - source_client, - &accounts.source_account_at_this_chain, - ) - .await?, - source_account_at_bridged_chain_balance: read_account_balance( - target_client, - &accounts.source_account_at_bridged_chain, - ) - .await?, - target_account_at_bridged_chain_balance: read_account_balance( - target_client, - &accounts.target_account_at_bridged_chain, - ) - .await?, - target_account_at_this_chain_balance: read_account_balance( - source_client, - &accounts.target_account_at_this_chain, - ) - .await?, - swap_account_balance: read_account_balance(source_client, &accounts.swap_account).await?, - }) -} - -/// Read account balance. -async fn read_account_balance( - client: &Client, - account: &AccountIdOf, -) -> anyhow::Result>> { - match client.free_native_balance(account.clone()).await { - Ok(balance) => Ok(Some(balance)), - Err(SubstrateError::AccountDoesNotExist) => Ok(None), - Err(error) => Err(anyhow::format_err!( - "Failed to read balance of {} account {:?}: {:?}", - C::NAME, - account, - error, - )), - } -} - -/// Wait until transaction is included into finalized block. -/// -/// Returns the hash of the finalized block with transaction. -pub(crate) async fn wait_until_transaction_is_finalized( - subscription: Subscription>, -) -> anyhow::Result> { - loop { - let transaction_status = subscription.next().await?; - match transaction_status { - Some(TransactionStatusOf::::FinalityTimeout(_)) | - Some(TransactionStatusOf::::Usurped(_)) | - Some(TransactionStatusOf::::Dropped) | - Some(TransactionStatusOf::::Invalid) | - None => - return Err(anyhow::format_err!( - "We've been waiting for finalization of {} transaction, but it now has the {:?} status", - C::NAME, - transaction_status, - )), - Some(TransactionStatusOf::::Finalized(block_hash)) => { - log::trace!( - target: "bridge", - "{} transaction has been finalized at block {}", - C::NAME, - block_hash, - ); - return Ok(block_hash) - }, - _ => { - log::trace!( - target: "bridge", - "Received intermediate status of {} transaction: {:?}", - C::NAME, - transaction_status, - ); - }, - } - } -} - -/// Waits until token swap state is changed from `Started` to something else. 
-async fn wait_until_token_swap_state_is_changed( - client: &Client, - swap_state_storage_key: &StorageKey, - previous_token_swap_state: bp_token_swap::TokenSwapState, -) -> anyhow::Result> { - log::trace!(target: "bridge", "Waiting for token swap state change"); - loop { - async_std::task::sleep(C::AVERAGE_BLOCK_INTERVAL).await; - - let best_block = client.best_finalized_header_number().await?; - let best_block_hash = client.block_hash_by_number(best_block).await?; - log::trace!(target: "bridge", "Inspecting {} block {}/{}", C::NAME, best_block, best_block_hash); - - let token_swap_state = - read_token_swap_state(client, best_block_hash, swap_state_storage_key).await?; - match token_swap_state { - Some(new_token_swap_state) if new_token_swap_state == previous_token_swap_state => {}, - _ => { - log::trace!( - target: "bridge", - "Token swap state has been changed from {:?} to {:?}", - previous_token_swap_state, - token_swap_state, - ); - return Ok(token_swap_state) - }, - } - } -} - -/// Waits until swap can be claimed or canceled. -async fn wait_until_swap_unlocked( - client: &Client, - required_block_number: BlockNumberOf, -) -> anyhow::Result<()> { - log::trace!(target: "bridge", "Waiting for token swap unlock"); - loop { - async_std::task::sleep(C::AVERAGE_BLOCK_INTERVAL).await; - - let best_block = client.best_finalized_header_number().await?; - let best_block_hash = client.block_hash_by_number(best_block).await?; - if best_block >= required_block_number { - return Ok(()) - } - - log::trace!(target: "bridge", "Skipping {} block {}/{}", C::NAME, best_block, best_block_hash); - } -} - -/// Read state of the active token swap. -async fn read_token_swap_state( - client: &Client, - at_block: C::Hash, - swap_state_storage_key: &StorageKey, -) -> anyhow::Result> { - Ok(client.storage_value(swap_state_storage_key.clone(), Some(at_block)).await?) 
-} - -#[cfg(test)] -mod tests { - use super::*; - use crate::cli::{RuntimeVersionType, SourceRuntimeVersionParams, TargetRuntimeVersionParams}; - - #[test] - fn swap_tokens_millau_to_rialto_no_lock() { - let swap_tokens = SwapTokens::from_iter(vec![ - "swap-tokens", - "millau-to-rialto", - "--source-host", - "127.0.0.1", - "--source-port", - "9000", - "--source-signer", - "//Alice", - "--source-balance", - "8000000000", - "--target-host", - "127.0.0.1", - "--target-port", - "9001", - "--target-signer", - "//Bob", - "--target-balance", - "9000000000", - "no-lock", - ]); - - assert_eq!( - swap_tokens, - SwapTokens { - bridge: SwapTokensBridge::MillauToRialto, - source: SourceConnectionParams { - source_host: "127.0.0.1".into(), - source_port: 9000, - source_secure: false, - source_runtime_version: SourceRuntimeVersionParams { - source_version_mode: RuntimeVersionType::Bundle, - source_spec_version: None, - source_transaction_version: None, - } - }, - source_sign: SourceSigningParams { - source_signer: Some("//Alice".into()), - source_signer_password: None, - source_signer_file: None, - source_signer_password_file: None, - source_transactions_mortality: None, - }, - target: TargetConnectionParams { - target_host: "127.0.0.1".into(), - target_port: 9001, - target_secure: false, - target_runtime_version: TargetRuntimeVersionParams { - target_version_mode: RuntimeVersionType::Bundle, - target_spec_version: None, - target_transaction_version: None, - } - }, - target_sign: TargetSigningParams { - target_signer: Some("//Bob".into()), - target_signer_password: None, - target_signer_file: None, - target_signer_password_file: None, - target_transactions_mortality: None, - }, - swap_type: TokenSwapType::NoLock, - source_balance: Balance(8000000000), - target_balance: Balance(9000000000), - target_to_source_conversion_rate_override: None, - source_to_target_conversion_rate_override: None, - } - ); - } - - #[test] - fn swap_tokens_millau_to_rialto_lock_until() { - let swap_tokens = SwapTokens::from_iter(vec![ - "swap-tokens", - "millau-to-rialto", - "--source-host", - "127.0.0.1", - "--source-port", - "9000", - "--source-signer", - "//Alice", - "--source-balance", - "8000000000", - "--target-host", - "127.0.0.1", - "--target-port", - "9001", - "--target-signer", - "//Bob", - "--target-balance", - "9000000000", - "--target-to-source-conversion-rate-override", - "metric", - "--source-to-target-conversion-rate-override", - "84.56", - "lock-until-block", - "--blocks-before-expire", - "1", - ]); - - assert_eq!( - swap_tokens, - SwapTokens { - bridge: SwapTokensBridge::MillauToRialto, - source: SourceConnectionParams { - source_host: "127.0.0.1".into(), - source_port: 9000, - source_secure: false, - source_runtime_version: SourceRuntimeVersionParams { - source_version_mode: RuntimeVersionType::Bundle, - source_spec_version: None, - source_transaction_version: None, - } - }, - source_sign: SourceSigningParams { - source_signer: Some("//Alice".into()), - source_signer_password: None, - source_signer_file: None, - source_signer_password_file: None, - source_transactions_mortality: None, - }, - target: TargetConnectionParams { - target_host: "127.0.0.1".into(), - target_port: 9001, - target_secure: false, - target_runtime_version: TargetRuntimeVersionParams { - target_version_mode: RuntimeVersionType::Bundle, - target_spec_version: None, - target_transaction_version: None, - } - }, - target_sign: TargetSigningParams { - target_signer: Some("//Bob".into()), - target_signer_password: None, - target_signer_file: 
None, - target_signer_password_file: None, - target_transactions_mortality: None, - }, - swap_type: TokenSwapType::LockUntilBlock { - blocks_before_expire: 1, - swap_nonce: None, - }, - source_balance: Balance(8000000000), - target_balance: Balance(9000000000), - target_to_source_conversion_rate_override: Some(ConversionRateOverride::Metric), - source_to_target_conversion_rate_override: Some(ConversionRateOverride::Explicit( - 84.56 - )), - } - ); - } -} diff --git a/polkadot/bridges/relays/bin-substrate/src/main.rs b/polkadot/bridges/relays/bin-substrate/src/main.rs deleted file mode 100644 index 13db6beefa6..00000000000 --- a/polkadot/bridges/relays/bin-substrate/src/main.rs +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate-to-substrate relay entrypoint. - -#![warn(missing_docs)] - -mod chains; -mod cli; - -fn main() { - let command = cli::parse_args(); - let run = command.run(); - let result = async_std::task::block_on(run); - if let Err(error) = result { - log::error!(target: "bridge", "Failed to start relay: {}", error); - } -} diff --git a/polkadot/bridges/relays/client-kusama/Cargo.toml b/polkadot/bridges/relays/client-kusama/Cargo.toml deleted file mode 100644 index 35c24c1089e..00000000000 --- a/polkadot/bridges/relays/client-kusama/Cargo.toml +++ /dev/null @@ -1,30 +0,0 @@ -[package] -name = "relay-kusama-client" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0" } -relay-substrate-client = { path = "../client-substrate" } -relay-utils = { path = "../utils" } -scale-info = { version = "2.0.1", features = ["derive"] } - -# Bridge dependencies - -bp-header-chain = { path = "../../primitives/header-chain" } -bp-kusama = { path = "../../primitives/chain-kusama" } -bp-message-dispatch = { path = "../../primitives/message-dispatch" } -bp-messages = { path = "../../primitives/messages" } -bp-polkadot = { path = "../../primitives/chain-polkadot" } -bp-polkadot-core = { path = "../../primitives/polkadot-core" } -bp-runtime = { path = "../../primitives/runtime" } -bridge-runtime-common = { path = "../../bin/runtime-common" } -pallet-bridge-dispatch = { path = "../../modules/dispatch" } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/relays/client-kusama/src/lib.rs b/polkadot/bridges/relays/client-kusama/src/lib.rs deleted file mode 100644 index e228f2dc24d..00000000000 --- a/polkadot/bridges/relays/client-kusama/src/lib.rs +++ 
/dev/null @@ -1,151 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Types used to connect to the Kusama chain. - -use bp_messages::MessageNonce; -use codec::Encode; -use frame_support::weights::Weight; -use relay_substrate_client::{ - Chain, ChainBase, ChainWithBalances, ChainWithGrandpa, ChainWithMessages, - Error as SubstrateError, SignParam, TransactionSignScheme, UnsignedTransaction, -}; -use sp_core::{storage::StorageKey, Pair}; -use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount}; -use std::time::Duration; - -pub mod runtime; - -/// Kusama header id. -pub type HeaderId = relay_utils::HeaderId; - -/// Kusama chain definition -#[derive(Debug, Clone, Copy)] -pub struct Kusama; - -impl ChainBase for Kusama { - type BlockNumber = bp_kusama::BlockNumber; - type Hash = bp_kusama::Hash; - type Hasher = bp_kusama::Hasher; - type Header = bp_kusama::Header; - - type AccountId = bp_kusama::AccountId; - type Balance = bp_kusama::Balance; - type Index = bp_kusama::Nonce; - type Signature = bp_kusama::Signature; - - fn max_extrinsic_size() -> u32 { - bp_kusama::Kusama::max_extrinsic_size() - } - - fn max_extrinsic_weight() -> Weight { - bp_kusama::Kusama::max_extrinsic_weight() - } -} - -impl Chain for Kusama { - const NAME: &'static str = "Kusama"; - const TOKEN_ID: Option<&'static str> = Some("kusama"); - const BEST_FINALIZED_HEADER_ID_METHOD: &'static str = - bp_kusama::BEST_FINALIZED_KUSAMA_HEADER_METHOD; - const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(6); - const STORAGE_PROOF_OVERHEAD: u32 = bp_kusama::EXTRA_STORAGE_PROOF_SIZE; - const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = bp_kusama::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE; - - type SignedBlock = bp_kusama::SignedBlock; - type Call = crate::runtime::Call; - type WeightToFee = bp_kusama::WeightToFee; -} - -impl ChainWithGrandpa for Kusama { - const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = bp_kusama::WITH_KUSAMA_GRANDPA_PALLET_NAME; -} - -impl ChainWithMessages for Kusama { - const WITH_CHAIN_MESSAGES_PALLET_NAME: &'static str = - bp_kusama::WITH_KUSAMA_MESSAGES_PALLET_NAME; - const TO_CHAIN_MESSAGE_DETAILS_METHOD: &'static str = - bp_kusama::TO_KUSAMA_MESSAGE_DETAILS_METHOD; - const PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_CHAIN: Weight = - bp_kusama::PAY_INBOUND_DISPATCH_FEE_WEIGHT; - const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = - bp_kusama::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; - const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = - bp_kusama::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; - type WeightInfo = (); -} - -impl ChainWithBalances for Kusama { - fn account_info_storage_key(account_id: &Self::AccountId) -> StorageKey { - StorageKey(bp_kusama::account_info_storage_key(account_id)) - } -} - -impl TransactionSignScheme for Kusama { - type Chain = Kusama; - type 
AccountKeyPair = sp_core::sr25519::Pair; - type SignedTransaction = crate::runtime::UncheckedExtrinsic; - - fn sign_transaction(param: SignParam) -> Result { - let raw_payload = SignedPayload::new( - param.unsigned.call.clone(), - bp_kusama::SignedExtensions::new( - param.spec_version, - param.transaction_version, - param.era, - param.genesis_hash, - param.unsigned.nonce, - param.unsigned.tip, - ), - ) - .expect("SignedExtension never fails."); - - let signature = raw_payload.using_encoded(|payload| param.signer.sign(payload)); - let signer: sp_runtime::MultiSigner = param.signer.public().into(); - let (call, extra, _) = raw_payload.deconstruct(); - - Ok(bp_kusama::UncheckedExtrinsic::new_signed( - call, - sp_runtime::MultiAddress::Id(signer.into_account()), - signature.into(), - extra, - )) - } - - fn is_signed(tx: &Self::SignedTransaction) -> bool { - tx.signature.is_some() - } - - fn is_signed_by(signer: &Self::AccountKeyPair, tx: &Self::SignedTransaction) -> bool { - tx.signature - .as_ref() - .map(|(address, _, _)| { - *address == bp_kusama::AccountId::from(*signer.public().as_array_ref()).into() - }) - .unwrap_or(false) - } - - fn parse_transaction(tx: Self::SignedTransaction) -> Option> { - let extra = &tx.signature.as_ref()?.2; - Some(UnsignedTransaction { call: tx.function, nonce: extra.nonce(), tip: extra.tip() }) - } -} - -/// Kusama header type used in headers sync. -pub type SyncHeader = relay_substrate_client::SyncHeader; - -/// Kusama signing params. -pub type SigningParams = sp_core::sr25519::Pair; diff --git a/polkadot/bridges/relays/client-kusama/src/runtime.rs b/polkadot/bridges/relays/client-kusama/src/runtime.rs deleted file mode 100644 index 59a919e6cb9..00000000000 --- a/polkadot/bridges/relays/client-kusama/src/runtime.rs +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Types that are specific to the Kusama runtime. - -use bp_messages::{LaneId, UnrewardedRelayersState}; -use bp_polkadot_core::{AccountAddress, Balance, PolkadotLike}; -use bp_runtime::Chain; -use codec::{Compact, Decode, Encode}; -use frame_support::weights::Weight; -use scale_info::TypeInfo; -use sp_runtime::FixedU128; - -/// Unchecked Kusama extrinsic. -pub type UncheckedExtrinsic = bp_polkadot_core::UncheckedExtrinsic; - -/// Polkadot account ownership digest from Kusama. -/// -/// The byte vector returned by this function should be signed with a Polkadot account private key. -/// This way, the owner of `kusama_account_id` on Kusama proves that the Polkadot account private -/// key is also under his control. 
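// A minimal sketch, not part of the original file, of how the ownership digest defined
// just below is meant to be consumed: the relayer encodes the Polkadot call, derives the
// digest, and signs it with the *Polkadot* account key; that signature is what later
// proves, on the Kusama side, that the Polkadot account is controlled by the sender.
// The helper name and its parameters are illustrative assumptions, not crate API.
fn sign_polkadot_ownership_proof(
	polkadot_call: &impl codec::Encode,
	kusama_account_id: bp_kusama::AccountId,
	polkadot_spec_version: u32,
	polkadot_key: &sp_core::sr25519::Pair,
) -> sp_core::sr25519::Signature {
	use sp_core::Pair;
	// The digest covers the call, the Kusama account claiming ownership, the Polkadot
	// spec version and both chain identifiers (see the function defined below).
	let digest = kusama_to_polkadot_account_ownership_digest(
		polkadot_call,
		kusama_account_id,
		polkadot_spec_version,
	);
	polkadot_key.sign(&digest)
}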
-pub fn kusama_to_polkadot_account_ownership_digest( - polkadot_call: &Call, - kusama_account_id: AccountId, - polkadot_spec_version: SpecVersion, -) -> Vec -where - Call: codec::Encode, - AccountId: codec::Encode, - SpecVersion: codec::Encode, -{ - pallet_bridge_dispatch::account_ownership_digest( - polkadot_call, - kusama_account_id, - polkadot_spec_version, - bp_runtime::KUSAMA_CHAIN_ID, - bp_runtime::POLKADOT_CHAIN_ID, - ) -} - -/// Kusama Runtime `Call` enum. -/// -/// The enum represents a subset of possible `Call`s we can send to Kusama chain. -/// Ideally this code would be auto-generated from metadata, because we want to -/// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s. -/// -/// All entries here (like pretty much in the entire file) must be kept in sync with Kusama -/// `construct_runtime`, so that we maintain SCALE-compatibility. -/// -/// See: [link](https://github.com/paritytech/polkadot/blob/master/runtime/kusama/src/lib.rs) -#[allow(clippy::large_enum_variant)] -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -pub enum Call { - /// System pallet. - #[codec(index = 0)] - System(SystemCall), - /// Balances pallet. - #[codec(index = 4)] - Balances(BalancesCall), - /// Utility pallet. - #[codec(index = 24)] - Utility(UtilityCall), - /// Polkadot bridge pallet. - #[codec(index = 110)] - BridgePolkadotGrandpa(BridgePolkadotGrandpaCall), - /// Polkadot messages pallet. - #[codec(index = 111)] - BridgePolkadotMessages(BridgePolkadotMessagesCall), -} - -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -#[allow(non_camel_case_types)] -pub enum SystemCall { - #[codec(index = 1)] - remark(Vec), -} - -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -#[allow(non_camel_case_types)] -pub enum BalancesCall { - #[codec(index = 0)] - transfer(AccountAddress, Compact), -} - -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -#[allow(non_camel_case_types)] -pub enum BridgePolkadotGrandpaCall { - #[codec(index = 0)] - submit_finality_proof( - Box<::Header>, - bp_header_chain::justification::GrandpaJustification<::Header>, - ), - #[codec(index = 1)] - initialize(bp_header_chain::InitializationData<::Header>), - #[codec(index = 3)] - set_operational(bool), -} - -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -#[allow(non_camel_case_types)] -pub enum BridgePolkadotMessagesCall { - #[codec(index = 2)] - update_pallet_parameter(BridgePolkadotMessagesParameter), - #[codec(index = 3)] - send_message( - LaneId, - bp_message_dispatch::MessagePayload< - bp_kusama::AccountId, - bp_polkadot::AccountId, - bp_polkadot::AccountPublic, - Vec, - >, - bp_kusama::Balance, - ), - #[codec(index = 5)] - receive_messages_proof( - bp_polkadot::AccountId, - bridge_runtime_common::messages::target::FromBridgedChainMessagesProof, - u32, - Weight, - ), - #[codec(index = 6)] - receive_messages_delivery_proof( - bridge_runtime_common::messages::source::FromBridgedChainMessagesDeliveryProof< - bp_polkadot::Hash, - >, - UnrewardedRelayersState, - ), -} - -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -#[allow(non_camel_case_types)] -pub enum UtilityCall { - #[codec(index = 2)] - batch_all(Vec), -} - -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -pub enum BridgePolkadotMessagesParameter { - #[codec(index = 0)] - PolkadotToKusamaConversionRate(FixedU128), -} - -impl sp_runtime::traits::Dispatchable for Call { - type Origin = (); - type Config = (); - type Info = 
(); - type PostInfo = (); - - fn dispatch(self, _origin: Self::Origin) -> sp_runtime::DispatchResultWithInfo { - unimplemented!("The Call is not expected to be dispatched.") - } -} diff --git a/polkadot/bridges/relays/client-millau/Cargo.toml b/polkadot/bridges/relays/client-millau/Cargo.toml deleted file mode 100644 index 98932433455..00000000000 --- a/polkadot/bridges/relays/client-millau/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "relay-millau-client" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0" } -relay-substrate-client = { path = "../client-substrate" } -relay-utils = { path = "../utils" } - -# Supported Chains - -bp-messages = { path = "../../primitives/messages" } -bp-millau = { path = "../../primitives/chain-millau" } -millau-runtime = { path = "../../bin/millau/runtime" } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/relays/client-millau/src/lib.rs b/polkadot/bridges/relays/client-millau/src/lib.rs deleted file mode 100644 index eae9d9b4586..00000000000 --- a/polkadot/bridges/relays/client-millau/src/lib.rs +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Types used to connect to the Millau-Substrate chain. - -use bp_messages::MessageNonce; -use codec::{Compact, Decode, Encode}; -use frame_support::weights::Weight; -use relay_substrate_client::{ - BalanceOf, Chain, ChainBase, ChainWithBalances, ChainWithGrandpa, ChainWithMessages, - Error as SubstrateError, IndexOf, SignParam, TransactionSignScheme, UnsignedTransaction, -}; -use sp_core::{storage::StorageKey, Pair}; -use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount}; -use std::time::Duration; - -/// Millau header id. -pub type HeaderId = relay_utils::HeaderId; - -/// Millau chain definition. 
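// A minimal sketch of why the `#[codec(index = ..)]` attributes on the runtime `Call`
// subsets in these client crates (such as the Kusama one above) must mirror the target
// chain's `construct_runtime` ordering: SCALE encodes an enum variant as its index byte
// followed by the payload, so the pallet index (System = 0; Balances = 4 on Kusama but
// 5 on Polkadot, per the enums in this patch) plus the call index inside the pallet
// (remark = 1) are all that identifies the dispatchable on the other side. A wrong index
// silently encodes a different call. The test below is illustrative and assumes the
// `relay-kusama-client` crate removed above is available as a dependency.
#[test]
fn remark_encodes_with_expected_indices() {
	use codec::Encode;
	let call = relay_kusama_client::runtime::Call::System(
		relay_kusama_client::runtime::SystemCall::remark(b"hi".to_vec()),
	);
	let encoded = call.encode();
	// First byte: pallet index of `System` in the Kusama runtime; second byte: call
	// index of `remark`; the remainder is the SCALE-encoded `Vec<u8>` payload.
	assert_eq!(&encoded[..2], &[0u8, 1u8]);
}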
-#[derive(Debug, Clone, Copy, PartialEq)] -pub struct Millau; - -impl ChainBase for Millau { - type BlockNumber = millau_runtime::BlockNumber; - type Hash = millau_runtime::Hash; - type Hasher = millau_runtime::Hashing; - type Header = millau_runtime::Header; - - type AccountId = millau_runtime::AccountId; - type Balance = millau_runtime::Balance; - type Index = millau_runtime::Index; - type Signature = millau_runtime::Signature; - - fn max_extrinsic_size() -> u32 { - bp_millau::Millau::max_extrinsic_size() - } - - fn max_extrinsic_weight() -> Weight { - bp_millau::Millau::max_extrinsic_weight() - } -} - -impl ChainWithGrandpa for Millau { - const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = bp_millau::WITH_MILLAU_GRANDPA_PALLET_NAME; -} - -impl ChainWithMessages for Millau { - const WITH_CHAIN_MESSAGES_PALLET_NAME: &'static str = - bp_millau::WITH_MILLAU_MESSAGES_PALLET_NAME; - const TO_CHAIN_MESSAGE_DETAILS_METHOD: &'static str = - bp_millau::TO_MILLAU_MESSAGE_DETAILS_METHOD; - const PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_CHAIN: Weight = - bp_millau::PAY_INBOUND_DISPATCH_FEE_WEIGHT; - const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = - bp_millau::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; - const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = - bp_millau::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; - type WeightInfo = (); -} - -impl Chain for Millau { - const NAME: &'static str = "Millau"; - // Rialto token has no value, but we associate it with KSM token - const TOKEN_ID: Option<&'static str> = Some("kusama"); - const BEST_FINALIZED_HEADER_ID_METHOD: &'static str = - bp_millau::BEST_FINALIZED_MILLAU_HEADER_METHOD; - const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(5); - const STORAGE_PROOF_OVERHEAD: u32 = bp_millau::EXTRA_STORAGE_PROOF_SIZE; - const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = bp_millau::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE; - - type SignedBlock = millau_runtime::SignedBlock; - type Call = millau_runtime::Call; - type WeightToFee = bp_millau::WeightToFee; -} - -impl ChainWithBalances for Millau { - fn account_info_storage_key(account_id: &Self::AccountId) -> StorageKey { - use frame_support::storage::generator::StorageMap; - StorageKey(frame_system::Account::::storage_map_final_key( - account_id, - )) - } -} - -impl TransactionSignScheme for Millau { - type Chain = Millau; - type AccountKeyPair = sp_core::sr25519::Pair; - type SignedTransaction = millau_runtime::UncheckedExtrinsic; - - fn sign_transaction(param: SignParam) -> Result { - let raw_payload = SignedPayload::from_raw( - param.unsigned.call.clone(), - ( - frame_system::CheckNonZeroSender::::new(), - frame_system::CheckSpecVersion::::new(), - frame_system::CheckTxVersion::::new(), - frame_system::CheckGenesis::::new(), - frame_system::CheckEra::::from(param.era.frame_era()), - frame_system::CheckNonce::::from(param.unsigned.nonce), - frame_system::CheckWeight::::new(), - pallet_transaction_payment::ChargeTransactionPayment::::from(param.unsigned.tip), - ), - ( - (), - param.spec_version, - param.transaction_version, - param.genesis_hash, - param.era.signed_payload(param.genesis_hash), - (), - (), - (), - ), - ); - let signature = raw_payload.using_encoded(|payload| param.signer.sign(payload)); - let signer: sp_runtime::MultiSigner = param.signer.public().into(); - let (call, extra, _) = raw_payload.deconstruct(); - - Ok(millau_runtime::UncheckedExtrinsic::new_signed( - call.into_decoded()?, - signer.into_account(), - signature.into(), - extra, - )) - } - - fn is_signed(tx: 
&Self::SignedTransaction) -> bool { - tx.signature.is_some() - } - - fn is_signed_by(signer: &Self::AccountKeyPair, tx: &Self::SignedTransaction) -> bool { - tx.signature - .as_ref() - .map(|(address, _, _)| { - *address == millau_runtime::Address::from(*signer.public().as_array_ref()) - }) - .unwrap_or(false) - } - - fn parse_transaction(tx: Self::SignedTransaction) -> Option> { - let extra = &tx.signature.as_ref()?.2; - Some(UnsignedTransaction { - call: tx.function.into(), - nonce: Compact::>::decode(&mut &extra.5.encode()[..]).ok()?.into(), - tip: Compact::>::decode(&mut &extra.7.encode()[..]) - .ok()? - .into(), - }) - } -} - -/// Millau signing params. -pub type SigningParams = sp_core::sr25519::Pair; - -/// Millau header type used in headers sync. -pub type SyncHeader = relay_substrate_client::SyncHeader; - -#[cfg(test)] -mod tests { - use super::*; - use relay_substrate_client::TransactionEra; - - #[test] - fn parse_transaction_works() { - let unsigned = UnsignedTransaction { - call: millau_runtime::Call::System(millau_runtime::SystemCall::remark { - remark: b"Hello world!".to_vec(), - }) - .into(), - nonce: 777, - tip: 888, - }; - let signed_transaction = Millau::sign_transaction(SignParam { - spec_version: 42, - transaction_version: 50000, - genesis_hash: [42u8; 64].into(), - signer: sp_core::sr25519::Pair::from_seed_slice(&[1u8; 32]).unwrap(), - era: TransactionEra::immortal(), - unsigned: unsigned.clone(), - }) - .unwrap(); - let parsed_transaction = Millau::parse_transaction(signed_transaction).unwrap(); - assert_eq!(parsed_transaction, unsigned); - } -} diff --git a/polkadot/bridges/relays/client-polkadot/Cargo.toml b/polkadot/bridges/relays/client-polkadot/Cargo.toml deleted file mode 100644 index 96cfa2ce1ba..00000000000 --- a/polkadot/bridges/relays/client-polkadot/Cargo.toml +++ /dev/null @@ -1,30 +0,0 @@ -[package] -name = "relay-polkadot-client" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0" } -relay-substrate-client = { path = "../client-substrate" } -relay-utils = { path = "../utils" } -scale-info = { version = "2.0.1", features = ["derive"] } - -# Bridge dependencies - -bp-header-chain = { path = "../../primitives/header-chain" } -bp-kusama = { path = "../../primitives/chain-kusama" } -bp-message-dispatch = { path = "../../primitives/message-dispatch" } -bp-messages = { path = "../../primitives/messages" } -bp-polkadot = { path = "../../primitives/chain-polkadot" } -bp-polkadot-core = { path = "../../primitives/polkadot-core" } -bp-runtime = { path = "../../primitives/runtime" } -bridge-runtime-common = { path = "../../bin/runtime-common" } -pallet-bridge-dispatch = { path = "../../modules/dispatch" } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/relays/client-polkadot/src/lib.rs b/polkadot/bridges/relays/client-polkadot/src/lib.rs deleted file mode 100644 index d4ada45e36c..00000000000 --- a/polkadot/bridges/relays/client-polkadot/src/lib.rs +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Types used to connect to the Polkadot chain. - -use bp_messages::MessageNonce; -use codec::Encode; -use frame_support::weights::Weight; -use relay_substrate_client::{ - Chain, ChainBase, ChainWithBalances, ChainWithGrandpa, ChainWithMessages, - Error as SubstrateError, SignParam, TransactionSignScheme, UnsignedTransaction, -}; -use sp_core::{storage::StorageKey, Pair}; -use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount}; -use std::time::Duration; - -pub mod runtime; - -/// Polkadot header id. -pub type HeaderId = relay_utils::HeaderId; - -/// Polkadot chain definition -#[derive(Debug, Clone, Copy)] -pub struct Polkadot; - -impl ChainBase for Polkadot { - type BlockNumber = bp_polkadot::BlockNumber; - type Hash = bp_polkadot::Hash; - type Hasher = bp_polkadot::Hasher; - type Header = bp_polkadot::Header; - - type AccountId = bp_polkadot::AccountId; - type Balance = bp_polkadot::Balance; - type Index = bp_polkadot::Nonce; - type Signature = bp_polkadot::Signature; - - fn max_extrinsic_size() -> u32 { - bp_polkadot::Polkadot::max_extrinsic_size() - } - - fn max_extrinsic_weight() -> Weight { - bp_polkadot::Polkadot::max_extrinsic_weight() - } -} - -impl Chain for Polkadot { - const NAME: &'static str = "Polkadot"; - const TOKEN_ID: Option<&'static str> = Some("polkadot"); - const BEST_FINALIZED_HEADER_ID_METHOD: &'static str = - bp_polkadot::BEST_FINALIZED_POLKADOT_HEADER_METHOD; - const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(6); - const STORAGE_PROOF_OVERHEAD: u32 = bp_polkadot::EXTRA_STORAGE_PROOF_SIZE; - const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = bp_polkadot::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE; - - type SignedBlock = bp_polkadot::SignedBlock; - type Call = crate::runtime::Call; - type WeightToFee = bp_polkadot::WeightToFee; -} - -impl ChainWithGrandpa for Polkadot { - const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = - bp_polkadot::WITH_POLKADOT_GRANDPA_PALLET_NAME; -} - -impl ChainWithMessages for Polkadot { - const WITH_CHAIN_MESSAGES_PALLET_NAME: &'static str = - bp_polkadot::WITH_POLKADOT_MESSAGES_PALLET_NAME; - const TO_CHAIN_MESSAGE_DETAILS_METHOD: &'static str = - bp_polkadot::TO_POLKADOT_MESSAGE_DETAILS_METHOD; - const PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_CHAIN: Weight = - bp_polkadot::PAY_INBOUND_DISPATCH_FEE_WEIGHT; - const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = - bp_polkadot::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; - const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = - bp_polkadot::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; - type WeightInfo = (); -} - -impl ChainWithBalances for Polkadot { - fn account_info_storage_key(account_id: &Self::AccountId) -> StorageKey { - StorageKey(bp_polkadot::account_info_storage_key(account_id)) - } -} - -impl TransactionSignScheme for Polkadot { - type Chain = Polkadot; - type AccountKeyPair = sp_core::sr25519::Pair; - type 
SignedTransaction = crate::runtime::UncheckedExtrinsic; - - fn sign_transaction(param: SignParam) -> Result { - let raw_payload = SignedPayload::new( - param.unsigned.call.clone(), - bp_polkadot::SignedExtensions::new( - param.spec_version, - param.transaction_version, - param.era, - param.genesis_hash, - param.unsigned.nonce, - param.unsigned.tip, - ), - ) - .expect("SignedExtension never fails."); - - let signature = raw_payload.using_encoded(|payload| param.signer.sign(payload)); - let signer: sp_runtime::MultiSigner = param.signer.public().into(); - let (call, extra, _) = raw_payload.deconstruct(); - - Ok(bp_polkadot::UncheckedExtrinsic::new_signed( - call, - sp_runtime::MultiAddress::Id(signer.into_account()), - signature.into(), - extra, - )) - } - - fn is_signed(tx: &Self::SignedTransaction) -> bool { - tx.signature.is_some() - } - - fn is_signed_by(signer: &Self::AccountKeyPair, tx: &Self::SignedTransaction) -> bool { - tx.signature - .as_ref() - .map(|(address, _, _)| { - *address == bp_polkadot::AccountId::from(*signer.public().as_array_ref()).into() - }) - .unwrap_or(false) - } - - fn parse_transaction(tx: Self::SignedTransaction) -> Option> { - let extra = &tx.signature.as_ref()?.2; - Some(UnsignedTransaction { call: tx.function, nonce: extra.nonce(), tip: extra.tip() }) - } -} - -/// Polkadot header type used in headers sync. -pub type SyncHeader = relay_substrate_client::SyncHeader; - -/// Polkadot signing params. -pub type SigningParams = sp_core::sr25519::Pair; diff --git a/polkadot/bridges/relays/client-polkadot/src/runtime.rs b/polkadot/bridges/relays/client-polkadot/src/runtime.rs deleted file mode 100644 index fa45115a6b5..00000000000 --- a/polkadot/bridges/relays/client-polkadot/src/runtime.rs +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Types that are specific to the Polkadot runtime. - -use bp_messages::{LaneId, UnrewardedRelayersState}; -use bp_polkadot_core::{AccountAddress, Balance, PolkadotLike}; -use bp_runtime::Chain; -use codec::{Compact, Decode, Encode}; -use frame_support::weights::Weight; -use scale_info::TypeInfo; -use sp_runtime::FixedU128; - -/// Unchecked Polkadot extrinsic. -pub type UncheckedExtrinsic = bp_polkadot_core::UncheckedExtrinsic; - -/// Kusama account ownership digest from Polkadot. -/// -/// The byte vector returned by this function should be signed with a Kusama account private key. -/// This way, the owner of `kusam_account_id` on Polkadot proves that the Kusama account private key -/// is also under his control. 
-pub fn polkadot_to_kusama_account_ownership_digest( - kusama_call: &Call, - kusam_account_id: AccountId, - kusama_spec_version: SpecVersion, -) -> Vec -where - Call: codec::Encode, - AccountId: codec::Encode, - SpecVersion: codec::Encode, -{ - pallet_bridge_dispatch::account_ownership_digest( - kusama_call, - kusam_account_id, - kusama_spec_version, - bp_runtime::POLKADOT_CHAIN_ID, - bp_runtime::KUSAMA_CHAIN_ID, - ) -} - -/// Polkadot Runtime `Call` enum. -/// -/// The enum represents a subset of possible `Call`s we can send to Polkadot chain. -/// Ideally this code would be auto-generated from metadata, because we want to -/// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s. -/// -/// All entries here (like pretty much in the entire file) must be kept in sync with Polkadot -/// `construct_runtime`, so that we maintain SCALE-compatibility. -/// -/// See: [link](https://github.com/paritytech/kusama/blob/master/runtime/kusam/src/lib.rs) -#[allow(clippy::large_enum_variant)] -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -pub enum Call { - /// System pallet. - #[codec(index = 0)] - System(SystemCall), - /// Balances pallet. - #[codec(index = 5)] - Balances(BalancesCall), - /// Utility pallet. - #[codec(index = 26)] - Utility(UtilityCall), - /// Kusama bridge pallet. - #[codec(index = 110)] - BridgeKusamaGrandpa(BridgeKusamaGrandpaCall), - /// Kusama messages pallet. - #[codec(index = 111)] - BridgeKusamaMessages(BridgeKusamaMessagesCall), -} - -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -#[allow(non_camel_case_types)] -pub enum SystemCall { - #[codec(index = 1)] - remark(Vec), -} - -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -#[allow(non_camel_case_types)] -pub enum BalancesCall { - #[codec(index = 0)] - transfer(AccountAddress, Compact), -} - -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -#[allow(non_camel_case_types)] -pub enum BridgeKusamaGrandpaCall { - #[codec(index = 0)] - submit_finality_proof( - Box<::Header>, - bp_header_chain::justification::GrandpaJustification<::Header>, - ), - #[codec(index = 1)] - initialize(bp_header_chain::InitializationData<::Header>), - #[codec(index = 3)] - set_operational(bool), -} - -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -#[allow(non_camel_case_types)] -pub enum BridgeKusamaMessagesCall { - #[codec(index = 2)] - update_pallet_parameter(BridgeKusamaMessagesParameter), - #[codec(index = 3)] - send_message( - LaneId, - bp_message_dispatch::MessagePayload< - bp_polkadot::AccountId, - bp_kusama::AccountId, - bp_kusama::AccountPublic, - Vec, - >, - bp_polkadot::Balance, - ), - #[codec(index = 5)] - receive_messages_proof( - bp_kusama::AccountId, - bridge_runtime_common::messages::target::FromBridgedChainMessagesProof, - u32, - Weight, - ), - #[codec(index = 6)] - receive_messages_delivery_proof( - bridge_runtime_common::messages::source::FromBridgedChainMessagesDeliveryProof< - bp_kusama::Hash, - >, - UnrewardedRelayersState, - ), -} - -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -#[allow(non_camel_case_types)] -pub enum UtilityCall { - #[codec(index = 2)] - batch_all(Vec), -} - -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -pub enum BridgeKusamaMessagesParameter { - #[codec(index = 0)] - KusamaToPolkadotConversionRate(FixedU128), -} - -impl sp_runtime::traits::Dispatchable for Call { - type Origin = (); - type Config = (); - type Info = (); - type PostInfo = (); - - fn 
dispatch(self, _origin: Self::Origin) -> sp_runtime::DispatchResultWithInfo { - unimplemented!("The Call is not expected to be dispatched.") - } -} diff --git a/polkadot/bridges/relays/client-rialto-parachain/Cargo.toml b/polkadot/bridges/relays/client-rialto-parachain/Cargo.toml deleted file mode 100644 index ebc28560643..00000000000 --- a/polkadot/bridges/relays/client-rialto-parachain/Cargo.toml +++ /dev/null @@ -1,21 +0,0 @@ -[package] -name = "relay-rialto-parachain-client" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -relay-substrate-client = { path = "../client-substrate" } -relay-utils = { path = "../utils" } - -# Bridge dependencies - -bp-rialto = { path = "../../primitives/chain-rialto" } -rialto-parachain-runtime = { path = "../../bin/rialto-parachain/runtime" } - -# Substrate Dependencies - -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/relays/client-rialto-parachain/src/lib.rs b/polkadot/bridges/relays/client-rialto-parachain/src/lib.rs deleted file mode 100644 index 65bf46f660c..00000000000 --- a/polkadot/bridges/relays/client-rialto-parachain/src/lib.rs +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Types used to connect to the Rialto-Substrate chain. - -use frame_support::weights::Weight; -use relay_substrate_client::{Chain, ChainBase}; -use std::time::Duration; - -/// Rialto header id. 
-pub type HeaderId = - relay_utils::HeaderId; - -/// Rialto parachain definition -#[derive(Debug, Clone, Copy)] -pub struct RialtoParachain; - -impl ChainBase for RialtoParachain { - type BlockNumber = rialto_parachain_runtime::BlockNumber; - type Hash = rialto_parachain_runtime::Hash; - type Hasher = rialto_parachain_runtime::Hashing; - type Header = rialto_parachain_runtime::Header; - - type AccountId = rialto_parachain_runtime::AccountId; - type Balance = rialto_parachain_runtime::Balance; - type Index = rialto_parachain_runtime::Index; - type Signature = rialto_parachain_runtime::Signature; - - fn max_extrinsic_size() -> u32 { - bp_rialto::Rialto::max_extrinsic_size() - } - - fn max_extrinsic_weight() -> Weight { - bp_rialto::Rialto::max_extrinsic_weight() - } -} - -impl Chain for RialtoParachain { - const NAME: &'static str = "RialtoParachain"; - const TOKEN_ID: Option<&'static str> = None; - // should be fixed/changed in https://github.com/paritytech/parity-bridges-common/pull/1199 - const BEST_FINALIZED_HEADER_ID_METHOD: &'static str = ""; - const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(5); - const STORAGE_PROOF_OVERHEAD: u32 = bp_rialto::EXTRA_STORAGE_PROOF_SIZE; - const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = bp_rialto::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE; - - type SignedBlock = rialto_parachain_runtime::SignedBlock; - type Call = rialto_parachain_runtime::Call; - type WeightToFee = bp_rialto::WeightToFee; -} diff --git a/polkadot/bridges/relays/client-rialto/Cargo.toml b/polkadot/bridges/relays/client-rialto/Cargo.toml deleted file mode 100644 index 37c55dd5f15..00000000000 --- a/polkadot/bridges/relays/client-rialto/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "relay-rialto-client" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0" } -relay-substrate-client = { path = "../client-substrate" } -relay-utils = { path = "../utils" } - -# Bridge dependencies - -bp-messages = { path = "../../primitives/messages" } -bp-rialto = { path = "../../primitives/chain-rialto" } -rialto-runtime = { path = "../../bin/rialto/runtime" } - -# Substrate Dependencies - -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/relays/client-rialto/src/lib.rs b/polkadot/bridges/relays/client-rialto/src/lib.rs deleted file mode 100644 index 858227e8083..00000000000 --- a/polkadot/bridges/relays/client-rialto/src/lib.rs +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Types used to connect to the Rialto-Substrate chain. - -use bp_messages::MessageNonce; -use codec::{Compact, Decode, Encode}; -use frame_support::weights::Weight; -use relay_substrate_client::{ - BalanceOf, Chain, ChainBase, ChainWithBalances, ChainWithGrandpa, ChainWithMessages, - Error as SubstrateError, IndexOf, SignParam, TransactionSignScheme, UnsignedTransaction, -}; -use sp_core::{storage::StorageKey, Pair}; -use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount}; -use std::time::Duration; - -/// Rialto header id. -pub type HeaderId = relay_utils::HeaderId; - -/// Rialto chain definition -#[derive(Debug, Clone, Copy, PartialEq)] -pub struct Rialto; - -impl ChainBase for Rialto { - type BlockNumber = rialto_runtime::BlockNumber; - type Hash = rialto_runtime::Hash; - type Hasher = rialto_runtime::Hashing; - type Header = rialto_runtime::Header; - - type AccountId = rialto_runtime::AccountId; - type Balance = rialto_runtime::Balance; - type Index = rialto_runtime::Index; - type Signature = rialto_runtime::Signature; - - fn max_extrinsic_size() -> u32 { - bp_rialto::Rialto::max_extrinsic_size() - } - - fn max_extrinsic_weight() -> Weight { - bp_rialto::Rialto::max_extrinsic_weight() - } -} - -impl Chain for Rialto { - const NAME: &'static str = "Rialto"; - // Rialto token has no value, but we associate it with DOT token - const TOKEN_ID: Option<&'static str> = Some("polkadot"); - const BEST_FINALIZED_HEADER_ID_METHOD: &'static str = - bp_rialto::BEST_FINALIZED_RIALTO_HEADER_METHOD; - const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(5); - const STORAGE_PROOF_OVERHEAD: u32 = bp_rialto::EXTRA_STORAGE_PROOF_SIZE; - const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = bp_rialto::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE; - - type SignedBlock = rialto_runtime::SignedBlock; - type Call = rialto_runtime::Call; - type WeightToFee = bp_rialto::WeightToFee; -} - -impl ChainWithGrandpa for Rialto { - const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = bp_rialto::WITH_RIALTO_GRANDPA_PALLET_NAME; -} - -impl ChainWithMessages for Rialto { - const WITH_CHAIN_MESSAGES_PALLET_NAME: &'static str = - bp_rialto::WITH_RIALTO_MESSAGES_PALLET_NAME; - const TO_CHAIN_MESSAGE_DETAILS_METHOD: &'static str = - bp_rialto::TO_RIALTO_MESSAGE_DETAILS_METHOD; - const PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_CHAIN: Weight = - bp_rialto::PAY_INBOUND_DISPATCH_FEE_WEIGHT; - const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = - bp_rialto::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; - const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = - bp_rialto::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; - type WeightInfo = (); -} - -impl ChainWithBalances for Rialto { - fn account_info_storage_key(account_id: &Self::AccountId) -> StorageKey { - use frame_support::storage::generator::StorageMap; - StorageKey(frame_system::Account::::storage_map_final_key( - account_id, - )) - } -} - -impl TransactionSignScheme for Rialto { - type Chain = Rialto; - type AccountKeyPair = sp_core::sr25519::Pair; - type SignedTransaction = rialto_runtime::UncheckedExtrinsic; - - fn sign_transaction(param: SignParam) -> Result { - let raw_payload = SignedPayload::from_raw( - param.unsigned.call.clone(), - ( - frame_system::CheckNonZeroSender::::new(), - frame_system::CheckSpecVersion::::new(), - frame_system::CheckTxVersion::::new(), - 
frame_system::CheckGenesis::::new(), - frame_system::CheckEra::::from(param.era.frame_era()), - frame_system::CheckNonce::::from(param.unsigned.nonce), - frame_system::CheckWeight::::new(), - pallet_transaction_payment::ChargeTransactionPayment::::from(param.unsigned.tip), - ), - ( - (), - param.spec_version, - param.transaction_version, - param.genesis_hash, - param.era.signed_payload(param.genesis_hash), - (), - (), - (), - ), - ); - let signature = raw_payload.using_encoded(|payload| param.signer.sign(payload)); - let signer: sp_runtime::MultiSigner = param.signer.public().into(); - let (call, extra, _) = raw_payload.deconstruct(); - - Ok(rialto_runtime::UncheckedExtrinsic::new_signed( - call.into_decoded()?, - signer.into_account().into(), - signature.into(), - extra, - )) - } - - fn is_signed(tx: &Self::SignedTransaction) -> bool { - tx.signature.is_some() - } - - fn is_signed_by(signer: &Self::AccountKeyPair, tx: &Self::SignedTransaction) -> bool { - tx.signature - .as_ref() - .map(|(address, _, _)| *address == rialto_runtime::Address::Id(signer.public().into())) - .unwrap_or(false) - } - - fn parse_transaction(tx: Self::SignedTransaction) -> Option> { - let extra = &tx.signature.as_ref()?.2; - Some(UnsignedTransaction { - call: tx.function.into(), - nonce: Compact::>::decode(&mut &extra.5.encode()[..]).ok()?.into(), - tip: Compact::>::decode(&mut &extra.7.encode()[..]) - .ok()? - .into(), - }) - } -} - -/// Rialto signing params. -pub type SigningParams = sp_core::sr25519::Pair; - -/// Rialto header type used in headers sync. -pub type SyncHeader = relay_substrate_client::SyncHeader; - -#[cfg(test)] -mod tests { - use super::*; - use relay_substrate_client::TransactionEra; - - #[test] - fn parse_transaction_works() { - let unsigned = UnsignedTransaction { - call: rialto_runtime::Call::System(rialto_runtime::SystemCall::remark { - remark: b"Hello world!".to_vec(), - }) - .into(), - nonce: 777, - tip: 888, - }; - let signed_transaction = Rialto::sign_transaction(SignParam { - spec_version: 42, - transaction_version: 50000, - genesis_hash: [42u8; 32].into(), - signer: sp_core::sr25519::Pair::from_seed_slice(&[1u8; 32]).unwrap(), - era: TransactionEra::immortal(), - unsigned: unsigned.clone(), - }) - .unwrap(); - let parsed_transaction = Rialto::parse_transaction(signed_transaction).unwrap(); - assert_eq!(parsed_transaction, unsigned); - } -} diff --git a/polkadot/bridges/relays/client-rococo/Cargo.toml b/polkadot/bridges/relays/client-rococo/Cargo.toml deleted file mode 100644 index 2b78684a853..00000000000 --- a/polkadot/bridges/relays/client-rococo/Cargo.toml +++ /dev/null @@ -1,31 +0,0 @@ -[package] -name = "relay-rococo-client" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0" } -relay-substrate-client = { path = "../client-substrate" } -relay-utils = { path = "../utils" } -scale-info = { version = "2.0.1", features = ["derive"] } - -# Bridge dependencies - -bridge-runtime-common = { path = "../../bin/runtime-common" } -bp-header-chain = { path = "../../primitives/header-chain" } -bp-message-dispatch = { path = "../../primitives/message-dispatch" } -bp-messages = { path = "../../primitives/messages" } -bp-polkadot-core = { path = "../../primitives/polkadot-core" } -bp-rococo = { path = "../../primitives/chain-rococo" } -bp-runtime = { path = "../../primitives/runtime" } -bp-wococo = { path = "../../primitives/chain-wococo" 
} -pallet-bridge-dispatch = { path = "../../modules/dispatch" } -pallet-bridge-messages = { path = "../../modules/messages" } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/relays/client-rococo/src/lib.rs b/polkadot/bridges/relays/client-rococo/src/lib.rs deleted file mode 100644 index f63041df9ed..00000000000 --- a/polkadot/bridges/relays/client-rococo/src/lib.rs +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Types used to connect to the Rococo-Substrate chain. - -use bp_messages::MessageNonce; -use codec::Encode; -use frame_support::weights::Weight; -use relay_substrate_client::{ - Chain, ChainBase, ChainWithBalances, ChainWithGrandpa, ChainWithMessages, - Error as SubstrateError, SignParam, TransactionSignScheme, UnsignedTransaction, -}; -use sp_core::{storage::StorageKey, Pair}; -use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount}; -use std::time::Duration; - -pub mod runtime; - -/// Rococo header id. -pub type HeaderId = relay_utils::HeaderId; - -/// Rococo header type used in headers sync. 
-pub type SyncHeader = relay_substrate_client::SyncHeader; - -/// Rococo chain definition -#[derive(Debug, Clone, Copy)] -pub struct Rococo; - -impl ChainBase for Rococo { - type BlockNumber = bp_rococo::BlockNumber; - type Hash = bp_rococo::Hash; - type Hasher = bp_rococo::Hashing; - type Header = bp_rococo::Header; - - type AccountId = bp_rococo::AccountId; - type Balance = bp_rococo::Balance; - type Index = bp_rococo::Nonce; - type Signature = bp_rococo::Signature; - - fn max_extrinsic_size() -> u32 { - bp_rococo::Rococo::max_extrinsic_size() - } - - fn max_extrinsic_weight() -> Weight { - bp_rococo::Rococo::max_extrinsic_weight() - } -} - -impl Chain for Rococo { - const NAME: &'static str = "Rococo"; - const TOKEN_ID: Option<&'static str> = None; - const BEST_FINALIZED_HEADER_ID_METHOD: &'static str = - bp_rococo::BEST_FINALIZED_ROCOCO_HEADER_METHOD; - const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(6); - const STORAGE_PROOF_OVERHEAD: u32 = bp_rococo::EXTRA_STORAGE_PROOF_SIZE; - const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = bp_rococo::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE; - - type SignedBlock = bp_rococo::SignedBlock; - type Call = crate::runtime::Call; - type WeightToFee = bp_rococo::WeightToFee; -} - -impl ChainWithGrandpa for Rococo { - const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = bp_rococo::WITH_ROCOCO_GRANDPA_PALLET_NAME; -} - -impl ChainWithMessages for Rococo { - const WITH_CHAIN_MESSAGES_PALLET_NAME: &'static str = - bp_rococo::WITH_ROCOCO_MESSAGES_PALLET_NAME; - const TO_CHAIN_MESSAGE_DETAILS_METHOD: &'static str = - bp_rococo::TO_ROCOCO_MESSAGE_DETAILS_METHOD; - const PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_CHAIN: Weight = - bp_rococo::PAY_INBOUND_DISPATCH_FEE_WEIGHT; - const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = - bp_rococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; - const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = - bp_rococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; - type WeightInfo = (); -} - -impl ChainWithBalances for Rococo { - fn account_info_storage_key(account_id: &Self::AccountId) -> StorageKey { - StorageKey(bp_rococo::account_info_storage_key(account_id)) - } -} - -impl TransactionSignScheme for Rococo { - type Chain = Rococo; - type AccountKeyPair = sp_core::sr25519::Pair; - type SignedTransaction = crate::runtime::UncheckedExtrinsic; - - fn sign_transaction(param: SignParam) -> Result { - let raw_payload = SignedPayload::new( - param.unsigned.call.clone(), - bp_rococo::SignedExtensions::new( - param.spec_version, - param.transaction_version, - param.era, - param.genesis_hash, - param.unsigned.nonce, - param.unsigned.tip, - ), - ) - .expect("SignedExtension never fails."); - - let signature = raw_payload.using_encoded(|payload| param.signer.sign(payload)); - let signer: sp_runtime::MultiSigner = param.signer.public().into(); - let (call, extra, _) = raw_payload.deconstruct(); - - Ok(bp_rococo::UncheckedExtrinsic::new_signed( - call, - sp_runtime::MultiAddress::Id(signer.into_account()), - signature.into(), - extra, - )) - } - - fn is_signed(tx: &Self::SignedTransaction) -> bool { - tx.signature.is_some() - } - - fn is_signed_by(signer: &Self::AccountKeyPair, tx: &Self::SignedTransaction) -> bool { - tx.signature - .as_ref() - .map(|(address, _, _)| { - *address == bp_rococo::AccountId::from(*signer.public().as_array_ref()).into() - }) - .unwrap_or(false) - } - - fn parse_transaction(tx: Self::SignedTransaction) -> Option> { - let extra = &tx.signature.as_ref()?.2; - Some(UnsignedTransaction { call: 
tx.function, nonce: extra.nonce(), tip: extra.tip() }) - } -} - -/// Rococo signing params. -pub type SigningParams = sp_core::sr25519::Pair; diff --git a/polkadot/bridges/relays/client-rococo/src/runtime.rs b/polkadot/bridges/relays/client-rococo/src/runtime.rs deleted file mode 100644 index b1380805996..00000000000 --- a/polkadot/bridges/relays/client-rococo/src/runtime.rs +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Types that are specific to the Rococo runtime. - -use bp_messages::{LaneId, UnrewardedRelayersState}; -use bp_polkadot_core::{AccountAddress, Balance, PolkadotLike}; -use bp_runtime::Chain; -use codec::{Compact, Decode, Encode}; -use frame_support::weights::Weight; -use scale_info::TypeInfo; - -/// Unchecked Rococo extrinsic. -pub type UncheckedExtrinsic = bp_polkadot_core::UncheckedExtrinsic; - -/// Wococo account ownership digest from Rococo. -/// -/// The byte vector returned by this function should be signed with a Wococo account private key. -/// This way, the owner of `rococo_account_id` on Rococo proves that the Wococo account private key -/// is also under his control. -pub fn rococo_to_wococo_account_ownership_digest( - wococo_call: &Call, - rococo_account_id: AccountId, - wococo_spec_version: SpecVersion, -) -> Vec -where - Call: codec::Encode, - AccountId: codec::Encode, - SpecVersion: codec::Encode, -{ - pallet_bridge_dispatch::account_ownership_digest( - wococo_call, - rococo_account_id, - wococo_spec_version, - bp_runtime::ROCOCO_CHAIN_ID, - bp_runtime::WOCOCO_CHAIN_ID, - ) -} - -/// Rococo Runtime `Call` enum. -/// -/// The enum represents a subset of possible `Call`s we can send to Rococo chain. -/// Ideally this code would be auto-generated from metadata, because we want to -/// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s. -/// -/// All entries here (like pretty much in the entire file) must be kept in sync with Rococo -/// `construct_runtime`, so that we maintain SCALE-compatibility. -/// -/// See: [link](https://github.com/paritytech/polkadot/blob/master/runtime/rococo/src/lib.rs) -#[allow(clippy::large_enum_variant)] -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -pub enum Call { - /// System pallet. - #[codec(index = 0)] - System(SystemCall), - /// Balances pallet. - #[codec(index = 4)] - Balances(BalancesCall), - /// Wococo bridge pallet. - #[codec(index = 41)] - BridgeGrandpaWococo(BridgeGrandpaWococoCall), - /// Wococo messages pallet. 
- #[codec(index = 44)] - BridgeWococoMessages(BridgeWococoMessagesCall), -} - -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -#[allow(non_camel_case_types)] -pub enum SystemCall { - #[codec(index = 1)] - remark(Vec), -} - -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -#[allow(non_camel_case_types)] -pub enum BalancesCall { - #[codec(index = 0)] - transfer(AccountAddress, Compact), -} - -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -#[allow(non_camel_case_types)] -pub enum BridgeGrandpaWococoCall { - #[codec(index = 0)] - submit_finality_proof( - Box<::Header>, - bp_header_chain::justification::GrandpaJustification<::Header>, - ), - #[codec(index = 1)] - initialize(bp_header_chain::InitializationData<::Header>), -} - -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -#[allow(non_camel_case_types)] -pub enum BridgeWococoMessagesCall { - #[codec(index = 3)] - send_message( - LaneId, - bp_message_dispatch::MessagePayload< - bp_rococo::AccountId, - bp_wococo::AccountId, - bp_wococo::AccountPublic, - Vec, - >, - bp_rococo::Balance, - ), - #[codec(index = 5)] - receive_messages_proof( - bp_wococo::AccountId, - bridge_runtime_common::messages::target::FromBridgedChainMessagesProof, - u32, - Weight, - ), - #[codec(index = 6)] - receive_messages_delivery_proof( - bridge_runtime_common::messages::source::FromBridgedChainMessagesDeliveryProof< - bp_wococo::Hash, - >, - UnrewardedRelayersState, - ), -} - -impl sp_runtime::traits::Dispatchable for Call { - type Origin = (); - type Config = (); - type Info = (); - type PostInfo = (); - - fn dispatch(self, _origin: Self::Origin) -> sp_runtime::DispatchResultWithInfo { - unimplemented!("The Call is not expected to be dispatched.") - } -} diff --git a/polkadot/bridges/relays/client-substrate/Cargo.toml b/polkadot/bridges/relays/client-substrate/Cargo.toml deleted file mode 100644 index dad864965e2..00000000000 --- a/polkadot/bridges/relays/client-substrate/Cargo.toml +++ /dev/null @@ -1,48 +0,0 @@ -[package] -name = "relay-substrate-client" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -async-std = { version = "1.6.5", features = ["attributes"] } -async-trait = "0.1.40" -codec = { package = "parity-scale-codec", version = "3.0.0" } -jsonrpsee = { version = "0.8", features = ["macros", "ws-client"] } -log = "0.4.11" -num-traits = "0.2" -rand = "0.7" -serde = { version = "1.0" } -tokio = { version = "1.8", features = ["rt-multi-thread"] } -thiserror = "1.0.26" - -# Bridge dependencies - -bp-header-chain = { path = "../../primitives/header-chain" } -bp-messages = { path = "../../primitives/messages" } -bp-runtime = { path = "../../primitives/runtime" } -pallet-bridge-messages = { path = "../../modules/messages" } -finality-relay = { path = "../finality" } -relay-utils = { path = "../utils" } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-chain-spec = { git = "https://github.com/paritytech/substrate", branch = "master" } 
-sc-rpc-api = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-transaction-pool-api = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-storage = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-version = { git = "https://github.com/paritytech/substrate", branch = "master" } - -#[dev-dependencies] -futures = "0.3.7" diff --git a/polkadot/bridges/relays/client-substrate/src/chain.rs b/polkadot/bridges/relays/client-substrate/src/chain.rs deleted file mode 100644 index 60adfb0a88a..00000000000 --- a/polkadot/bridges/relays/client-substrate/src/chain.rs +++ /dev/null @@ -1,219 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use bp_messages::MessageNonce; -use bp_runtime::{Chain as ChainBase, EncodedOrDecodedCall, HashOf, TransactionEraOf}; -use codec::{Codec, Encode}; -use frame_support::weights::{Weight, WeightToFeePolynomial}; -use jsonrpsee::core::{DeserializeOwned, Serialize}; -use num_traits::Zero; -use sc_transaction_pool_api::TransactionStatus; -use sp_core::{storage::StorageKey, Pair}; -use sp_runtime::{ - generic::SignedBlock, - traits::{Block as BlockT, Dispatchable, Member}, - EncodedJustification, -}; -use std::{fmt::Debug, time::Duration}; - -/// Substrate-based chain from minimal relay-client point of view. -pub trait Chain: ChainBase + Clone { - /// Chain name. - const NAME: &'static str; - /// Identifier of the basic token of the chain (if applicable). - /// - /// This identifier is used to fetch token price. In case of testnets, you may either - /// set it to `None`, or associate testnet with one of the existing tokens. - const TOKEN_ID: Option<&'static str>; - /// Name of the runtime API method that is returning best known finalized header number - /// and hash (as tuple). - /// - /// Keep in mind that this method is normally provided by the other chain, which is - /// bridged with this chain. - const BEST_FINALIZED_HEADER_ID_METHOD: &'static str; - - /// Average block interval. - /// - /// How often blocks are produced on that chain. It's suggested to set this value - /// to match the block time of the chain. - const AVERAGE_BLOCK_INTERVAL: Duration; - /// Maximal expected storage proof overhead (in bytes). - const STORAGE_PROOF_OVERHEAD: u32; - /// Maximal size (in bytes) of SCALE-encoded account id on this chain. - const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32; - - /// Block type. 
- type SignedBlock: Member + Serialize + DeserializeOwned + BlockWithJustification; - /// The aggregated `Call` type. - type Call: Clone + Codec + Dispatchable + Debug + Send; - - /// Type that is used by the chain, to convert from weight to fee. - type WeightToFee: WeightToFeePolynomial; -} - -/// Substrate-based chain that is using direct GRANDPA finality from minimal relay-client point of -/// view. -/// -/// Keep in mind that parachains are relying on relay chain GRANDPA, so they should not implement -/// this trait. -pub trait ChainWithGrandpa: Chain { - /// Name of the bridge GRANDPA pallet (used in `construct_runtime` macro call) that is deployed - /// at some other chain to bridge with this `ChainWithGrandpa`. - /// - /// We assume that all chains that are bridging with this `ChainWithGrandpa` are using - /// the same name. - const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str; -} - -/// Substrate-based chain with messaging support from minimal relay-client point of view. -pub trait ChainWithMessages: Chain { - /// Name of the bridge messages pallet (used in `construct_runtime` macro call) that is deployed - /// at some other chain to bridge with this `ChainWithMessages`. - /// - /// We assume that all chains that are bridging with this `ChainWithMessages` are using - /// the same name. - const WITH_CHAIN_MESSAGES_PALLET_NAME: &'static str; - - /// Name of the `ToOutboundLaneApi::message_details` runtime API method. - /// The method is provided by the runtime that is bridged with this `ChainWithMessages`. - const TO_CHAIN_MESSAGE_DETAILS_METHOD: &'static str; - - /// Additional weight of the dispatch fee payment if dispatch is paid at the target chain - /// and this `ChainWithMessages` is the target chain. - const PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_CHAIN: Weight; - - /// Maximal number of unrewarded relayers in a single confirmation transaction at this - /// `ChainWithMessages`. - const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce; - /// Maximal number of unconfirmed messages in a single confirmation transaction at this - /// `ChainWithMessages`. - const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce; - - /// Weights of message pallet calls. - type WeightInfo: pallet_bridge_messages::WeightInfoExt; -} - -/// Call type used by the chain. -pub type CallOf = ::Call; -/// Weight-to-Fee type used by the chain. -pub type WeightToFeeOf = ::WeightToFee; -/// Transaction status of the chain. -pub type TransactionStatusOf = TransactionStatus, HashOf>; - -/// Substrate-based chain with `AccountData` generic argument of `frame_system::AccountInfo` set to -/// the `pallet_balances::AccountData`. -pub trait ChainWithBalances: Chain { - /// Return runtime storage key for getting `frame_system::AccountInfo` of given account. - fn account_info_storage_key(account_id: &Self::AccountId) -> StorageKey; -} - -/// SCALE-encoded extrinsic. -pub type EncodedExtrinsic = Vec; - -/// Block with justification. -pub trait BlockWithJustification
<Header>
{ - /// Return block header. - fn header(&self) -> Header; - /// Return encoded block extrinsics. - fn extrinsics(&self) -> Vec; - /// Return block justification, if known. - fn justification(&self) -> Option<&EncodedJustification>; -} - -/// Transaction before it is signed. -#[derive(Clone, Debug, PartialEq)] -pub struct UnsignedTransaction { - /// Runtime call of this transaction. - pub call: EncodedOrDecodedCall, - /// Transaction nonce. - pub nonce: C::Index, - /// Tip included into transaction. - pub tip: C::Balance, -} - -impl UnsignedTransaction { - /// Create new unsigned transaction with given call, nonce and zero tip. - pub fn new(call: EncodedOrDecodedCall, nonce: C::Index) -> Self { - Self { call, nonce, tip: Zero::zero() } - } - - /// Set transaction tip. - pub fn tip(mut self, tip: C::Balance) -> Self { - self.tip = tip; - self - } -} - -/// Account key pair used by transactions signing scheme. -pub type AccountKeyPairOf = ::AccountKeyPair; - -/// Substrate-based chain transactions signing scheme. -pub trait TransactionSignScheme { - /// Chain that this scheme is to be used. - type Chain: Chain; - /// Type of key pairs used to sign transactions. - type AccountKeyPair: Pair; - /// Signed transaction. - type SignedTransaction: Clone + Debug + Codec + Send + 'static; - - /// Create transaction for given runtime call, signed by given account. - fn sign_transaction(param: SignParam) -> Result - where - Self: Sized; - - /// Returns true if transaction is signed. - fn is_signed(tx: &Self::SignedTransaction) -> bool; - - /// Returns true if transaction is signed by given signer. - fn is_signed_by(signer: &Self::AccountKeyPair, tx: &Self::SignedTransaction) -> bool; - - /// Parse signed transaction into its unsigned part. - /// - /// Returns `None` if signed transaction has unsupported format. - fn parse_transaction(tx: Self::SignedTransaction) -> Option>; -} - -/// Sign transaction parameters -pub struct SignParam { - /// Version of the runtime specification. - pub spec_version: u32, - /// Transaction version - pub transaction_version: u32, - /// Hash of the genesis block. - pub genesis_hash: ::Hash, - /// Signer account - pub signer: T::AccountKeyPair, - /// Transaction era used by the chain. - pub era: TransactionEraOf, - /// Transaction before it is signed. - pub unsigned: UnsignedTransaction, -} - -impl BlockWithJustification for SignedBlock { - fn header(&self) -> Block::Header { - self.block.header().clone() - } - - fn extrinsics(&self) -> Vec { - self.block.extrinsics().iter().map(Encode::encode).collect() - } - - fn justification(&self) -> Option<&EncodedJustification> { - self.justifications - .as_ref() - .and_then(|j| j.get(sp_finality_grandpa::GRANDPA_ENGINE_ID)) - } -} diff --git a/polkadot/bridges/relays/client-substrate/src/client.rs b/polkadot/bridges/relays/client-substrate/src/client.rs deleted file mode 100644 index 3725aa3ff45..00000000000 --- a/polkadot/bridges/relays/client-substrate/src/client.rs +++ /dev/null @@ -1,787 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate node client. - -use crate::{ - chain::{Chain, ChainWithBalances, TransactionStatusOf}, - rpc::SubstrateClient, - AccountIdOf, BlockNumberOf, ConnectionParams, Error, HashOf, HeaderIdOf, HeaderOf, IndexOf, - Result, -}; - -use async_std::sync::{Arc, Mutex}; -use async_trait::async_trait; -use codec::{Decode, Encode}; -use frame_system::AccountInfo; -use futures::{SinkExt, StreamExt}; -use jsonrpsee::{ - core::{client::SubscriptionClientT, DeserializeOwned}, - types::params::ParamsSer, - ws_client::{WsClient as RpcClient, WsClientBuilder as RpcClientBuilder}, -}; -use num_traits::{Bounded, CheckedSub, One, Zero}; -use pallet_balances::AccountData; -use pallet_transaction_payment::InclusionFee; -use relay_utils::{relay_loop::RECONNECT_DELAY, HeaderId}; -use sp_core::{ - storage::{StorageData, StorageKey}, - Bytes, Hasher, -}; -use sp_runtime::{ - traits::Header as HeaderT, - transaction_validity::{TransactionSource, TransactionValidity}, -}; -use sp_trie::StorageProof; -use sp_version::RuntimeVersion; -use std::future::Future; - -const SUB_API_GRANDPA_AUTHORITIES: &str = "GrandpaApi_grandpa_authorities"; -const SUB_API_TXPOOL_VALIDATE_TRANSACTION: &str = "TaggedTransactionQueue_validate_transaction"; -const MAX_SUBSCRIPTION_CAPACITY: usize = 4096; - -/// Opaque justifications subscription type. -pub struct Subscription(Mutex>>); - -/// Opaque GRANDPA authorities set. -pub type OpaqueGrandpaAuthoritiesSet = Vec; - -/// Chain runtime version in client -#[derive(Clone, Debug)] -pub enum ChainRuntimeVersion { - /// Auto query from chain. - Auto, - /// Custom runtime version, defined by user. - /// the first is `spec_version` - /// the second is `transaction_version` - Custom(u32, u32), -} - -/// Substrate client type. -/// -/// Cloning `Client` is a cheap operation. -pub struct Client { - /// Tokio runtime handle. - tokio: Arc, - /// Client connection params. - params: ConnectionParams, - /// Substrate RPC client. - client: Arc, - /// Genesis block hash. - genesis_hash: HashOf, - /// If several tasks are submitting their transactions simultaneously using - /// `submit_signed_extrinsic` method, they may get the same transaction nonce. So one of - /// transactions will be rejected from the pool. This lock is here to prevent situations like - /// that. 
- submit_signed_extrinsic_lock: Arc>, - /// Saved chain runtime version - chain_runtime_version: ChainRuntimeVersion, -} - -#[async_trait] -impl relay_utils::relay_loop::Client for Client { - type Error = Error; - - async fn reconnect(&mut self) -> Result<()> { - let (tokio, client) = Self::build_client(self.params.clone()).await?; - self.tokio = tokio; - self.client = client; - Ok(()) - } -} - -impl Clone for Client { - fn clone(&self) -> Self { - Client { - tokio: self.tokio.clone(), - params: self.params.clone(), - client: self.client.clone(), - genesis_hash: self.genesis_hash, - submit_signed_extrinsic_lock: self.submit_signed_extrinsic_lock.clone(), - chain_runtime_version: self.chain_runtime_version.clone(), - } - } -} - -impl std::fmt::Debug for Client { - fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - fmt.debug_struct("Client").field("genesis_hash", &self.genesis_hash).finish() - } -} - -impl Client { - /// Returns client that is able to call RPCs on Substrate node over websocket connection. - /// - /// This function will keep connecting to given Substrate node until connection is established - /// and is functional. If attempt fail, it will wait for `RECONNECT_DELAY` and retry again. - pub async fn new(params: ConnectionParams) -> Self { - loop { - match Self::try_connect(params.clone()).await { - Ok(client) => return client, - Err(error) => log::error!( - target: "bridge", - "Failed to connect to {} node: {:?}. Going to retry in {}s", - C::NAME, - error, - RECONNECT_DELAY.as_secs(), - ), - } - - async_std::task::sleep(RECONNECT_DELAY).await; - } - } - - /// Try to connect to Substrate node over websocket. Returns Substrate RPC client if connection - /// has been established or error otherwise. - pub async fn try_connect(params: ConnectionParams) -> Result { - let (tokio, client) = Self::build_client(params.clone()).await?; - - let number: C::BlockNumber = Zero::zero(); - let genesis_hash_client = client.clone(); - let genesis_hash = tokio - .spawn(async move { - SubstrateClient::< - AccountIdOf, - BlockNumberOf, - HashOf, - HeaderOf, - IndexOf, - C::SignedBlock, - >::chain_get_block_hash(&*genesis_hash_client, Some(number)) - .await - }) - .await??; - - let chain_runtime_version = params.chain_runtime_version.clone(); - Ok(Self { - tokio, - params, - client, - genesis_hash, - submit_signed_extrinsic_lock: Arc::new(Mutex::new(())), - chain_runtime_version, - }) - } - - /// Build client to use in connection. - async fn build_client( - params: ConnectionParams, - ) -> Result<(Arc, Arc)> { - let tokio = tokio::runtime::Runtime::new()?; - let uri = format!( - "{}://{}:{}", - if params.secure { "wss" } else { "ws" }, - params.host, - params.port, - ); - let client = tokio - .spawn(async move { - RpcClientBuilder::default() - .max_notifs_per_subscription(MAX_SUBSCRIPTION_CAPACITY) - .build(&uri) - .await - }) - .await??; - - Ok((Arc::new(tokio), Arc::new(client))) - } -} - -impl Client { - /// Return simple runtime version, only include `spec_version` and `transaction_version`. 
- pub async fn simple_runtime_version(&self) -> Result<(u32, u32)> { - let (spec_version, transaction_version) = match self.chain_runtime_version { - ChainRuntimeVersion::Auto => { - let runtime_version = self.runtime_version().await?; - (runtime_version.spec_version, runtime_version.transaction_version) - }, - ChainRuntimeVersion::Custom(spec_version, transaction_version) => - (spec_version, transaction_version), - }; - Ok((spec_version, transaction_version)) - } - - /// Returns true if client is connected to at least one peer and is in synced state. - pub async fn ensure_synced(&self) -> Result<()> { - self.jsonrpsee_execute(|client| async move { - let health = SubstrateClient::< - AccountIdOf, - BlockNumberOf, - HashOf, - HeaderOf, - IndexOf, - C::SignedBlock, - >::system_health(&*client) - .await?; - let is_synced = !health.is_syncing && (!health.should_have_peers || health.peers > 0); - if is_synced { - Ok(()) - } else { - Err(Error::ClientNotSynced(health)) - } - }) - .await - } - - /// Return hash of the genesis block. - pub fn genesis_hash(&self) -> &C::Hash { - &self.genesis_hash - } - - /// Return hash of the best finalized block. - pub async fn best_finalized_header_hash(&self) -> Result { - self.jsonrpsee_execute(|client| async move { - Ok(SubstrateClient::< - AccountIdOf, - BlockNumberOf, - HashOf, - HeaderOf, - IndexOf, - C::SignedBlock, - >::chain_get_finalized_head(&*client) - .await?) - }) - .await - } - - /// Return number of the best finalized block. - pub async fn best_finalized_header_number(&self) -> Result { - Ok(*self.header_by_hash(self.best_finalized_header_hash().await?).await?.number()) - } - - /// Returns the best Substrate header. - pub async fn best_header(&self) -> Result - where - C::Header: DeserializeOwned, - { - self.jsonrpsee_execute(|client| async move { - Ok(SubstrateClient::< - AccountIdOf, - BlockNumberOf, - HashOf, - HeaderOf, - IndexOf, - C::SignedBlock, - >::chain_get_header(&*client, None) - .await?) - }) - .await - } - - /// Get a Substrate block from its hash. - pub async fn get_block(&self, block_hash: Option) -> Result { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateClient::< - AccountIdOf, - BlockNumberOf, - HashOf, - HeaderOf, - IndexOf, - C::SignedBlock, - >::chain_get_block(&*client, block_hash) - .await?) - }) - .await - } - - /// Get a Substrate header by its hash. - pub async fn header_by_hash(&self, block_hash: C::Hash) -> Result - where - C::Header: DeserializeOwned, - { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateClient::< - AccountIdOf, - BlockNumberOf, - HashOf, - HeaderOf, - IndexOf, - C::SignedBlock, - >::chain_get_header(&*client, Some(block_hash)) - .await?) - }) - .await - } - - /// Get a Substrate block hash by its number. - pub async fn block_hash_by_number(&self, number: C::BlockNumber) -> Result { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateClient::< - AccountIdOf, - BlockNumberOf, - HashOf, - HeaderOf, - IndexOf, - C::SignedBlock, - >::chain_get_block_hash(&*client, Some(number)) - .await?) - }) - .await - } - - /// Get a Substrate header by its number. - pub async fn header_by_number(&self, block_number: C::BlockNumber) -> Result - where - C::Header: DeserializeOwned, - { - let block_hash = Self::block_hash_by_number(self, block_number).await?; - let header_by_hash = Self::header_by_hash(self, block_hash).await?; - Ok(header_by_hash) - } - - /// Return runtime version. 
- pub async fn runtime_version(&self) -> Result { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateClient::< - AccountIdOf, - BlockNumberOf, - HashOf, - HeaderOf, - IndexOf, - C::SignedBlock, - >::state_runtime_version(&*client) - .await?) - }) - .await - } - - /// Read value from runtime storage. - pub async fn storage_value( - &self, - storage_key: StorageKey, - block_hash: Option, - ) -> Result> { - self.raw_storage_value(storage_key, block_hash) - .await? - .map(|encoded_value| { - T::decode(&mut &encoded_value.0[..]).map_err(Error::ResponseParseFailed) - }) - .transpose() - } - - /// Read raw value from runtime storage. - pub async fn raw_storage_value( - &self, - storage_key: StorageKey, - block_hash: Option, - ) -> Result> { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateClient::< - AccountIdOf, - BlockNumberOf, - HashOf, - HeaderOf, - IndexOf, - C::SignedBlock, - >::state_get_storage(&*client, storage_key, block_hash) - .await?) - }) - .await - } - - /// Return native tokens balance of the account. - pub async fn free_native_balance(&self, account: C::AccountId) -> Result - where - C: ChainWithBalances, - { - self.jsonrpsee_execute(move |client| async move { - let storage_key = C::account_info_storage_key(&account); - let encoded_account_data = SubstrateClient::< - AccountIdOf, - BlockNumberOf, - HashOf, - HeaderOf, - IndexOf, - C::SignedBlock, - >::state_get_storage(&*client, storage_key, None) - .await? - .ok_or(Error::AccountDoesNotExist)?; - let decoded_account_data = AccountInfo::>::decode( - &mut &encoded_account_data.0[..], - ) - .map_err(Error::ResponseParseFailed)?; - Ok(decoded_account_data.data.free) - }) - .await - } - - /// Get the nonce of the given Substrate account. - /// - /// Note: It's the caller's responsibility to make sure `account` is a valid SS58 address. - pub async fn next_account_index(&self, account: C::AccountId) -> Result { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateClient::< - AccountIdOf, - BlockNumberOf, - HashOf, - HeaderOf, - IndexOf, - C::SignedBlock, - >::system_account_next_index(&*client, account) - .await?) - }) - .await - } - - /// Submit unsigned extrinsic for inclusion in a block. - /// - /// Note: The given transaction needs to be SCALE encoded beforehand. - pub async fn submit_unsigned_extrinsic(&self, transaction: Bytes) -> Result { - self.jsonrpsee_execute(move |client| async move { - let tx_hash = SubstrateClient::< - AccountIdOf, - BlockNumberOf, - HashOf, - HeaderOf, - IndexOf, - C::SignedBlock, - >::author_submit_extrinsic(&*client, transaction) - .await?; - log::trace!(target: "bridge", "Sent transaction to Substrate node: {:?}", tx_hash); - Ok(tx_hash) - }) - .await - } - - /// Submit an extrinsic signed by given account. - /// - /// All calls of this method are synchronized, so there can't be more than one active - /// `submit_signed_extrinsic()` call. This guarantees that no nonces collision may happen - /// if all client instances are clones of the same initial `Client`. - /// - /// Note: The given transaction needs to be SCALE encoded beforehand. 
- pub async fn submit_signed_extrinsic( - &self, - extrinsic_signer: C::AccountId, - prepare_extrinsic: impl FnOnce(HeaderIdOf, C::Index) -> Result + Send + 'static, - ) -> Result { - let _guard = self.submit_signed_extrinsic_lock.lock().await; - let transaction_nonce = self.next_account_index(extrinsic_signer).await?; - let best_header = self.best_header().await?; - - // By using parent of best block here, we are protecing again best-block reorganizations. - // E.g. transaction my have been submitted when the best block was `A[num=100]`. Then it has - // been changed to `B[num=100]`. Hash of `A` has been included into transaction signature - // payload. So when signature will be checked, the check will fail and transaction will be - // dropped from the pool. - let best_header_id = match best_header.number().checked_sub(&One::one()) { - Some(parent_block_number) => HeaderId(parent_block_number, *best_header.parent_hash()), - None => HeaderId(*best_header.number(), best_header.hash()), - }; - - self.jsonrpsee_execute(move |client| async move { - let extrinsic = prepare_extrinsic(best_header_id, transaction_nonce)?; - let tx_hash = SubstrateClient::< - AccountIdOf, - BlockNumberOf, - HashOf, - HeaderOf, - IndexOf, - C::SignedBlock, - >::author_submit_extrinsic(&*client, extrinsic) - .await?; - log::trace!(target: "bridge", "Sent transaction to {} node: {:?}", C::NAME, tx_hash); - Ok(tx_hash) - }) - .await - } - - /// Does exactly the same as `submit_signed_extrinsic`, but keeps watching for extrinsic status - /// after submission. - pub async fn submit_and_watch_signed_extrinsic( - &self, - extrinsic_signer: C::AccountId, - prepare_extrinsic: impl FnOnce(HeaderIdOf, C::Index) -> Result + Send + 'static, - ) -> Result>> { - let _guard = self.submit_signed_extrinsic_lock.lock().await; - let transaction_nonce = self.next_account_index(extrinsic_signer).await?; - let best_header = self.best_header().await?; - let best_header_id = HeaderId(*best_header.number(), best_header.hash()); - let subscription = self - .jsonrpsee_execute(move |client| async move { - let extrinsic = prepare_extrinsic(best_header_id, transaction_nonce)?; - let tx_hash = C::Hasher::hash(&extrinsic.0); - let subscription = client - .subscribe( - "author_submitAndWatchExtrinsic", - Some(ParamsSer::Array(vec![jsonrpsee::core::to_json_value(extrinsic) - .map_err(|e| Error::RpcError(e.into()))?])), - "author_unwatchExtrinsic", - ) - .await?; - log::trace!(target: "bridge", "Sent transaction to {} node: {:?}", C::NAME, tx_hash); - Ok(subscription) - }) - .await?; - let (sender, receiver) = futures::channel::mpsc::channel(MAX_SUBSCRIPTION_CAPACITY); - self.tokio.spawn(Subscription::background_worker( - C::NAME.into(), - "extrinsic".into(), - subscription, - sender, - )); - Ok(Subscription(Mutex::new(receiver))) - } - - /// Returns pending extrinsics from transaction pool. - pub async fn pending_extrinsics(&self) -> Result> { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateClient::< - AccountIdOf, - BlockNumberOf, - HashOf, - HeaderOf, - IndexOf, - C::SignedBlock, - >::author_pending_extrinsics(&*client) - .await?) - }) - .await - } - - /// Validate transaction at given block state. 
- pub async fn validate_transaction( - &self, - at_block: C::Hash, - transaction: SignedTransaction, - ) -> Result { - self.jsonrpsee_execute(move |client| async move { - let call = SUB_API_TXPOOL_VALIDATE_TRANSACTION.to_string(); - let data = Bytes((TransactionSource::External, transaction, at_block).encode()); - - let encoded_response = SubstrateClient::< - AccountIdOf, - BlockNumberOf, - HashOf, - HeaderOf, - IndexOf, - C::SignedBlock, - >::state_call(&*client, call, data, Some(at_block)) - .await?; - let validity = TransactionValidity::decode(&mut &encoded_response.0[..]) - .map_err(Error::ResponseParseFailed)?; - - Ok(validity) - }) - .await - } - - /// Estimate fee that will be spent on given extrinsic. - pub async fn estimate_extrinsic_fee( - &self, - transaction: Bytes, - ) -> Result> { - self.jsonrpsee_execute(move |client| async move { - let fee_details = SubstrateClient::< - AccountIdOf, - BlockNumberOf, - HashOf, - HeaderOf, - IndexOf, - C::SignedBlock, - >::payment_query_fee_details(&*client, transaction, None) - .await?; - let inclusion_fee = fee_details - .inclusion_fee - .map(|inclusion_fee| InclusionFee { - base_fee: C::Balance::try_from(inclusion_fee.base_fee.into_u256()) - .unwrap_or_else(|_| C::Balance::max_value()), - len_fee: C::Balance::try_from(inclusion_fee.len_fee.into_u256()) - .unwrap_or_else(|_| C::Balance::max_value()), - adjusted_weight_fee: C::Balance::try_from( - inclusion_fee.adjusted_weight_fee.into_u256(), - ) - .unwrap_or_else(|_| C::Balance::max_value()), - }) - .unwrap_or_else(|| InclusionFee { - base_fee: Zero::zero(), - len_fee: Zero::zero(), - adjusted_weight_fee: Zero::zero(), - }); - Ok(inclusion_fee) - }) - .await - } - - /// Get the GRANDPA authority set at given block. - pub async fn grandpa_authorities_set( - &self, - block: C::Hash, - ) -> Result { - self.jsonrpsee_execute(move |client| async move { - let call = SUB_API_GRANDPA_AUTHORITIES.to_string(); - let data = Bytes(Vec::new()); - - let encoded_response = SubstrateClient::< - AccountIdOf, - BlockNumberOf, - HashOf, - HeaderOf, - IndexOf, - C::SignedBlock, - >::state_call(&*client, call, data, Some(block)) - .await?; - let authority_list = encoded_response.0; - - Ok(authority_list) - }) - .await - } - - /// Execute runtime call at given block. - pub async fn state_call( - &self, - method: String, - data: Bytes, - at_block: Option, - ) -> Result { - self.jsonrpsee_execute(move |client| async move { - SubstrateClient::< - AccountIdOf, - BlockNumberOf, - HashOf, - HeaderOf, - IndexOf, - C::SignedBlock, - >::state_call(&*client, method, data, at_block) - .await - .map_err(Into::into) - }) - .await - } - - /// Returns storage proof of given storage keys. - pub async fn prove_storage( - &self, - keys: Vec, - at_block: C::Hash, - ) -> Result { - self.jsonrpsee_execute(move |client| async move { - SubstrateClient::< - AccountIdOf, - BlockNumberOf, - HashOf, - HeaderOf, - IndexOf, - C::SignedBlock, - >::state_prove_storage(&*client, keys, Some(at_block)) - .await - .map(|proof| { - StorageProof::new(proof.proof.into_iter().map(|b| b.0).collect::>()) - }) - .map_err(Into::into) - }) - .await - } - - /// Return `tokenDecimals` property from the set of chain properties. 
- pub async fn token_decimals(&self) -> Result> { - self.jsonrpsee_execute(move |client| async move { - let system_properties = SubstrateClient::< - AccountIdOf, - BlockNumberOf, - HashOf, - HeaderOf, - IndexOf, - C::SignedBlock, - >::system_properties(&*client) - .await?; - Ok(system_properties.get("tokenDecimals").and_then(|v| v.as_u64())) - }) - .await - } - - /// Return new justifications stream. - pub async fn subscribe_justifications(&self) -> Result> { - let subscription = self - .jsonrpsee_execute(move |client| async move { - Ok(client - .subscribe( - "grandpa_subscribeJustifications", - None, - "grandpa_unsubscribeJustifications", - ) - .await?) - }) - .await?; - let (sender, receiver) = futures::channel::mpsc::channel(MAX_SUBSCRIPTION_CAPACITY); - self.tokio.spawn(Subscription::background_worker( - C::NAME.into(), - "justification".into(), - subscription, - sender, - )); - Ok(Subscription(Mutex::new(receiver))) - } - - /// Execute jsonrpsee future in tokio context. - async fn jsonrpsee_execute(&self, make_jsonrpsee_future: MF) -> Result - where - MF: FnOnce(Arc) -> F + Send + 'static, - F: Future> + Send, - T: Send + 'static, - { - let client = self.client.clone(); - self.tokio.spawn(async move { make_jsonrpsee_future(client).await }).await? - } -} - -impl Subscription { - /// Return next item from the subscription. - pub async fn next(&self) -> Result> { - let mut receiver = self.0.lock().await; - let item = receiver.next().await; - Ok(item.unwrap_or(None)) - } - - /// Background worker that is executed in tokio context as `jsonrpsee` requires. - async fn background_worker( - chain_name: String, - item_type: String, - mut subscription: jsonrpsee::core::client::Subscription, - mut sender: futures::channel::mpsc::Sender>, - ) { - loop { - match subscription.next().await { - Some(Ok(item)) => - if sender.send(Some(item)).await.is_err() { - break - }, - Some(Err(e)) => { - log::trace!( - target: "bridge", - "{} {} subscription stream has returned '{:?}'. Stream needs to be restarted.", - chain_name, - item_type, - e, - ); - let _ = sender.send(None).await; - break - }, - None => { - log::trace!( - target: "bridge", - "{} {} subscription stream has returned None. Stream needs to be restarted.", - chain_name, - item_type, - ); - let _ = sender.send(None).await; - break - }, - } - } - } -} diff --git a/polkadot/bridges/relays/client-substrate/src/error.rs b/polkadot/bridges/relays/client-substrate/src/error.rs deleted file mode 100644 index e698f2596c5..00000000000 --- a/polkadot/bridges/relays/client-substrate/src/error.rs +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate node RPC errors. 
- -use jsonrpsee::core::Error as RpcError; -use relay_utils::MaybeConnectionError; -use sc_rpc_api::system::Health; -use sp_runtime::transaction_validity::TransactionValidityError; -use thiserror::Error; - -/// Result type used by Substrate client. -pub type Result = std::result::Result; - -/// Errors that can occur only when interacting with -/// a Substrate node through RPC. -#[derive(Error, Debug)] -pub enum Error { - /// IO error. - #[error("IO error: {0}")] - Io(#[from] std::io::Error), - /// An error that can occur when making a request to - /// an JSON-RPC server. - #[error("RPC error: {0}")] - RpcError(#[from] RpcError), - /// The response from the server could not be SCALE decoded. - #[error("Response parse failed: {0}")] - ResponseParseFailed(#[from] codec::Error), - /// The Substrate bridge pallet has not yet been initialized. - #[error("The Substrate bridge pallet has not been initialized yet.")] - UninitializedBridgePallet, - /// Account does not exist on the chain. - #[error("Account does not exist on the chain.")] - AccountDoesNotExist, - /// Runtime storage is missing mandatory ":code:" entry. - #[error("Mandatory :code: entry is missing from runtime storage.")] - MissingMandatoryCodeEntry, - /// The client we're connected to is not synced, so we can't rely on its state. - #[error("Substrate client is not synced {0}.")] - ClientNotSynced(Health), - /// The bridge pallet is halted and all transactions will be rejected. - #[error("Bridge pallet is halted.")] - BridgePalletIsHalted, - /// An error has happened when we have tried to parse storage proof. - #[error("Error when parsing storage proof: {0:?}.")] - StorageProofError(bp_runtime::StorageProofError), - /// The Substrate transaction is invalid. - #[error("Substrate transaction is invalid: {0:?}")] - TransactionInvalid(#[from] TransactionValidityError), - /// Custom logic error. - #[error("{0}")] - Custom(String), -} - -impl From for Error { - fn from(error: tokio::task::JoinError) -> Self { - Error::Custom(format!("Failed to wait tokio task: {}", error)) - } -} - -impl MaybeConnectionError for Error { - fn is_connection_error(&self) -> bool { - matches!( - *self, - Error::RpcError(RpcError::Transport(_)) - // right now if connection to the ws server is dropped (after it is already established), - // we're getting this error - | Error::RpcError(RpcError::Internal(_)) - | Error::RpcError(RpcError::RestartNeeded(_)) - | Error::ClientNotSynced(_), - ) - } -} diff --git a/polkadot/bridges/relays/client-substrate/src/guard.rs b/polkadot/bridges/relays/client-substrate/src/guard.rs deleted file mode 100644 index 359a3f69d8a..00000000000 --- a/polkadot/bridges/relays/client-substrate/src/guard.rs +++ /dev/null @@ -1,415 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! 
Pallet provides a set of guard functions that are running in background threads -//! and are aborting process if some condition fails. - -use crate::{error::Error, Chain, ChainWithBalances, Client}; - -use async_trait::async_trait; -use num_traits::CheckedSub; -use sp_version::RuntimeVersion; -use std::{ - collections::VecDeque, - fmt::Display, - time::{Duration, Instant}, -}; - -/// Guards environment. -#[async_trait] -pub trait Environment: Send + Sync + 'static { - /// Error type. - type Error: Display + Send + Sync + 'static; - - /// Return current runtime version. - async fn runtime_version(&mut self) -> Result; - /// Return free native balance of the account on the chain. - async fn free_native_balance( - &mut self, - account: C::AccountId, - ) -> Result; - - /// Return current time. - fn now(&self) -> Instant { - Instant::now() - } - - /// Sleep given amount of time. - async fn sleep(&mut self, duration: Duration) { - async_std::task::sleep(duration).await - } - - /// Abort current process. Called when guard condition check fails. - async fn abort(&mut self) { - std::process::abort(); - } -} - -/// Abort when runtime spec version is different from specified. -pub fn abort_on_spec_version_change( - mut env: impl Environment, - expected_spec_version: u32, -) { - async_std::task::spawn(async move { - log::info!( - target: "bridge-guard", - "Starting spec_version guard for {}. Expected spec_version: {}", - C::NAME, - expected_spec_version, - ); - - loop { - let actual_spec_version = env.runtime_version().await; - match actual_spec_version { - Ok(version) if version.spec_version == expected_spec_version => (), - Ok(version) => { - log::error!( - target: "bridge-guard", - "{} runtime spec version has changed from {} to {}. Aborting relay", - C::NAME, - expected_spec_version, - version.spec_version, - ); - - env.abort().await; - }, - Err(error) => log::warn!( - target: "bridge-guard", - "Failed to read {} runtime version: {}. Relay may need to be stopped manually", - C::NAME, - error, - ), - } - - env.sleep(conditions_check_delay::()).await; - } - }); -} - -/// Abort if, during 24 hours, free balance of given account is decreased at least by given value. -/// Other components may increase (or decrease) balance of account and it WILL affect logic of the -/// guard. -pub fn abort_when_account_balance_decreased( - mut env: impl Environment, - account_id: C::AccountId, - maximal_decrease: C::Balance, -) { - const DAY: Duration = Duration::from_secs(60 * 60 * 24); - - async_std::task::spawn(async move { - log::info!( - target: "bridge-guard", - "Starting balance guard for {}/{:?}. 
Maximal decrease: {:?}", - C::NAME, - account_id, - maximal_decrease, - ); - - let mut balances = VecDeque::new(); - - loop { - let current_time = env.now(); - - // remember balances that are beyound 24h border - let time_border = current_time - DAY; - while balances.front().map(|(time, _)| *time < time_border).unwrap_or(false) { - balances.pop_front(); - } - - // read balance of the account - let current_balance = env.free_native_balance(account_id.clone()).await; - - // remember balance and check difference - match current_balance { - Ok(current_balance) => { - // remember balance - balances.push_back((current_time, current_balance)); - - // check if difference between current and oldest balance is too large - let (oldest_time, oldest_balance) = - balances.front().expect("pushed to queue couple of lines above; qed"); - let balances_difference = oldest_balance.checked_sub(¤t_balance); - if balances_difference > Some(maximal_decrease) { - log::error!( - target: "bridge-guard", - "Balance of {} account {:?} has decreased from {:?} to {:?} in {} minutes. Aborting relay", - C::NAME, - account_id, - oldest_balance, - current_balance, - current_time.duration_since(*oldest_time).as_secs() / 60, - ); - - env.abort().await; - } - }, - Err(error) => { - log::warn!( - target: "bridge-guard", - "Failed to read {} account {:?} balance: {}. Relay may need to be stopped manually", - C::NAME, - account_id, - error, - ); - }, - }; - - env.sleep(conditions_check_delay::()).await; - } - }); -} - -/// Delay between conditions check. -fn conditions_check_delay() -> Duration { - C::AVERAGE_BLOCK_INTERVAL * (10 + rand::random::() % 10) -} - -#[async_trait] -impl Environment for Client { - type Error = Error; - - async fn runtime_version(&mut self) -> Result { - Client::::runtime_version(self).await - } - - async fn free_native_balance( - &mut self, - account: C::AccountId, - ) -> Result { - Client::::free_native_balance(self, account).await - } -} - -#[cfg(test)] -mod tests { - use super::*; - use frame_support::weights::{IdentityFee, Weight}; - use futures::{ - channel::mpsc::{unbounded, UnboundedReceiver, UnboundedSender}, - future::FutureExt, - stream::StreamExt, - SinkExt, - }; - - #[derive(Debug, Clone)] - struct TestChain; - - impl bp_runtime::Chain for TestChain { - type BlockNumber = u32; - type Hash = sp_core::H256; - type Hasher = sp_runtime::traits::BlakeTwo256; - type Header = sp_runtime::generic::Header; - - type AccountId = u32; - type Balance = u32; - type Index = u32; - type Signature = sp_runtime::testing::TestSignature; - - fn max_extrinsic_size() -> u32 { - unreachable!() - } - fn max_extrinsic_weight() -> Weight { - unreachable!() - } - } - - impl Chain for TestChain { - const NAME: &'static str = "Test"; - const TOKEN_ID: Option<&'static str> = None; - const BEST_FINALIZED_HEADER_ID_METHOD: &'static str = "BestTestHeader"; - const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_millis(1); - const STORAGE_PROOF_OVERHEAD: u32 = 0; - const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = 0; - - type SignedBlock = sp_runtime::generic::SignedBlock< - sp_runtime::generic::Block, - >; - type Call = (); - type WeightToFee = IdentityFee; - } - - impl ChainWithBalances for TestChain { - fn account_info_storage_key(_account_id: &u32) -> sp_core::storage::StorageKey { - unreachable!() - } - } - - struct TestEnvironment { - runtime_version_rx: UnboundedReceiver, - free_native_balance_rx: UnboundedReceiver, - slept_tx: UnboundedSender<()>, - aborted_tx: UnboundedSender<()>, - } - - #[async_trait] - impl 
Environment for TestEnvironment { - type Error = Error; - - async fn runtime_version(&mut self) -> Result { - Ok(self.runtime_version_rx.next().await.unwrap_or_default()) - } - - async fn free_native_balance(&mut self, _account: u32) -> Result { - Ok(self.free_native_balance_rx.next().await.unwrap_or_default()) - } - - async fn sleep(&mut self, _duration: Duration) { - let _ = self.slept_tx.send(()).await; - } - - async fn abort(&mut self) { - let _ = self.aborted_tx.send(()).await; - // simulate process abort :) - async_std::task::sleep(Duration::from_secs(60)).await; - } - } - - #[test] - fn aborts_when_spec_version_is_changed() { - async_std::task::block_on(async { - let ( - (mut runtime_version_tx, runtime_version_rx), - (_free_native_balance_tx, free_native_balance_rx), - (slept_tx, mut slept_rx), - (aborted_tx, mut aborted_rx), - ) = (unbounded(), unbounded(), unbounded(), unbounded()); - abort_on_spec_version_change( - TestEnvironment { - runtime_version_rx, - free_native_balance_rx, - slept_tx, - aborted_tx, - }, - 0, - ); - - // client responds with wrong version - runtime_version_tx - .send(RuntimeVersion { spec_version: 42, ..Default::default() }) - .await - .unwrap(); - - // then the `abort` function is called - aborted_rx.next().await; - // and we do not reach the `sleep` function call - assert!(slept_rx.next().now_or_never().is_none()); - }); - } - - #[test] - fn does_not_aborts_when_spec_version_is_unchanged() { - async_std::task::block_on(async { - let ( - (mut runtime_version_tx, runtime_version_rx), - (_free_native_balance_tx, free_native_balance_rx), - (slept_tx, mut slept_rx), - (aborted_tx, mut aborted_rx), - ) = (unbounded(), unbounded(), unbounded(), unbounded()); - abort_on_spec_version_change( - TestEnvironment { - runtime_version_rx, - free_native_balance_rx, - slept_tx, - aborted_tx, - }, - 42, - ); - - // client responds with the same version - runtime_version_tx - .send(RuntimeVersion { spec_version: 42, ..Default::default() }) - .await - .unwrap(); - - // then the `sleep` function is called - slept_rx.next().await; - // and the `abort` function is not called - assert!(aborted_rx.next().now_or_never().is_none()); - }); - } - - #[test] - fn aborts_when_balance_is_too_low() { - async_std::task::block_on(async { - let ( - (_runtime_version_tx, runtime_version_rx), - (mut free_native_balance_tx, free_native_balance_rx), - (slept_tx, mut slept_rx), - (aborted_tx, mut aborted_rx), - ) = (unbounded(), unbounded(), unbounded(), unbounded()); - abort_when_account_balance_decreased( - TestEnvironment { - runtime_version_rx, - free_native_balance_rx, - slept_tx, - aborted_tx, - }, - 0, - 100, - ); - - // client responds with initial balance - free_native_balance_tx.send(1000).await.unwrap(); - - // then the guard sleeps - slept_rx.next().await; - - // and then client responds with updated balance, which is too low - free_native_balance_tx.send(899).await.unwrap(); - - // then the `abort` function is called - aborted_rx.next().await; - // and we do not reach next `sleep` function call - assert!(slept_rx.next().now_or_never().is_none()); - }); - } - - #[test] - fn does_not_aborts_when_balance_is_enough() { - async_std::task::block_on(async { - let ( - (_runtime_version_tx, runtime_version_rx), - (mut free_native_balance_tx, free_native_balance_rx), - (slept_tx, mut slept_rx), - (aborted_tx, mut aborted_rx), - ) = (unbounded(), unbounded(), unbounded(), unbounded()); - abort_when_account_balance_decreased( - TestEnvironment { - runtime_version_rx, - free_native_balance_rx, - 
slept_tx, - aborted_tx, - }, - 0, - 100, - ); - - // client responds with initial balance - free_native_balance_tx.send(1000).await.unwrap(); - - // then the guard sleeps - slept_rx.next().await; - - // and then client responds with updated balance, which is enough - free_native_balance_tx.send(950).await.unwrap(); - - // then the `sleep` function is called - slept_rx.next().await; - // and `abort` is not called - assert!(aborted_rx.next().now_or_never().is_none()); - }); - } -} diff --git a/polkadot/bridges/relays/client-substrate/src/lib.rs b/polkadot/bridges/relays/client-substrate/src/lib.rs deleted file mode 100644 index b3a7ec41419..00000000000 --- a/polkadot/bridges/relays/client-substrate/src/lib.rs +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Tools to interact with Substrate node using RPC methods. - -#![warn(missing_docs)] - -mod chain; -mod client; -mod error; -mod rpc; -mod sync_header; - -pub mod guard; -pub mod metrics; - -use std::time::Duration; - -pub use crate::{ - chain::{ - AccountKeyPairOf, BlockWithJustification, CallOf, Chain, ChainWithBalances, - ChainWithGrandpa, ChainWithMessages, SignParam, TransactionSignScheme, TransactionStatusOf, - UnsignedTransaction, WeightToFeeOf, - }, - client::{ChainRuntimeVersion, Client, OpaqueGrandpaAuthoritiesSet, Subscription}, - error::{Error, Result}, - sync_header::SyncHeader, -}; -pub use bp_runtime::{ - AccountIdOf, AccountPublicOf, BalanceOf, BlockNumberOf, Chain as ChainBase, HashOf, HeaderOf, - IndexOf, SignatureOf, TransactionEra, TransactionEraOf, -}; - -/// Header id used by the chain. -pub type HeaderIdOf = relay_utils::HeaderId, BlockNumberOf>; - -/// Substrate-over-websocket connection params. -#[derive(Debug, Clone)] -pub struct ConnectionParams { - /// Websocket server host name. - pub host: String, - /// Websocket server TCP port. - pub port: u16, - /// Use secure websocket connection. - pub secure: bool, - /// Defined chain runtime version - pub chain_runtime_version: ChainRuntimeVersion, -} - -impl Default for ConnectionParams { - fn default() -> Self { - ConnectionParams { - host: "localhost".into(), - port: 9944, - secure: false, - chain_runtime_version: ChainRuntimeVersion::Auto, - } - } -} - -/// Returns stall timeout for relay loop. -/// -/// Relay considers himself stalled if he has submitted transaction to the node, but it has not -/// been mined for this period. 
-pub fn transaction_stall_timeout( - mortality_period: Option, - average_block_interval: Duration, - default_stall_timeout: Duration, -) -> Duration { - // 1 extra block for transaction to reach the pool && 1 for relayer to awake after it is mined - mortality_period - .map(|mortality_period| average_block_interval.saturating_mul(mortality_period + 1 + 1)) - .unwrap_or(default_stall_timeout) -} - -/// Returns stall timeout for relay loop that submit transactions to two chains. -/// -/// Bidirectional relay may have two active transactions. Even if one of them has been spoiled, we -/// can't just restart the loop - the other transaction may still be alive and we'll be submitting -/// duplicate transaction, which may result in funds loss. So we'll be selecting maximal mortality -/// for choosing loop stall timeout. -pub fn bidirectional_transaction_stall_timeout( - left_mortality_period: Option, - right_mortality_period: Option, - left_average_block_interval: Duration, - right_average_block_interval: Duration, - default_stall_timeout: Duration, -) -> Duration { - std::cmp::max( - transaction_stall_timeout( - left_mortality_period, - left_average_block_interval, - default_stall_timeout, - ), - transaction_stall_timeout( - right_mortality_period, - right_average_block_interval, - default_stall_timeout, - ), - ) -} diff --git a/polkadot/bridges/relays/client-substrate/src/metrics/float_storage_value.rs b/polkadot/bridges/relays/client-substrate/src/metrics/float_storage_value.rs deleted file mode 100644 index 7bb92693b38..00000000000 --- a/polkadot/bridges/relays/client-substrate/src/metrics/float_storage_value.rs +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::{chain::Chain, client::Client, Error as SubstrateError}; - -use async_std::sync::{Arc, RwLock}; -use async_trait::async_trait; -use codec::Decode; -use num_traits::One; -use relay_utils::metrics::{ - metric_name, register, F64SharedRef, Gauge, Metric, PrometheusError, Registry, - StandaloneMetric, F64, -}; -use sp_core::storage::{StorageData, StorageKey}; -use sp_runtime::{traits::UniqueSaturatedInto, FixedPointNumber, FixedU128}; -use std::{marker::PhantomData, time::Duration}; - -/// Storage value update interval (in blocks). -const UPDATE_INTERVAL_IN_BLOCKS: u32 = 5; - -/// Fied-point storage value and the way it is decoded from the raw storage value. -pub trait FloatStorageValue: 'static + Clone + Send + Sync { - /// Type of the value. - type Value: FixedPointNumber; - /// Try to decode value from the raw storage value. - fn decode( - &self, - maybe_raw_value: Option, - ) -> Result, SubstrateError>; -} - -/// Implementation of `FloatStorageValue` that expects encoded `FixedU128` value and returns `1` if -/// value is missing from the storage. 
-#[derive(Clone, Debug, Default)] -pub struct FixedU128OrOne; - -impl FloatStorageValue for FixedU128OrOne { - type Value = FixedU128; - - fn decode( - &self, - maybe_raw_value: Option, - ) -> Result, SubstrateError> { - maybe_raw_value - .map(|raw_value| { - FixedU128::decode(&mut &raw_value.0[..]) - .map_err(SubstrateError::ResponseParseFailed) - .map(Some) - }) - .unwrap_or_else(|| Ok(Some(FixedU128::one()))) - } -} - -/// Metric that represents fixed-point runtime storage value as float gauge. -#[derive(Clone, Debug)] -pub struct FloatStorageValueMetric { - value_converter: V, - client: Client, - storage_key: StorageKey, - metric: Gauge, - shared_value_ref: F64SharedRef, - _phantom: PhantomData, -} - -impl FloatStorageValueMetric { - /// Create new metric. - pub fn new( - value_converter: V, - client: Client, - storage_key: StorageKey, - name: String, - help: String, - ) -> Result { - let shared_value_ref = Arc::new(RwLock::new(None)); - Ok(FloatStorageValueMetric { - value_converter, - client, - storage_key, - metric: Gauge::new(metric_name(None, &name), help)?, - shared_value_ref, - _phantom: Default::default(), - }) - } - - /// Get shared reference to metric value. - pub fn shared_value_ref(&self) -> F64SharedRef { - self.shared_value_ref.clone() - } -} - -impl Metric for FloatStorageValueMetric { - fn register(&self, registry: &Registry) -> Result<(), PrometheusError> { - register(self.metric.clone(), registry).map(drop) - } -} - -#[async_trait] -impl StandaloneMetric for FloatStorageValueMetric { - fn update_interval(&self) -> Duration { - C::AVERAGE_BLOCK_INTERVAL * UPDATE_INTERVAL_IN_BLOCKS - } - - async fn update(&self) { - let value = self - .client - .raw_storage_value(self.storage_key.clone(), None) - .await - .and_then(|maybe_storage_value| { - self.value_converter.decode(maybe_storage_value).map(|maybe_fixed_point_value| { - maybe_fixed_point_value.map(|fixed_point_value| { - fixed_point_value.into_inner().unique_saturated_into() as f64 / - V::Value::DIV.unique_saturated_into() as f64 - }) - }) - }) - .map_err(|e| e.to_string()); - relay_utils::metrics::set_gauge_value(&self.metric, value.clone()); - *self.shared_value_ref.write().await = value.ok().and_then(|x| x); - } -} diff --git a/polkadot/bridges/relays/client-substrate/src/metrics/mod.rs b/polkadot/bridges/relays/client-substrate/src/metrics/mod.rs deleted file mode 100644 index 3b63099e000..00000000000 --- a/polkadot/bridges/relays/client-substrate/src/metrics/mod.rs +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Contains several Substrate-specific metrics that may be exposed by relay. 
- -pub use float_storage_value::{FixedU128OrOne, FloatStorageValue, FloatStorageValueMetric}; -pub use storage_proof_overhead::StorageProofOverheadMetric; - -mod float_storage_value; -mod storage_proof_overhead; diff --git a/polkadot/bridges/relays/client-substrate/src/metrics/storage_proof_overhead.rs b/polkadot/bridges/relays/client-substrate/src/metrics/storage_proof_overhead.rs deleted file mode 100644 index f1c770ed228..00000000000 --- a/polkadot/bridges/relays/client-substrate/src/metrics/storage_proof_overhead.rs +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::{chain::Chain, client::Client, error::Error}; - -use async_trait::async_trait; -use relay_utils::metrics::{ - metric_name, register, Gauge, Metric, PrometheusError, Registry, StandaloneMetric, U64, -}; -use sp_core::storage::StorageKey; -use sp_runtime::traits::Header as HeaderT; -use sp_storage::well_known_keys::CODE; -use std::time::Duration; - -/// Storage proof overhead update interval (in blocks). -const UPDATE_INTERVAL_IN_BLOCKS: u32 = 100; - -/// Metric that represents extra size of storage proof as unsigned integer gauge. -/// -/// There's one thing to keep in mind when using this metric: the overhead may be slightly -/// different for other values, but this metric gives a good estimation. -#[derive(Debug)] -pub struct StorageProofOverheadMetric { - client: Client, - metric: Gauge, -} - -impl Clone for StorageProofOverheadMetric { - fn clone(&self) -> Self { - StorageProofOverheadMetric { client: self.client.clone(), metric: self.metric.clone() } - } -} - -impl StorageProofOverheadMetric { - /// Create new metric instance with given name and help. - pub fn new(client: Client, name: String, help: String) -> Result { - Ok(StorageProofOverheadMetric { - client, - metric: Gauge::new(metric_name(None, &name), help)?, - }) - } - - /// Returns approximate storage proof size overhead. 
- async fn compute_storage_proof_overhead(&self) -> Result { - let best_header_hash = self.client.best_finalized_header_hash().await?; - let best_header = self.client.header_by_hash(best_header_hash).await?; - - let storage_proof = self - .client - .prove_storage(vec![StorageKey(CODE.to_vec())], best_header_hash) - .await?; - let storage_proof_size: usize = storage_proof.clone().iter_nodes().map(|n| n.len()).sum(); - - let storage_value_reader = bp_runtime::StorageProofChecker::::new( - *best_header.state_root(), - storage_proof, - ) - .map_err(Error::StorageProofError)?; - let maybe_encoded_storage_value = - storage_value_reader.read_value(CODE).map_err(Error::StorageProofError)?; - let encoded_storage_value_size = - maybe_encoded_storage_value.ok_or(Error::MissingMandatoryCodeEntry)?.len(); - - Ok(storage_proof_size - encoded_storage_value_size) - } -} - -impl Metric for StorageProofOverheadMetric { - fn register(&self, registry: &Registry) -> Result<(), PrometheusError> { - register(self.metric.clone(), registry).map(drop) - } -} - -#[async_trait] -impl StandaloneMetric for StorageProofOverheadMetric { - fn update_interval(&self) -> Duration { - C::AVERAGE_BLOCK_INTERVAL * UPDATE_INTERVAL_IN_BLOCKS - } - - async fn update(&self) { - relay_utils::metrics::set_gauge_value( - &self.metric, - self.compute_storage_proof_overhead() - .await - .map(|overhead| Some(overhead as u64)), - ); - } -} diff --git a/polkadot/bridges/relays/client-substrate/src/rpc.rs b/polkadot/bridges/relays/client-substrate/src/rpc.rs deleted file mode 100644 index a0172d1e550..00000000000 --- a/polkadot/bridges/relays/client-substrate/src/rpc.rs +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! The most generic Substrate node RPC interface. - -use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use pallet_transaction_payment_rpc_runtime_api::FeeDetails; -use sc_rpc_api::{state::ReadProof, system::Health}; -use sp_core::{ - storage::{StorageData, StorageKey}, - Bytes, -}; -use sp_rpc::number::NumberOrHex; -use sp_version::RuntimeVersion; - -#[rpc(client)] -pub(crate) trait Substrate { - #[method(name = "system_health", param_kind = array)] - async fn system_health(&self) -> RpcResult; - #[method(name = "system_properties", param_kind = array)] - async fn system_properties(&self) -> RpcResult; - #[method(name = "chain_getHeader", param_kind = array)] - async fn chain_get_header(&self, block_hash: Option) -> RpcResult
; - #[method(name = "chain_getFinalizedHead", param_kind = array)] - async fn chain_get_finalized_head(&self) -> RpcResult; - #[method(name = "chain_getBlock", param_kind = array)] - async fn chain_get_block(&self, block_hash: Option) -> RpcResult; - #[method(name = "chain_getBlockHash", param_kind = array)] - async fn chain_get_block_hash(&self, block_number: Option) -> RpcResult; - #[method(name = "system_accountNextIndex", param_kind = array)] - async fn system_account_next_index(&self, account_id: AccountId) -> RpcResult; - #[method(name = "author_submitExtrinsic", param_kind = array)] - async fn author_submit_extrinsic(&self, extrinsic: Bytes) -> RpcResult; - #[method(name = "author_pendingExtrinsics", param_kind = array)] - async fn author_pending_extrinsics(&self) -> RpcResult>; - #[method(name = "state_call", param_kind = array)] - async fn state_call( - &self, - method: String, - data: Bytes, - at_block: Option, - ) -> RpcResult; - #[method(name = "state_getStorage", param_kind = array)] - async fn state_get_storage( - &self, - key: StorageKey, - at_block: Option, - ) -> RpcResult>; - #[method(name = "state_getReadProof", param_kind = array)] - async fn state_prove_storage( - &self, - keys: Vec, - hash: Option, - ) -> RpcResult>; - #[method(name = "state_getRuntimeVersion", param_kind = array)] - async fn state_runtime_version(&self) -> RpcResult; - #[method(name = "payment_queryFeeDetails", param_kind = array)] - async fn payment_query_fee_details( - &self, - extrinsic: Bytes, - at_block: Option, - ) -> RpcResult>; -} diff --git a/polkadot/bridges/relays/client-substrate/src/sync_header.rs b/polkadot/bridges/relays/client-substrate/src/sync_header.rs deleted file mode 100644 index e45e6b4197a..00000000000 --- a/polkadot/bridges/relays/client-substrate/src/sync_header.rs +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use bp_header_chain::find_grandpa_authorities_scheduled_change; -use finality_relay::SourceHeader as FinalitySourceHeader; -use sp_runtime::traits::Header as HeaderT; - -/// Generic wrapper for `sp_runtime::traits::Header` based headers, that -/// implements `finality_relay::SourceHeader` and may be used in headers sync directly. -#[derive(Clone, Debug, PartialEq)] -pub struct SyncHeader
<Header>(Header); - -impl<Header> SyncHeader<Header> { - /// Extracts wrapped header from self. - pub fn into_inner(self) -> Header { - self.0 - } -} - -impl<Header> std::ops::Deref for SyncHeader<Header> { - type Target = Header; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl<Header> From<Header> for SyncHeader<Header> { - fn from(header: Header) -> Self { - Self(header) - } -} - -impl<Header: HeaderT> FinalitySourceHeader for SyncHeader<Header>
{ - fn hash(&self) -> Header::Hash { - self.0.hash() - } - - fn number(&self) -> Header::Number { - *self.0.number() - } - - fn is_mandatory(&self) -> bool { - find_grandpa_authorities_scheduled_change(&self.0).is_some() - } -} diff --git a/polkadot/bridges/relays/client-westend/Cargo.toml b/polkadot/bridges/relays/client-westend/Cargo.toml deleted file mode 100644 index d38aa162994..00000000000 --- a/polkadot/bridges/relays/client-westend/Cargo.toml +++ /dev/null @@ -1,21 +0,0 @@ -[package] -name = "relay-westend-client" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0" } -relay-substrate-client = { path = "../client-substrate" } -relay-utils = { path = "../utils" } - -# Bridge dependencies - -bp-westend = { path = "../../primitives/chain-westend" } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/relays/client-westend/src/lib.rs b/polkadot/bridges/relays/client-westend/src/lib.rs deleted file mode 100644 index caf0c010c56..00000000000 --- a/polkadot/bridges/relays/client-westend/src/lib.rs +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Types used to connect to the Westend chain. - -use frame_support::weights::Weight; -use relay_substrate_client::{Chain, ChainBase, ChainWithBalances, ChainWithGrandpa}; -use sp_core::storage::StorageKey; -use std::time::Duration; - -/// Westend header id. -pub type HeaderId = relay_utils::HeaderId; - -/// Westend header type used in headers sync. 
-pub type SyncHeader = relay_substrate_client::SyncHeader; - -/// Westend chain definition -#[derive(Debug, Clone, Copy)] -pub struct Westend; - -impl ChainBase for Westend { - type BlockNumber = bp_westend::BlockNumber; - type Hash = bp_westend::Hash; - type Hasher = bp_westend::Hasher; - type Header = bp_westend::Header; - - type AccountId = bp_westend::AccountId; - type Balance = bp_westend::Balance; - type Index = bp_westend::Nonce; - type Signature = bp_westend::Signature; - - fn max_extrinsic_size() -> u32 { - bp_westend::Westend::max_extrinsic_size() - } - - fn max_extrinsic_weight() -> Weight { - bp_westend::Westend::max_extrinsic_weight() - } -} - -impl Chain for Westend { - const NAME: &'static str = "Westend"; - const TOKEN_ID: Option<&'static str> = None; - const BEST_FINALIZED_HEADER_ID_METHOD: &'static str = - bp_westend::BEST_FINALIZED_WESTEND_HEADER_METHOD; - const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(6); - const STORAGE_PROOF_OVERHEAD: u32 = bp_westend::EXTRA_STORAGE_PROOF_SIZE; - const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = bp_westend::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE; - - type SignedBlock = bp_westend::SignedBlock; - type Call = bp_westend::Call; - type WeightToFee = bp_westend::WeightToFee; -} - -impl ChainWithGrandpa for Westend { - const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = - bp_westend::WITH_WESTEND_GRANDPA_PALLET_NAME; -} - -impl ChainWithBalances for Westend { - fn account_info_storage_key(account_id: &Self::AccountId) -> StorageKey { - StorageKey(bp_westend::account_info_storage_key(account_id)) - } -} diff --git a/polkadot/bridges/relays/client-wococo/Cargo.toml b/polkadot/bridges/relays/client-wococo/Cargo.toml deleted file mode 100644 index 6845ac34c84..00000000000 --- a/polkadot/bridges/relays/client-wococo/Cargo.toml +++ /dev/null @@ -1,29 +0,0 @@ -[package] -name = "relay-wococo-client" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.0.0" } -relay-substrate-client = { path = "../client-substrate" } -relay-utils = { path = "../utils" } -scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } - -# Bridge dependencies -bridge-runtime-common = { path = "../../bin/runtime-common" } -bp-header-chain = { path = "../../primitives/header-chain" } -bp-message-dispatch = { path = "../../primitives/message-dispatch" } -bp-messages = { path = "../../primitives/messages" } -bp-polkadot-core = { path = "../../primitives/polkadot-core" } -bp-rococo = { path = "../../primitives/chain-rococo" } -bp-runtime = { path = "../../primitives/runtime" } -bp-wococo = { path = "../../primitives/chain-wococo" } -pallet-bridge-dispatch = { path = "../../modules/dispatch" } -pallet-bridge-messages = { path = "../../modules/messages" } - -# Substrate Dependencies -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/relays/client-wococo/src/lib.rs b/polkadot/bridges/relays/client-wococo/src/lib.rs deleted file mode 100644 index 485ca1bd62f..00000000000 --- a/polkadot/bridges/relays/client-wococo/src/lib.rs +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
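Standalone metrics such as `StorageProofOverheadMetric` and `FloatStorageValueMetric` refresh once every `UPDATE_INTERVAL_IN_BLOCKS` (100) blocks, scaled by the chain's `AVERAGE_BLOCK_INTERVAL`; with Westend's 6-second blocks that is one update roughly every ten minutes. A small sketch of the arithmetic behind `update_interval()`:

    use std::time::Duration;

    fn main() {
        // Westend's AVERAGE_BLOCK_INTERVAL and the metrics' UPDATE_INTERVAL_IN_BLOCKS above.
        let average_block_interval = Duration::from_secs(6);
        let update_interval_in_blocks: u32 = 100;
        // Mirrors StandaloneMetric::update_interval():
        //   C::AVERAGE_BLOCK_INTERVAL * UPDATE_INTERVAL_IN_BLOCKS
        let update_interval = average_block_interval * update_interval_in_blocks;
        assert_eq!(update_interval, Duration::from_secs(600));
    }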
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Types used to connect to the Wococo-Substrate chain. - -use bp_messages::MessageNonce; -use codec::Encode; -use frame_support::weights::Weight; -use relay_substrate_client::{ - Chain, ChainBase, ChainWithBalances, ChainWithGrandpa, ChainWithMessages, - Error as SubstrateError, SignParam, TransactionSignScheme, UnsignedTransaction, -}; -use sp_core::{storage::StorageKey, Pair}; -use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount}; -use std::time::Duration; - -pub mod runtime; - -/// Wococo header id. -pub type HeaderId = relay_utils::HeaderId; - -/// Wococo header type used in headers sync. -pub type SyncHeader = relay_substrate_client::SyncHeader; - -/// Wococo chain definition -#[derive(Debug, Clone, Copy)] -pub struct Wococo; - -impl ChainBase for Wococo { - type BlockNumber = bp_wococo::BlockNumber; - type Hash = bp_wococo::Hash; - type Hasher = bp_wococo::Hashing; - type Header = bp_wococo::Header; - - type AccountId = bp_wococo::AccountId; - type Balance = bp_wococo::Balance; - type Index = bp_wococo::Nonce; - type Signature = bp_wococo::Signature; - - fn max_extrinsic_size() -> u32 { - bp_wococo::Wococo::max_extrinsic_size() - } - - fn max_extrinsic_weight() -> Weight { - bp_wococo::Wococo::max_extrinsic_weight() - } -} - -impl Chain for Wococo { - const NAME: &'static str = "Wococo"; - const TOKEN_ID: Option<&'static str> = None; - const BEST_FINALIZED_HEADER_ID_METHOD: &'static str = - bp_wococo::BEST_FINALIZED_WOCOCO_HEADER_METHOD; - const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(6); - const STORAGE_PROOF_OVERHEAD: u32 = bp_wococo::EXTRA_STORAGE_PROOF_SIZE; - const MAXIMAL_ENCODED_ACCOUNT_ID_SIZE: u32 = bp_wococo::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE; - - type SignedBlock = bp_wococo::SignedBlock; - type Call = crate::runtime::Call; - type WeightToFee = bp_wococo::WeightToFee; -} - -impl ChainWithGrandpa for Wococo { - const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = bp_wococo::WITH_WOCOCO_GRANDPA_PALLET_NAME; -} - -impl ChainWithMessages for Wococo { - const WITH_CHAIN_MESSAGES_PALLET_NAME: &'static str = - bp_wococo::WITH_WOCOCO_MESSAGES_PALLET_NAME; - const TO_CHAIN_MESSAGE_DETAILS_METHOD: &'static str = - bp_wococo::TO_WOCOCO_MESSAGE_DETAILS_METHOD; - const PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_CHAIN: Weight = - bp_wococo::PAY_INBOUND_DISPATCH_FEE_WEIGHT; - const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = - bp_wococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; - const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = - bp_wococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; - type WeightInfo = (); -} - -impl ChainWithBalances for Wococo { - fn account_info_storage_key(account_id: &Self::AccountId) -> StorageKey { - StorageKey(bp_wococo::account_info_storage_key(account_id)) - } -} - -impl TransactionSignScheme for Wococo { - type Chain = Wococo; - type AccountKeyPair = 
sp_core::sr25519::Pair; - type SignedTransaction = crate::runtime::UncheckedExtrinsic; - - fn sign_transaction(param: SignParam) -> Result { - let raw_payload = SignedPayload::new( - param.unsigned.call.clone(), - bp_wococo::SignedExtensions::new( - param.spec_version, - param.transaction_version, - param.era, - param.genesis_hash, - param.unsigned.nonce, - param.unsigned.tip, - ), - ) - .expect("SignedExtension never fails."); - - let signature = raw_payload.using_encoded(|payload| param.signer.sign(payload)); - let signer: sp_runtime::MultiSigner = param.signer.public().into(); - let (call, extra, _) = raw_payload.deconstruct(); - - Ok(bp_wococo::UncheckedExtrinsic::new_signed( - call, - sp_runtime::MultiAddress::Id(signer.into_account()), - signature.into(), - extra, - )) - } - - fn is_signed(tx: &Self::SignedTransaction) -> bool { - tx.signature.is_some() - } - - fn is_signed_by(signer: &Self::AccountKeyPair, tx: &Self::SignedTransaction) -> bool { - tx.signature - .as_ref() - .map(|(address, _, _)| { - *address == bp_wococo::AccountId::from(*signer.public().as_array_ref()).into() - }) - .unwrap_or(false) - } - - fn parse_transaction(tx: Self::SignedTransaction) -> Option> { - let extra = &tx.signature.as_ref()?.2; - Some(UnsignedTransaction { call: tx.function, nonce: extra.nonce(), tip: extra.tip() }) - } -} - -/// Wococo signing params. -pub type SigningParams = sp_core::sr25519::Pair; diff --git a/polkadot/bridges/relays/client-wococo/src/runtime.rs b/polkadot/bridges/relays/client-wococo/src/runtime.rs deleted file mode 100644 index b28e053086b..00000000000 --- a/polkadot/bridges/relays/client-wococo/src/runtime.rs +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Types that are specific to the Wococo runtime. - -use bp_messages::{LaneId, UnrewardedRelayersState}; -use bp_polkadot_core::{AccountAddress, Balance, PolkadotLike}; -use bp_runtime::Chain; -use codec::{Compact, Decode, Encode}; -use frame_support::weights::Weight; -use scale_info::TypeInfo; - -/// Unchecked Wococo extrinsic. -pub type UncheckedExtrinsic = bp_polkadot_core::UncheckedExtrinsic; - -/// Rococo account ownership digest from Wococo. -/// -/// The byte vector returned by this function should be signed with a Rococo account private key. -/// This way, the owner of `wococo_account_id` on Rococo proves that the Rococo account private key -/// is also under his control. 
-pub fn wococo_to_rococo_account_ownership_digest( - rococo_call: &Call, - wococo_account_id: AccountId, - rococo_spec_version: SpecVersion, -) -> Vec -where - Call: codec::Encode, - AccountId: codec::Encode, - SpecVersion: codec::Encode, -{ - pallet_bridge_dispatch::account_ownership_digest( - rococo_call, - wococo_account_id, - rococo_spec_version, - bp_runtime::WOCOCO_CHAIN_ID, - bp_runtime::ROCOCO_CHAIN_ID, - ) -} - -/// Wococo Runtime `Call` enum. -/// -/// The enum represents a subset of possible `Call`s we can send to Rococo chain. -/// Ideally this code would be auto-generated from metadata, because we want to -/// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s. -/// -/// All entries here (like pretty much in the entire file) must be kept in sync with Rococo -/// `construct_runtime`, so that we maintain SCALE-compatibility. -/// -/// See: [link](https://github.com/paritytech/polkadot/blob/master/runtime/rococo/src/lib.rs) -#[allow(clippy::large_enum_variant)] -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -pub enum Call { - /// System pallet. - #[codec(index = 0)] - System(SystemCall), - /// Balances pallet. - #[codec(index = 4)] - Balances(BalancesCall), - /// Rococo bridge pallet. - #[codec(index = 40)] - BridgeGrandpaRococo(BridgeGrandpaRococoCall), - /// Rococo messages pallet. - #[codec(index = 43)] - BridgeRococoMessages(BridgeRococoMessagesCall), -} - -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -#[allow(non_camel_case_types)] -pub enum SystemCall { - #[codec(index = 1)] - remark(Vec), -} - -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -#[allow(non_camel_case_types)] -pub enum BalancesCall { - #[codec(index = 0)] - transfer(AccountAddress, Compact), -} - -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -#[allow(non_camel_case_types)] -pub enum BridgeGrandpaRococoCall { - #[codec(index = 0)] - submit_finality_proof( - Box<::Header>, - bp_header_chain::justification::GrandpaJustification<::Header>, - ), - #[codec(index = 1)] - initialize(bp_header_chain::InitializationData<::Header>), -} - -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -#[allow(non_camel_case_types)] -pub enum BridgeRococoMessagesCall { - #[codec(index = 3)] - send_message( - LaneId, - bp_message_dispatch::MessagePayload< - bp_rococo::AccountId, - bp_wococo::AccountId, - bp_wococo::AccountPublic, - Vec, - >, - bp_rococo::Balance, - ), - #[codec(index = 5)] - receive_messages_proof( - bp_rococo::AccountId, - bridge_runtime_common::messages::target::FromBridgedChainMessagesProof, - u32, - Weight, - ), - #[codec(index = 6)] - receive_messages_delivery_proof( - bridge_runtime_common::messages::source::FromBridgedChainMessagesDeliveryProof< - bp_rococo::Hash, - >, - UnrewardedRelayersState, - ), -} - -impl sp_runtime::traits::Dispatchable for Call { - type Origin = (); - type Config = (); - type Info = (); - type PostInfo = (); - - fn dispatch(self, _origin: Self::Origin) -> sp_runtime::DispatchResultWithInfo { - unimplemented!("The Call is not expected to be dispatched.") - } -} diff --git a/polkadot/bridges/relays/finality/Cargo.toml b/polkadot/bridges/relays/finality/Cargo.toml deleted file mode 100644 index cc5ae54be33..00000000000 --- a/polkadot/bridges/relays/finality/Cargo.toml +++ /dev/null @@ -1,20 +0,0 @@ -[package] -name = "finality-relay" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH 
Classpath-exception-2.0" -description = "Finality proofs relay" - -[dependencies] -async-std = "1.6.5" -async-trait = "0.1.40" -backoff = "0.2" -bp-header-chain = { path = "../../primitives/header-chain" } -futures = "0.3.5" -log = "0.4.11" -num-traits = "0.2" -relay-utils = { path = "../utils" } - -[dev-dependencies] -parking_lot = "0.11.0" diff --git a/polkadot/bridges/relays/finality/src/finality_loop.rs b/polkadot/bridges/relays/finality/src/finality_loop.rs deleted file mode 100644 index c29a5d5fec2..00000000000 --- a/polkadot/bridges/relays/finality/src/finality_loop.rs +++ /dev/null @@ -1,692 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! The loop basically reads all missing headers and their finality proofs from the source client. -//! The proof for the best possible header is then submitted to the target node. The only exception -//! is the mandatory headers, which we always submit to the target node. For such headers, we -//! assume that the persistent proof either exists, or will eventually become available. - -use crate::{ - sync_loop_metrics::SyncLoopMetrics, FinalityProof, FinalitySyncPipeline, SourceHeader, -}; - -use async_trait::async_trait; -use backoff::backoff::Backoff; -use futures::{select, Future, FutureExt, Stream, StreamExt}; -use num_traits::{One, Saturating}; -use relay_utils::{ - metrics::MetricsParams, relay_loop::Client as RelayClient, retry_backoff, FailedClient, - HeaderId, MaybeConnectionError, -}; -use std::{ - pin::Pin, - time::{Duration, Instant}, -}; - -/// Finality proof synchronization loop parameters. -#[derive(Debug, Clone)] -pub struct FinalitySyncParams { - /// Interval at which we check updates on both clients. Normally should be larger than - /// `min(source_block_time, target_block_time)`. - /// - /// This parameter may be used to limit transactions rate. Increase the value && you'll get - /// infrequent updates => sparse headers => potential slow down of bridge applications, but - /// pallet storage won't be super large. Decrease the value to near `source_block_time` and - /// you'll get transaction for (almost) every block of the source chain => all source headers - /// will be known to the target chain => bridge applications will run faster, but pallet - /// storage may explode (but if pruning is there, then it's fine). - pub tick: Duration, - /// Number of finality proofs to keep in internal buffer between loop iterations. - /// - /// While in "major syncing" state, we still read finality proofs from the stream. They're - /// stored in the internal buffer between loop iterations. When we're close to the tip of the - /// chain, we may meet finality delays if headers are not finalized frequently. So instead of - /// waiting for next finality proof to appear in the stream, we may use existing proof from - /// that buffer. 
- pub recent_finality_proofs_limit: usize, - /// Timeout before we treat our transactions as lost and restart the whole sync process. - pub stall_timeout: Duration, - /// If true, only mandatory headers are relayed. - pub only_mandatory_headers: bool, -} - -/// Source client used in finality synchronization loop. -#[async_trait] -pub trait SourceClient: RelayClient { - /// Stream of new finality proofs. The stream is allowed to miss proofs for some - /// headers, even if those headers are mandatory. - type FinalityProofsStream: Stream + Send; - - /// Get best finalized block number. - async fn best_finalized_block_number(&self) -> Result; - - /// Get canonical header and its finality proof by number. - async fn header_and_finality_proof( - &self, - number: P::Number, - ) -> Result<(P::Header, Option), Self::Error>; - - /// Subscribe to new finality proofs. - async fn finality_proofs(&self) -> Result; -} - -/// Target client used in finality synchronization loop. -#[async_trait] -pub trait TargetClient: RelayClient { - /// Get best finalized source block number. - async fn best_finalized_source_block_id( - &self, - ) -> Result, Self::Error>; - - /// Submit header finality proof. - async fn submit_finality_proof( - &self, - header: P::Header, - proof: P::FinalityProof, - ) -> Result<(), Self::Error>; -} - -/// Return prefix that will be used by default to expose Prometheus metrics of the finality proofs -/// sync loop. -pub fn metrics_prefix() -> String { - format!("{}_to_{}_Sync", P::SOURCE_NAME, P::TARGET_NAME) -} - -/// Run finality proofs synchronization loop. -pub async fn run( - source_client: impl SourceClient

<P>, - target_client: impl TargetClient<P>
, - sync_params: FinalitySyncParams, - metrics_params: MetricsParams, - exit_signal: impl Future + 'static + Send, -) -> Result<(), relay_utils::Error> { - let exit_signal = exit_signal.shared(); - relay_utils::relay_loop(source_client, target_client) - .with_metrics(metrics_params) - .loop_metric(SyncLoopMetrics::new( - Some(&metrics_prefix::

<P>()), - "source", - "source_at_target", - )?)? - .expose() - .await? - .run(metrics_prefix::<P>
(), move |source_client, target_client, metrics| { - run_until_connection_lost( - source_client, - target_client, - sync_params.clone(), - metrics, - exit_signal.clone(), - ) - }) - .await -} - -/// Unjustified headers container. Ordered by header number. -pub(crate) type UnjustifiedHeaders = Vec; -/// Finality proofs container. Ordered by target header number. -pub(crate) type FinalityProofs

<P> = - Vec<(<P as FinalitySyncPipeline>::Number, <P as FinalitySyncPipeline>::FinalityProof)>; - /// Reference to finality proofs container. -pub(crate) type FinalityProofsRef<'a, P> = - &'a [(<P as FinalitySyncPipeline>::Number, <P as FinalitySyncPipeline>
::FinalityProof)]; - -/// Error that may happen inside finality synchronization loop. -#[derive(Debug)] -pub(crate) enum Error { - /// Source client request has failed with given error. - Source(SourceError), - /// Target client request has failed with given error. - Target(TargetError), - /// Finality proof for mandatory header is missing from the source node. - MissingMandatoryFinalityProof(P::Number), - /// The synchronization has stalled. - Stalled, -} - -impl Error -where - P: FinalitySyncPipeline, - SourceError: MaybeConnectionError, - TargetError: MaybeConnectionError, -{ - fn fail_if_connection_error(&self) -> Result<(), FailedClient> { - match *self { - Error::Source(ref error) if error.is_connection_error() => Err(FailedClient::Source), - Error::Target(ref error) if error.is_connection_error() => Err(FailedClient::Target), - Error::Stalled => Err(FailedClient::Both), - _ => Ok(()), - } - } -} - -/// Information about transaction that we have submitted. -#[derive(Debug, Clone)] -pub(crate) struct Transaction { - /// Time when we have submitted this transaction. - pub time: Instant, - /// The number of the header we have submitted. - pub submitted_header_number: Number, -} - -/// Finality proofs stream that may be restarted. -pub(crate) struct RestartableFinalityProofsStream { - /// Flag that the stream needs to be restarted. - pub(crate) needs_restart: bool, - /// The stream itself. - pub(crate) stream: Pin>, -} - -#[cfg(test)] -impl From for RestartableFinalityProofsStream { - fn from(stream: S) -> Self { - RestartableFinalityProofsStream { needs_restart: false, stream: Box::pin(stream) } - } -} - -/// Finality synchronization loop state. -pub(crate) struct FinalityLoopState<'a, P: FinalitySyncPipeline, FinalityProofsStream> { - /// Synchronization loop progress. - pub(crate) progress: &'a mut (Instant, Option), - /// Finality proofs stream. - pub(crate) finality_proofs_stream: - &'a mut RestartableFinalityProofsStream, - /// Recent finality proofs that we have read from the stream. - pub(crate) recent_finality_proofs: &'a mut FinalityProofs

<P>, - /// Last transaction that we have submitted to the target node. - pub(crate) last_transaction: Option<Transaction<P::Number>>, -} - -async fn run_until_connection_lost<P: FinalitySyncPipeline>( - source_client: impl SourceClient<P>, - target_client: impl TargetClient<P>
, - sync_params: FinalitySyncParams, - metrics_sync: Option, - exit_signal: impl Future, -) -> Result<(), FailedClient> { - let restart_finality_proofs_stream = || async { - source_client.finality_proofs().await.map_err(|error| { - log::error!( - target: "bridge", - "Failed to subscribe to {} justifications: {:?}. Going to reconnect", - P::SOURCE_NAME, - error, - ); - - FailedClient::Source - }) - }; - - let exit_signal = exit_signal.fuse(); - futures::pin_mut!(exit_signal); - - let mut finality_proofs_stream = RestartableFinalityProofsStream { - needs_restart: false, - stream: Box::pin(restart_finality_proofs_stream().await?), - }; - let mut recent_finality_proofs = Vec::new(); - - let mut progress = (Instant::now(), None); - let mut retry_backoff = retry_backoff(); - let mut last_transaction = None; - - loop { - // run loop iteration - let iteration_result = run_loop_iteration( - &source_client, - &target_client, - FinalityLoopState { - progress: &mut progress, - finality_proofs_stream: &mut finality_proofs_stream, - recent_finality_proofs: &mut recent_finality_proofs, - last_transaction: last_transaction.clone(), - }, - &sync_params, - &metrics_sync, - ) - .await; - - // deal with errors - let next_tick = match iteration_result { - Ok(updated_last_transaction) => { - last_transaction = updated_last_transaction; - retry_backoff.reset(); - sync_params.tick - }, - Err(error) => { - log::error!(target: "bridge", "Finality sync loop iteration has failed with error: {:?}", error); - error.fail_if_connection_error()?; - retry_backoff.next_backoff().unwrap_or(relay_utils::relay_loop::RECONNECT_DELAY) - }, - }; - if finality_proofs_stream.needs_restart { - log::warn!(target: "bridge", "{} finality proofs stream is being restarted", P::SOURCE_NAME); - - finality_proofs_stream.needs_restart = false; - finality_proofs_stream.stream = Box::pin(restart_finality_proofs_stream().await?); - } - - // wait till exit signal, or new source block - select! { - _ = async_std::task::sleep(next_tick).fuse() => {}, - _ = exit_signal => return Ok(()), - } - } -} - -pub(crate) async fn run_loop_iteration( - source_client: &SC, - target_client: &TC, - state: FinalityLoopState<'_, P, SC::FinalityProofsStream>, - sync_params: &FinalitySyncParams, - metrics_sync: &Option, -) -> Result>, Error> -where - P: FinalitySyncPipeline, - SC: SourceClient

<P>, - TC: TargetClient<P>
, -{ - // read best source headers ids from source and target nodes - let best_number_at_source = - source_client.best_finalized_block_number().await.map_err(Error::Source)?; - let best_id_at_target = - target_client.best_finalized_source_block_id().await.map_err(Error::Target)?; - let best_number_at_target = best_id_at_target.0; - - let different_hash_at_source = ensure_same_fork::(&best_id_at_target, source_client) - .await - .map_err(Error::Source)?; - let using_same_fork = different_hash_at_source.is_none(); - if let Some(ref different_hash_at_source) = different_hash_at_source { - log::error!( - target: "bridge", - "Source node ({}) and pallet at target node ({}) have different headers at the same height {:?}: \ - at-source {:?} vs at-target {:?}", - P::SOURCE_NAME, - P::TARGET_NAME, - best_number_at_target, - different_hash_at_source, - best_id_at_target.1, - ); - } - - if let Some(ref metrics_sync) = *metrics_sync { - metrics_sync.update_best_block_at_source(best_number_at_source); - metrics_sync.update_best_block_at_target(best_number_at_target); - metrics_sync.update_using_same_fork(using_same_fork); - } - *state.progress = - print_sync_progress::

(*state.progress, best_number_at_source, best_number_at_target); - - // if we have already submitted header, then we just need to wait for it - // if we're waiting too much, then we believe our transaction has been lost and restart sync - if let Some(last_transaction) = state.last_transaction { - if best_number_at_target >= last_transaction.submitted_header_number { - // transaction has been mined && we can continue - } else if last_transaction.time.elapsed() > sync_params.stall_timeout { - log::error!( - target: "bridge", - "Finality synchronization from {} to {} has stalled. Going to restart", - P::SOURCE_NAME, - P::TARGET_NAME, - ); - - return Err(Error::Stalled) - } else { - return Ok(Some(last_transaction)) - } - } - - // submit new header if we have something new - match select_header_to_submit( - source_client, - target_client, - state.finality_proofs_stream, - state.recent_finality_proofs, - best_number_at_source, - best_number_at_target, - sync_params, - ) - .await? - { - Some((header, justification)) => { - let new_transaction = - Transaction { time: Instant::now(), submitted_header_number: header.number() }; - - log::debug!( - target: "bridge", - "Going to submit finality proof of {} header #{:?} to {}", - P::SOURCE_NAME, - new_transaction.submitted_header_number, - P::TARGET_NAME, - ); - - target_client - .submit_finality_proof(header, justification) - .await - .map_err(Error::Target)?; - Ok(Some(new_transaction)) - }, - None => Ok(None), - } -} - -pub(crate) async fn select_header_to_submit( - source_client: &SC, - target_client: &TC, - finality_proofs_stream: &mut RestartableFinalityProofsStream, - recent_finality_proofs: &mut FinalityProofs

<P>, - best_number_at_source: P::Number, - best_number_at_target: P::Number, - sync_params: &FinalitySyncParams, -) -> Result<Option<(P::Header, P::FinalityProof)>, Error<P, SC::Error, TC::Error>> -where - P: FinalitySyncPipeline, - SC: SourceClient<P>, - TC: TargetClient<P>
, -{ - // to see that the loop is progressing - log::trace!( - target: "bridge", - "Considering range of headers ({:?}; {:?}]", - best_number_at_target, - best_number_at_source, - ); - - // read missing headers. if we see that the header schedules GRANDPA change, we need to - // submit this header - let selected_finality_proof = read_missing_headers::( - source_client, - target_client, - best_number_at_source, - best_number_at_target, - ) - .await?; - let (mut unjustified_headers, mut selected_finality_proof) = match selected_finality_proof { - SelectedFinalityProof::Mandatory(header, finality_proof) => - return Ok(Some((header, finality_proof))), - _ if sync_params.only_mandatory_headers => { - // we are not reading finality proofs from the stream, so eventually it'll break - // but we don't care about transient proofs at all, so it is acceptable - return Ok(None) - }, - SelectedFinalityProof::Regular(unjustified_headers, header, finality_proof) => - (unjustified_headers, Some((header, finality_proof))), - SelectedFinalityProof::None(unjustified_headers) => (unjustified_headers, None), - }; - - // all headers that are missing from the target client are non-mandatory - // => even if we have already selected some header and its persistent finality proof, - // we may try to select better header by reading non-persistent proofs from the stream - read_finality_proofs_from_stream::(finality_proofs_stream, recent_finality_proofs); - selected_finality_proof = select_better_recent_finality_proof::

( - recent_finality_proofs, - &mut unjustified_headers, - selected_finality_proof, - ); - - // remove obsolete 'recent' finality proofs + keep its size under certain limit - let oldest_finality_proof_to_keep = selected_finality_proof - .as_ref() - .map(|(header, _)| header.number()) - .unwrap_or(best_number_at_target); - prune_recent_finality_proofs::

( - oldest_finality_proof_to_keep, - recent_finality_proofs, - sync_params.recent_finality_proofs_limit, - ); - - Ok(selected_finality_proof) -} - -/// Ensures that both clients are on the same fork. -/// -/// Returns `Some(_)` with header has at the source client if headers are different. -async fn ensure_same_fork>( - best_id_at_target: &HeaderId, - source_client: &SC, -) -> Result, SC::Error> { - let header_at_source = source_client.header_and_finality_proof(best_id_at_target.0).await?.0; - let header_hash_at_source = header_at_source.hash(); - Ok(if best_id_at_target.1 == header_hash_at_source { - None - } else { - Some(header_hash_at_source) - }) -} - -/// Finality proof that has been selected by the `read_missing_headers` function. -pub(crate) enum SelectedFinalityProof { - /// Mandatory header and its proof has been selected. We shall submit proof for this header. - Mandatory(Header, FinalityProof), - /// Regular header and its proof has been selected. We may submit this proof, or proof for - /// some better header. - Regular(UnjustifiedHeaders

, Header, FinalityProof), - /// We haven't found any missing header with persistent proof at the target client. - None(UnjustifiedHeaders
), -} - -/// Read missing headers and their persistent finality proofs from the target client. -/// -/// If we have found some header with known proof, it is returned. -/// Otherwise, `SelectedFinalityProof::None` is returned. -/// -/// Unless we have found mandatory header, all missing headers are collected and returned. -pub(crate) async fn read_missing_headers< - P: FinalitySyncPipeline, - SC: SourceClient

, - TC: TargetClient

, ->( - source_client: &SC, - _target_client: &TC, - best_number_at_source: P::Number, - best_number_at_target: P::Number, -) -> Result, Error> { - let mut unjustified_headers = Vec::new(); - let mut selected_finality_proof = None; - let mut header_number = best_number_at_target + One::one(); - while header_number <= best_number_at_source { - let (header, finality_proof) = source_client - .header_and_finality_proof(header_number) - .await - .map_err(Error::Source)?; - let is_mandatory = header.is_mandatory(); - - match (is_mandatory, finality_proof) { - (true, Some(finality_proof)) => { - log::trace!(target: "bridge", "Header {:?} is mandatory", header_number); - return Ok(SelectedFinalityProof::Mandatory(header, finality_proof)) - }, - (true, None) => return Err(Error::MissingMandatoryFinalityProof(header.number())), - (false, Some(finality_proof)) => { - log::trace!(target: "bridge", "Header {:?} has persistent finality proof", header_number); - unjustified_headers.clear(); - selected_finality_proof = Some((header, finality_proof)); - }, - (false, None) => { - unjustified_headers.push(header); - }, - } - - header_number = header_number + One::one(); - } - - log::trace!( - target: "bridge", - "Read {} {} headers. Selected finality proof for header: {:?}", - best_number_at_source.saturating_sub(best_number_at_target), - P::SOURCE_NAME, - selected_finality_proof.as_ref().map(|(header, _)| header), - ); - - Ok(match selected_finality_proof { - Some((header, proof)) => SelectedFinalityProof::Regular(unjustified_headers, header, proof), - None => SelectedFinalityProof::None(unjustified_headers), - }) -} - -/// Read finality proofs from the stream. -pub(crate) fn read_finality_proofs_from_stream< - P: FinalitySyncPipeline, - FPS: Stream, ->( - finality_proofs_stream: &mut RestartableFinalityProofsStream, - recent_finality_proofs: &mut FinalityProofs

, -) { - let mut proofs_count = 0; - let mut first_header_number = None; - let mut last_header_number = None; - loop { - let next_proof = finality_proofs_stream.stream.next(); - let finality_proof = match next_proof.now_or_never() { - Some(Some(finality_proof)) => finality_proof, - Some(None) => { - finality_proofs_stream.needs_restart = true; - break - }, - None => break, - }; - - let target_header_number = finality_proof.target_header_number(); - if first_header_number.is_none() { - first_header_number = Some(target_header_number); - } - last_header_number = Some(target_header_number); - proofs_count += 1; - - recent_finality_proofs.push((target_header_number, finality_proof)); - } - - if proofs_count != 0 { - log::trace!( - target: "bridge", - "Read {} finality proofs from {} finality stream for headers in range [{:?}; {:?}]", - proofs_count, - P::SOURCE_NAME, - first_header_number, - last_header_number, - ); - } -} - -/// Try to select better header and its proof, given finality proofs that we -/// have recently read from the stream. -pub(crate) fn select_better_recent_finality_proof( - recent_finality_proofs: FinalityProofsRef

, - unjustified_headers: &mut UnjustifiedHeaders, - selected_finality_proof: Option<(P::Header, P::FinalityProof)>, -) -> Option<(P::Header, P::FinalityProof)> { - if unjustified_headers.is_empty() || recent_finality_proofs.is_empty() { - log::trace!( - target: "bridge", - "Can not improve selected {} finality proof {:?}. No unjustified headers and recent proofs", - P::SOURCE_NAME, - selected_finality_proof.as_ref().map(|(h, _)| h.number()), - ); - return selected_finality_proof - } - - const NOT_EMPTY_PROOF: &str = "we have checked that the vec is not empty; qed"; - - // we need proofs for headers in range unjustified_range_begin..=unjustified_range_end - let unjustified_range_begin = unjustified_headers.first().expect(NOT_EMPTY_PROOF).number(); - let unjustified_range_end = unjustified_headers.last().expect(NOT_EMPTY_PROOF).number(); - - // we have proofs for headers in range buffered_range_begin..=buffered_range_end - let buffered_range_begin = recent_finality_proofs.first().expect(NOT_EMPTY_PROOF).0; - let buffered_range_end = recent_finality_proofs.last().expect(NOT_EMPTY_PROOF).0; - - // we have two ranges => find intersection - let intersection_begin = std::cmp::max(unjustified_range_begin, buffered_range_begin); - let intersection_end = std::cmp::min(unjustified_range_end, buffered_range_end); - let intersection = intersection_begin..=intersection_end; - - // find last proof from intersection - let selected_finality_proof_index = recent_finality_proofs - .binary_search_by_key(intersection.end(), |(number, _)| *number) - .unwrap_or_else(|index| index.saturating_sub(1)); - let (selected_header_number, finality_proof) = - &recent_finality_proofs[selected_finality_proof_index]; - let has_selected_finality_proof = intersection.contains(selected_header_number); - log::trace!( - target: "bridge", - "Trying to improve selected {} finality proof {:?}. Headers range: [{:?}; {:?}]. Proofs range: [{:?}; {:?}].\ - Trying to improve to: {:?}. Result: {}", - P::SOURCE_NAME, - selected_finality_proof.as_ref().map(|(h, _)| h.number()), - unjustified_range_begin, - unjustified_range_end, - buffered_range_begin, - buffered_range_end, - selected_header_number, - if has_selected_finality_proof { "improved" } else { "not improved" }, - ); - if !has_selected_finality_proof { - return selected_finality_proof - } - - // now remove all obsolete headers and extract selected header - let selected_header_position = unjustified_headers - .binary_search_by_key(selected_header_number, |header| header.number()) - .expect("unjustified_headers contain all headers from intersection; qed"); - let selected_header = unjustified_headers.swap_remove(selected_header_position); - Some((selected_header, finality_proof.clone())) -} - -pub(crate) fn prune_recent_finality_proofs( - justified_header_number: P::Number, - recent_finality_proofs: &mut FinalityProofs

, - recent_finality_proofs_limit: usize, -) { - let position = recent_finality_proofs - .binary_search_by_key(&justified_header_number, |(header_number, _)| *header_number); - - // remove all obsolete elements - *recent_finality_proofs = recent_finality_proofs - .split_off(position.map(|position| position + 1).unwrap_or_else(|position| position)); - - // now - limit vec by size - let split_index = recent_finality_proofs.len().saturating_sub(recent_finality_proofs_limit); - *recent_finality_proofs = recent_finality_proofs.split_off(split_index); -} - -fn print_sync_progress( - progress_context: (Instant, Option), - best_number_at_source: P::Number, - best_number_at_target: P::Number, -) -> (Instant, Option) { - let (prev_time, prev_best_number_at_target) = progress_context; - let now = Instant::now(); - - let need_update = now - prev_time > Duration::from_secs(10) || - prev_best_number_at_target - .map(|prev_best_number_at_target| { - best_number_at_target.saturating_sub(prev_best_number_at_target) > 10.into() - }) - .unwrap_or(true); - - if !need_update { - return (prev_time, prev_best_number_at_target) - } - - log::info!( - target: "bridge", - "Synced {:?} of {:?} headers", - best_number_at_target, - best_number_at_source, - ); - (now, Some(best_number_at_target)) -} diff --git a/polkadot/bridges/relays/finality/src/finality_loop_tests.rs b/polkadot/bridges/relays/finality/src/finality_loop_tests.rs deleted file mode 100644 index b8cb3bdb354..00000000000 --- a/polkadot/bridges/relays/finality/src/finality_loop_tests.rs +++ /dev/null @@ -1,549 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Tests for finality synchronization loop. 
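The `prune_recent_finality_proofs` implementation above first locates the justified header number with a binary search, drops every buffered proof at or below it, and then trims the front of what remains so that at most `recent_finality_proofs_limit` entries are kept. A standalone sketch of that rule over plain tuples (the `prune` helper is illustrative):

    fn prune(proofs: &mut Vec<(u64, &'static str)>, justified_header_number: u64, limit: usize) {
        // Ok(i) means the justified header has a buffered proof, Err(i) is the insertion point.
        let position = proofs.binary_search_by_key(&justified_header_number, |(number, _)| *number);
        // Everything at or below the justified header is obsolete.
        *proofs = proofs.split_off(position.map(|p| p + 1).unwrap_or_else(|p| p));
        // Enforce the size limit by keeping only the newest entries.
        let split_index = proofs.len().saturating_sub(limit);
        *proofs = proofs.split_off(split_index);
    }

    fn main() {
        let mut proofs = vec![(10, "p10"), (13, "p13"), (15, "p15"), (17, "p17"), (19, "p19")];
        prune(&mut proofs, 13, 2);
        // 10 and 13 are obsolete; of 15, 17 and 19 only the newest two survive the limit.
        assert_eq!(proofs, vec![(17, "p17"), (19, "p19")]);
    }

The tests that follow (`prune_recent_finality_proofs_works`) exercise the same rule with different limits and justified header numbers.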
- -#![cfg(test)] - -use crate::{ - finality_loop::{ - prune_recent_finality_proofs, read_finality_proofs_from_stream, run, run_loop_iteration, - select_better_recent_finality_proof, select_header_to_submit, FinalityLoopState, - FinalityProofs, FinalitySyncParams, RestartableFinalityProofsStream, SourceClient, - TargetClient, - }, - sync_loop_metrics::SyncLoopMetrics, - FinalityProof, FinalitySyncPipeline, SourceHeader, -}; - -use async_trait::async_trait; -use futures::{FutureExt, Stream, StreamExt}; -use parking_lot::Mutex; -use relay_utils::{ - metrics::MetricsParams, relay_loop::Client as RelayClient, HeaderId, MaybeConnectionError, -}; -use std::{ - collections::HashMap, - pin::Pin, - sync::Arc, - time::{Duration, Instant}, -}; - -type IsMandatory = bool; -type TestNumber = u64; -type TestHash = u64; - -#[derive(Debug, Clone)] -enum TestError { - NonConnection, -} - -impl MaybeConnectionError for TestError { - fn is_connection_error(&self) -> bool { - false - } -} - -#[derive(Debug, Clone)] -struct TestFinalitySyncPipeline; - -impl FinalitySyncPipeline for TestFinalitySyncPipeline { - const SOURCE_NAME: &'static str = "TestSource"; - const TARGET_NAME: &'static str = "TestTarget"; - - type Hash = TestHash; - type Number = TestNumber; - type Header = TestSourceHeader; - type FinalityProof = TestFinalityProof; -} - -#[derive(Debug, Clone, PartialEq)] -struct TestSourceHeader(IsMandatory, TestNumber, TestHash); - -impl SourceHeader for TestSourceHeader { - fn hash(&self) -> TestHash { - self.2 - } - - fn number(&self) -> TestNumber { - self.1 - } - - fn is_mandatory(&self) -> bool { - self.0 - } -} - -#[derive(Debug, Clone, PartialEq)] -struct TestFinalityProof(TestNumber); - -impl FinalityProof for TestFinalityProof { - fn target_header_number(&self) -> TestNumber { - self.0 - } -} - -#[derive(Debug, Clone, Default)] -struct ClientsData { - source_best_block_number: TestNumber, - source_headers: HashMap)>, - source_proofs: Vec, - - target_best_block_id: HeaderId, - target_headers: Vec<(TestSourceHeader, TestFinalityProof)>, -} - -#[derive(Clone)] -struct TestSourceClient { - on_method_call: Arc, - data: Arc>, -} - -#[async_trait] -impl RelayClient for TestSourceClient { - type Error = TestError; - - async fn reconnect(&mut self) -> Result<(), TestError> { - unreachable!() - } -} - -#[async_trait] -impl SourceClient for TestSourceClient { - type FinalityProofsStream = Pin + 'static + Send>>; - - async fn best_finalized_block_number(&self) -> Result { - let mut data = self.data.lock(); - (self.on_method_call)(&mut *data); - Ok(data.source_best_block_number) - } - - async fn header_and_finality_proof( - &self, - number: TestNumber, - ) -> Result<(TestSourceHeader, Option), TestError> { - let mut data = self.data.lock(); - (self.on_method_call)(&mut *data); - data.source_headers.get(&number).cloned().ok_or(TestError::NonConnection) - } - - async fn finality_proofs(&self) -> Result { - let mut data = self.data.lock(); - (self.on_method_call)(&mut *data); - Ok(futures::stream::iter(data.source_proofs.clone()).boxed()) - } -} - -#[derive(Clone)] -struct TestTargetClient { - on_method_call: Arc, - data: Arc>, -} - -#[async_trait] -impl RelayClient for TestTargetClient { - type Error = TestError; - - async fn reconnect(&mut self) -> Result<(), TestError> { - unreachable!() - } -} - -#[async_trait] -impl TargetClient for TestTargetClient { - async fn best_finalized_source_block_id( - &self, - ) -> Result, TestError> { - let mut data = self.data.lock(); - (self.on_method_call)(&mut *data); - 
Ok(data.target_best_block_id) - } - - async fn submit_finality_proof( - &self, - header: TestSourceHeader, - proof: TestFinalityProof, - ) -> Result<(), TestError> { - let mut data = self.data.lock(); - (self.on_method_call)(&mut *data); - data.target_best_block_id = HeaderId(header.number(), header.hash()); - data.target_headers.push((header, proof)); - Ok(()) - } -} - -fn prepare_test_clients( - exit_sender: futures::channel::mpsc::UnboundedSender<()>, - state_function: impl Fn(&mut ClientsData) -> bool + Send + Sync + 'static, - source_headers: HashMap)>, -) -> (TestSourceClient, TestTargetClient) { - let internal_state_function: Arc = - Arc::new(move |data| { - if state_function(data) { - exit_sender.unbounded_send(()).unwrap(); - } - }); - let clients_data = Arc::new(Mutex::new(ClientsData { - source_best_block_number: 10, - source_headers, - source_proofs: vec![TestFinalityProof(12), TestFinalityProof(14)], - - target_best_block_id: HeaderId(5, 5), - target_headers: vec![], - })); - ( - TestSourceClient { - on_method_call: internal_state_function.clone(), - data: clients_data.clone(), - }, - TestTargetClient { on_method_call: internal_state_function, data: clients_data }, - ) -} - -fn test_sync_params() -> FinalitySyncParams { - FinalitySyncParams { - tick: Duration::from_secs(0), - recent_finality_proofs_limit: 1024, - stall_timeout: Duration::from_secs(1), - only_mandatory_headers: false, - } -} - -fn run_sync_loop( - state_function: impl Fn(&mut ClientsData) -> bool + Send + Sync + 'static, -) -> ClientsData { - let (exit_sender, exit_receiver) = futures::channel::mpsc::unbounded(); - let (source_client, target_client) = prepare_test_clients( - exit_sender, - state_function, - vec![ - (5, (TestSourceHeader(false, 5, 5), None)), - (6, (TestSourceHeader(false, 6, 6), None)), - (7, (TestSourceHeader(false, 7, 7), Some(TestFinalityProof(7)))), - (8, (TestSourceHeader(true, 8, 8), Some(TestFinalityProof(8)))), - (9, (TestSourceHeader(false, 9, 9), Some(TestFinalityProof(9)))), - (10, (TestSourceHeader(false, 10, 10), None)), - ] - .into_iter() - .collect(), - ); - let sync_params = test_sync_params(); - - let clients_data = source_client.data.clone(); - let _ = async_std::task::block_on(run( - source_client, - target_client, - sync_params, - MetricsParams::disabled(), - exit_receiver.into_future().map(|(_, _)| ()), - )); - - let clients_data = clients_data.lock().clone(); - clients_data -} - -#[test] -fn finality_sync_loop_works() { - let client_data = run_sync_loop(|data| { - // header#7 has persistent finality proof, but it isn't mandatory => it isn't submitted, - // because header#8 has persistent finality proof && it is mandatory => it is submitted - // header#9 has persistent finality proof, but it isn't mandatory => it is submitted, - // because there are no more persistent finality proofs - // - // once this ^^^ is done, we generate more blocks && read proof for blocks 12 and 14 from - // the stream - if data.target_best_block_id.0 == 9 { - data.source_best_block_number = 14; - data.source_headers.insert(11, (TestSourceHeader(false, 11, 11), None)); - data.source_headers - .insert(12, (TestSourceHeader(false, 12, 12), Some(TestFinalityProof(12)))); - data.source_headers.insert(13, (TestSourceHeader(false, 13, 13), None)); - data.source_headers - .insert(14, (TestSourceHeader(false, 14, 14), Some(TestFinalityProof(14)))); - } - // once this ^^^ is done, we generate more blocks && read persistent proof for block 16 - if data.target_best_block_id.0 == 14 { - 
data.source_best_block_number = 17; - data.source_headers.insert(15, (TestSourceHeader(false, 15, 15), None)); - data.source_headers - .insert(16, (TestSourceHeader(false, 16, 16), Some(TestFinalityProof(16)))); - data.source_headers.insert(17, (TestSourceHeader(false, 17, 17), None)); - } - - data.target_best_block_id.0 == 16 - }); - - assert_eq!( - client_data.target_headers, - vec![ - // before adding 11..14: finality proof for mandatory header#8 - (TestSourceHeader(true, 8, 8), TestFinalityProof(8)), - // before adding 11..14: persistent finality proof for non-mandatory header#9 - (TestSourceHeader(false, 9, 9), TestFinalityProof(9)), - // after adding 11..14: ephemeral finality proof for non-mandatory header#14 - (TestSourceHeader(false, 14, 14), TestFinalityProof(14)), - // after adding 15..17: persistent finality proof for non-mandatory header#16 - (TestSourceHeader(false, 16, 16), TestFinalityProof(16)), - ], - ); -} - -fn run_only_mandatory_headers_mode_test( - only_mandatory_headers: bool, - has_mandatory_headers: bool, -) -> Option<(TestSourceHeader, TestFinalityProof)> { - let (exit_sender, _) = futures::channel::mpsc::unbounded(); - let (source_client, target_client) = prepare_test_clients( - exit_sender, - |_| false, - vec![ - (6, (TestSourceHeader(false, 6, 6), Some(TestFinalityProof(6)))), - (7, (TestSourceHeader(false, 7, 7), Some(TestFinalityProof(7)))), - (8, (TestSourceHeader(has_mandatory_headers, 8, 8), Some(TestFinalityProof(8)))), - (9, (TestSourceHeader(false, 9, 9), Some(TestFinalityProof(9)))), - (10, (TestSourceHeader(false, 10, 10), Some(TestFinalityProof(10)))), - ] - .into_iter() - .collect(), - ); - async_std::task::block_on(select_header_to_submit( - &source_client, - &target_client, - &mut RestartableFinalityProofsStream::from(futures::stream::empty().boxed()), - &mut vec![], - 10, - 5, - &FinalitySyncParams { - tick: Duration::from_secs(0), - recent_finality_proofs_limit: 0, - stall_timeout: Duration::from_secs(0), - only_mandatory_headers, - }, - )) - .unwrap() -} - -#[test] -fn select_header_to_submit_skips_non_mandatory_headers_when_only_mandatory_headers_are_required() { - assert_eq!(run_only_mandatory_headers_mode_test(true, false), None); - assert_eq!( - run_only_mandatory_headers_mode_test(false, false), - Some((TestSourceHeader(false, 10, 10), TestFinalityProof(10))), - ); -} - -#[test] -fn select_header_to_submit_selects_mandatory_headers_when_only_mandatory_headers_are_required() { - assert_eq!( - run_only_mandatory_headers_mode_test(true, true), - Some((TestSourceHeader(true, 8, 8), TestFinalityProof(8))), - ); - assert_eq!( - run_only_mandatory_headers_mode_test(false, true), - Some((TestSourceHeader(true, 8, 8), TestFinalityProof(8))), - ); -} - -#[test] -fn select_better_recent_finality_proof_works() { - // if there are no unjustified headers, nothing is changed - assert_eq!( - select_better_recent_finality_proof::( - &[(5, TestFinalityProof(5))], - &mut vec![], - Some((TestSourceHeader(false, 2, 2), TestFinalityProof(2))), - ), - Some((TestSourceHeader(false, 2, 2), TestFinalityProof(2))), - ); - - // if there are no recent finality proofs, nothing is changed - assert_eq!( - select_better_recent_finality_proof::( - &[], - &mut vec![TestSourceHeader(false, 5, 5)], - Some((TestSourceHeader(false, 2, 2), TestFinalityProof(2))), - ), - Some((TestSourceHeader(false, 2, 2), TestFinalityProof(2))), - ); - - // if there's no intersection between recent finality proofs and unjustified headers, nothing is - // changed - let mut unjustified_headers = 
- vec![TestSourceHeader(false, 9, 9), TestSourceHeader(false, 10, 10)]; - assert_eq!( - select_better_recent_finality_proof::( - &[(1, TestFinalityProof(1)), (4, TestFinalityProof(4))], - &mut unjustified_headers, - Some((TestSourceHeader(false, 2, 2), TestFinalityProof(2))), - ), - Some((TestSourceHeader(false, 2, 2), TestFinalityProof(2))), - ); - - // if there's intersection between recent finality proofs and unjustified headers, but there are - // no proofs in this intersection, nothing is changed - let mut unjustified_headers = vec![ - TestSourceHeader(false, 8, 8), - TestSourceHeader(false, 9, 9), - TestSourceHeader(false, 10, 10), - ]; - assert_eq!( - select_better_recent_finality_proof::( - &[(7, TestFinalityProof(7)), (11, TestFinalityProof(11))], - &mut unjustified_headers, - Some((TestSourceHeader(false, 2, 2), TestFinalityProof(2))), - ), - Some((TestSourceHeader(false, 2, 2), TestFinalityProof(2))), - ); - assert_eq!( - unjustified_headers, - vec![ - TestSourceHeader(false, 8, 8), - TestSourceHeader(false, 9, 9), - TestSourceHeader(false, 10, 10) - ] - ); - - // if there's intersection between recent finality proofs and unjustified headers and there's - // a proof in this intersection: - // - this better (last from intersection) proof is selected; - // - 'obsolete' unjustified headers are pruned. - let mut unjustified_headers = vec![ - TestSourceHeader(false, 8, 8), - TestSourceHeader(false, 9, 9), - TestSourceHeader(false, 10, 10), - ]; - assert_eq!( - select_better_recent_finality_proof::( - &[(7, TestFinalityProof(7)), (9, TestFinalityProof(9))], - &mut unjustified_headers, - Some((TestSourceHeader(false, 2, 2), TestFinalityProof(2))), - ), - Some((TestSourceHeader(false, 9, 9), TestFinalityProof(9))), - ); -} - -#[test] -fn read_finality_proofs_from_stream_works() { - // when stream is currently empty, nothing is changed - let mut recent_finality_proofs = vec![(1, TestFinalityProof(1))]; - let mut stream = futures::stream::pending().into(); - read_finality_proofs_from_stream::( - &mut stream, - &mut recent_finality_proofs, - ); - assert_eq!(recent_finality_proofs, vec![(1, TestFinalityProof(1))]); - assert!(!stream.needs_restart); - - // when stream has entry with target, it is added to the recent proofs container - let mut stream = futures::stream::iter(vec![TestFinalityProof(4)]) - .chain(futures::stream::pending()) - .into(); - read_finality_proofs_from_stream::( - &mut stream, - &mut recent_finality_proofs, - ); - assert_eq!(recent_finality_proofs, vec![(1, TestFinalityProof(1)), (4, TestFinalityProof(4))]); - assert!(!stream.needs_restart); - - // when stream has ended, we'll need to restart it - let mut stream = futures::stream::empty().into(); - read_finality_proofs_from_stream::( - &mut stream, - &mut recent_finality_proofs, - ); - assert_eq!(recent_finality_proofs, vec![(1, TestFinalityProof(1)), (4, TestFinalityProof(4))]); - assert!(stream.needs_restart); -} - -#[test] -fn prune_recent_finality_proofs_works() { - let original_recent_finality_proofs: FinalityProofs = vec![ - (10, TestFinalityProof(10)), - (13, TestFinalityProof(13)), - (15, TestFinalityProof(15)), - (17, TestFinalityProof(17)), - (19, TestFinalityProof(19)), - ] - .into_iter() - .collect(); - - // when there's proof for justified header in the vec - let mut recent_finality_proofs = original_recent_finality_proofs.clone(); - prune_recent_finality_proofs::(10, &mut recent_finality_proofs, 1024); - assert_eq!(&original_recent_finality_proofs[1..], recent_finality_proofs); - - // when there are no 
proof for justified header in the vec - let mut recent_finality_proofs = original_recent_finality_proofs.clone(); - prune_recent_finality_proofs::(11, &mut recent_finality_proofs, 1024); - assert_eq!(&original_recent_finality_proofs[1..], recent_finality_proofs); - - // when there are too many entries after initial prune && they also need to be pruned - let mut recent_finality_proofs = original_recent_finality_proofs.clone(); - prune_recent_finality_proofs::(10, &mut recent_finality_proofs, 2); - assert_eq!(&original_recent_finality_proofs[3..], recent_finality_proofs); - - // when last entry is pruned - let mut recent_finality_proofs = original_recent_finality_proofs.clone(); - prune_recent_finality_proofs::(19, &mut recent_finality_proofs, 2); - assert_eq!(&original_recent_finality_proofs[5..], recent_finality_proofs); - - // when post-last entry is pruned - let mut recent_finality_proofs = original_recent_finality_proofs.clone(); - prune_recent_finality_proofs::(20, &mut recent_finality_proofs, 2); - assert_eq!(&original_recent_finality_proofs[5..], recent_finality_proofs); -} - -#[test] -fn different_forks_at_source_and_at_target_are_detected() { - let (exit_sender, _exit_receiver) = futures::channel::mpsc::unbounded(); - let (source_client, target_client) = prepare_test_clients( - exit_sender, - |_| false, - vec![ - (5, (TestSourceHeader(false, 5, 42), None)), - (6, (TestSourceHeader(false, 6, 6), None)), - (7, (TestSourceHeader(false, 7, 7), None)), - (8, (TestSourceHeader(false, 8, 8), None)), - (9, (TestSourceHeader(false, 9, 9), None)), - (10, (TestSourceHeader(false, 10, 10), None)), - ] - .into_iter() - .collect(), - ); - - let mut progress = (Instant::now(), None); - let mut finality_proofs_stream = RestartableFinalityProofsStream { - needs_restart: false, - stream: Box::pin(futures::stream::iter(vec![]).boxed()), - }; - let mut recent_finality_proofs = Vec::new(); - let metrics_sync = SyncLoopMetrics::new(None, "source", "target").unwrap(); - async_std::task::block_on(run_loop_iteration::( - &source_client, - &target_client, - FinalityLoopState { - progress: &mut progress, - finality_proofs_stream: &mut finality_proofs_stream, - recent_finality_proofs: &mut recent_finality_proofs, - last_transaction: None, - }, - &test_sync_params(), - &Some(metrics_sync.clone()), - )) - .unwrap(); - - assert!(!metrics_sync.is_using_same_fork()); -} diff --git a/polkadot/bridges/relays/finality/src/lib.rs b/polkadot/bridges/relays/finality/src/lib.rs deleted file mode 100644 index 49be64ff74d..00000000000 --- a/polkadot/bridges/relays/finality/src/lib.rs +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! This crate has single entrypoint to run synchronization loop that is built around finality -//! 
proofs, as opposed to headers synchronization loop, which is built around headers. The headers -//! are still submitted to the target node, but are treated as auxiliary data as we are not trying -//! to submit all source headers to the target node. - -pub use crate::{ - finality_loop::{metrics_prefix, run, FinalitySyncParams, SourceClient, TargetClient}, - sync_loop_metrics::SyncLoopMetrics, -}; - -use bp_header_chain::FinalityProof; -use std::fmt::Debug; - -mod finality_loop; -mod finality_loop_tests; -mod sync_loop_metrics; - -/// Finality proofs synchronization pipeline. -pub trait FinalitySyncPipeline: 'static + Clone + Debug + Send + Sync { - /// Name of the finality proofs source. - const SOURCE_NAME: &'static str; - /// Name of the finality proofs target. - const TARGET_NAME: &'static str; - - /// Headers we're syncing are identified by this hash. - type Hash: Eq + Clone + Copy + Send + Sync + Debug; - /// Headers we're syncing are identified by this number. - type Number: relay_utils::BlockNumberBase; - /// Type of header that we're syncing. - type Header: SourceHeader; - /// Finality proof type. - type FinalityProof: FinalityProof; -} - -/// Header that we're receiving from source node. -pub trait SourceHeader: Clone + Debug + PartialEq + Send + Sync { - /// Returns hash of header. - fn hash(&self) -> Hash; - /// Returns number of header. - fn number(&self) -> Number; - /// Returns true if this header needs to be submitted to target node. - fn is_mandatory(&self) -> bool; -} diff --git a/polkadot/bridges/relays/finality/src/sync_loop_metrics.rs b/polkadot/bridges/relays/finality/src/sync_loop_metrics.rs deleted file mode 100644 index a003a47d890..00000000000 --- a/polkadot/bridges/relays/finality/src/sync_loop_metrics.rs +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Metrics for headers synchronization relay loop. - -use relay_utils::metrics::{metric_name, register, IntGauge, Metric, PrometheusError, Registry}; - -/// Headers sync metrics. -#[derive(Clone)] -pub struct SyncLoopMetrics { - /// Best syncing header at the source. - best_source_block_number: IntGauge, - /// Best syncing header at the target. - best_target_block_number: IntGauge, - /// Flag that has `0` value when best source headers at the source node and at-target-chain - /// are matching and `1` otherwise. - using_different_forks: IntGauge, -} - -impl SyncLoopMetrics { - /// Create and register headers loop metrics. 
- pub fn new( - prefix: Option<&str>, - at_source_chain_label: &str, - at_target_chain_label: &str, - ) -> Result { - Ok(SyncLoopMetrics { - best_source_block_number: IntGauge::new( - metric_name(prefix, &format!("best_{}_block_number", at_source_chain_label)), - format!("Best block number at the {}", at_source_chain_label), - )?, - best_target_block_number: IntGauge::new( - metric_name(prefix, &format!("best_{}_block_number", at_target_chain_label)), - format!("Best block number at the {}", at_target_chain_label), - )?, - using_different_forks: IntGauge::new( - metric_name(prefix, &format!("is_{}_and_{}_using_different_forks", at_source_chain_label, at_target_chain_label)), - "Whether the best finalized source block at target node is different (value 1) from the \ - corresponding block at the source node", - )?, - }) - } - - /// Returns current value of the using-same-fork flag. - #[cfg(test)] - pub(crate) fn is_using_same_fork(&self) -> bool { - self.using_different_forks.get() == 0 - } - - /// Update best block number at source. - pub fn update_best_block_at_source>(&self, source_best_number: Number) { - self.best_source_block_number.set(source_best_number.into()); - } - - /// Update best block number at target. - pub fn update_best_block_at_target>(&self, target_best_number: Number) { - self.best_target_block_number.set(target_best_number.into()); - } - - /// Update using-same-fork flag. - pub fn update_using_same_fork(&self, using_same_fork: bool) { - self.using_different_forks.set(if using_same_fork { 0 } else { 1 }) - } -} - -impl Metric for SyncLoopMetrics { - fn register(&self, registry: &Registry) -> Result<(), PrometheusError> { - register(self.best_source_block_number.clone(), registry)?; - register(self.best_target_block_number.clone(), registry)?; - register(self.using_different_forks.clone(), registry)?; - Ok(()) - } -} diff --git a/polkadot/bridges/relays/lib-substrate-relay/Cargo.toml b/polkadot/bridges/relays/lib-substrate-relay/Cargo.toml deleted file mode 100644 index 9ca79b96701..00000000000 --- a/polkadot/bridges/relays/lib-substrate-relay/Cargo.toml +++ /dev/null @@ -1,52 +0,0 @@ -[package] -name = "substrate-relay-helper" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -anyhow = "1.0" -thiserror = "1.0.26" -async-std = "1.9.0" -async-trait = "0.1.42" -codec = { package = "parity-scale-codec", version = "3.0.0" } -futures = "0.3.12" -num-traits = "0.2" -log = "0.4.14" - -# Bridge dependencies - -bp-header-chain = { path = "../../primitives/header-chain" } -bridge-runtime-common = { path = "../../bin/runtime-common" } - -finality-grandpa = { version = "0.16.0" } -finality-relay = { path = "../finality" } -relay-utils = { path = "../utils" } -messages-relay = { path = "../messages" } -relay-substrate-client = { path = "../client-substrate" } - -pallet-bridge-grandpa = { path = "../../modules/grandpa" } -pallet-bridge-messages = { path = "../../modules/messages" } - -bp-runtime = { path = "../../primitives/runtime" } -bp-messages = { path = "../../primitives/messages" } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master" } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-finality-grandpa = { git = 
"https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } - -[dev-dependencies] -bp-millau = { path = "../../primitives/chain-millau" } -bp-rialto = { path = "../../primitives/chain-rialto" } -bp-rococo = { path = "../../primitives/chain-rococo" } -bp-wococo = { path = "../../primitives/chain-wococo" } -pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" } -relay-rococo-client = { path = "../client-rococo" } -relay-wococo-client = { path = "../client-wococo" } -rialto-runtime = { path = "../../bin/rialto/runtime" } diff --git a/polkadot/bridges/relays/lib-substrate-relay/src/conversion_rate_update.rs b/polkadot/bridges/relays/lib-substrate-relay/src/conversion_rate_update.rs deleted file mode 100644 index 469bc558993..00000000000 --- a/polkadot/bridges/relays/lib-substrate-relay/src/conversion_rate_update.rs +++ /dev/null @@ -1,440 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Tools for updating conversion rate that is stored in the runtime storage. - -use crate::{messages_lane::SubstrateMessageLane, TransactionParams}; - -use codec::Encode; -use relay_substrate_client::{ - transaction_stall_timeout, AccountIdOf, AccountKeyPairOf, CallOf, Chain, Client, SignParam, - TransactionEra, TransactionSignScheme, UnsignedTransaction, -}; -use relay_utils::metrics::F64SharedRef; -use sp_core::{Bytes, Pair}; -use std::time::{Duration, Instant}; - -/// Duration between updater iterations. -const SLEEP_DURATION: Duration = Duration::from_secs(60); - -/// Duration which will almost never expire. Since changing conversion rate may require manual -/// intervention (e.g. if call is made through `multisig` pallet), we don't want relayer to -/// resubmit transaction often. -const ALMOST_NEVER_DURATION: Duration = Duration::from_secs(60 * 60 * 24 * 30); - -/// Update-conversion-rate transaction status. -#[derive(Debug, Clone, Copy, PartialEq)] -enum TransactionStatus { - /// We have not submitted any transaction recently. - Idle, - /// We have recently submitted transaction that should update conversion rate. - Submitted(Instant, f64), -} - -/// Different ways of building 'update conversion rate' calls. -pub trait UpdateConversionRateCallBuilder { - /// Given conversion rate, build call that updates conversion rate in given chain runtime - /// storage. 
- fn build_update_conversion_rate_call(conversion_rate: f64) -> anyhow::Result>; -} - -impl UpdateConversionRateCallBuilder for () { - fn build_update_conversion_rate_call(_conversion_rate: f64) -> anyhow::Result> { - Err(anyhow::format_err!("Conversion rate update is not supported at {}", C::NAME)) - } -} - -/// Macro that generates `UpdateConversionRateCallBuilder` implementation for the case when -/// you have a direct access to the source chain runtime. -#[rustfmt::skip] -#[macro_export] -macro_rules! generate_direct_update_conversion_rate_call_builder { - ( - $source_chain:ident, - $mocked_builder:ident, - $runtime:ty, - $instance:ty, - $parameter:path - ) => { - pub struct $mocked_builder; - - impl $crate::conversion_rate_update::UpdateConversionRateCallBuilder<$source_chain> - for $mocked_builder - { - fn build_update_conversion_rate_call( - conversion_rate: f64, - ) -> anyhow::Result> { - Ok(pallet_bridge_messages::Call::update_pallet_parameter::<$runtime, $instance> { - parameter: $parameter(sp_runtime::FixedU128::from_float(conversion_rate)), - }.into()) - } - } - }; -} - -/// Macro that generates `UpdateConversionRateCallBuilder` implementation for the case when -/// you only have an access to the mocked version of source chain runtime. In this case you -/// should provide "name" of the call variant for the bridge messages calls, the "name" of -/// the variant for the `update_pallet_parameter` call within that first option and the name -/// of the conversion rate parameter itself. -#[rustfmt::skip] -#[macro_export] -macro_rules! generate_mocked_update_conversion_rate_call_builder { - ( - $source_chain:ident, - $mocked_builder:ident, - $bridge_messages:path, - $update_pallet_parameter:path, - $parameter:path - ) => { - pub struct $mocked_builder; - - impl $crate::conversion_rate_update::UpdateConversionRateCallBuilder<$source_chain> - for $mocked_builder - { - fn build_update_conversion_rate_call( - conversion_rate: f64, - ) -> anyhow::Result> { - Ok($bridge_messages($update_pallet_parameter($parameter( - sp_runtime::FixedU128::from_float(conversion_rate), - )))) - } - } - }; -} - -/// Run infinite conversion rate updater loop. -/// -/// The loop is maintaining the Left -> Right conversion rate, used as `RightTokens = LeftTokens * -/// Rate`. -pub fn run_conversion_rate_update_loop( - client: Client, - transaction_params: TransactionParams>, - left_to_right_stored_conversion_rate: F64SharedRef, - left_to_base_conversion_rate: F64SharedRef, - right_to_base_conversion_rate: F64SharedRef, - max_difference_ratio: f64, -) where - Lane: SubstrateMessageLane, - Sign: TransactionSignScheme, - AccountIdOf: From< as Pair>::Public>, -{ - let stall_timeout = transaction_stall_timeout( - transaction_params.mortality, - Lane::SourceChain::AVERAGE_BLOCK_INTERVAL, - ALMOST_NEVER_DURATION, - ); - - log::info!( - target: "bridge", - "Starting {} -> {} conversion rate (on {}) update loop. 
Stall timeout: {}s", - Lane::TargetChain::NAME, - Lane::SourceChain::NAME, - Lane::SourceChain::NAME, - stall_timeout.as_secs(), - ); - - async_std::task::spawn(async move { - let mut transaction_status = TransactionStatus::Idle; - loop { - async_std::task::sleep(SLEEP_DURATION).await; - let maybe_new_conversion_rate = maybe_select_new_conversion_rate( - stall_timeout, - &mut transaction_status, - &left_to_right_stored_conversion_rate, - &left_to_base_conversion_rate, - &right_to_base_conversion_rate, - max_difference_ratio, - ) - .await; - if let Some((prev_conversion_rate, new_conversion_rate)) = maybe_new_conversion_rate { - log::info!( - target: "bridge", - "Going to update {} -> {} (on {}) conversion rate to {}.", - Lane::TargetChain::NAME, - Lane::SourceChain::NAME, - Lane::SourceChain::NAME, - new_conversion_rate, - ); - - let result = update_target_to_source_conversion_rate::( - client.clone(), - transaction_params.clone(), - new_conversion_rate, - ) - .await; - match result { - Ok(()) => { - transaction_status = - TransactionStatus::Submitted(Instant::now(), prev_conversion_rate); - }, - Err(error) => { - log::error!( - target: "bridge", - "Failed to submit conversion rate update transaction: {:?}", - error, - ); - }, - } - } - } - }); -} - -/// Select new conversion rate to submit to the node. -async fn maybe_select_new_conversion_rate( - stall_timeout: Duration, - transaction_status: &mut TransactionStatus, - left_to_right_stored_conversion_rate: &F64SharedRef, - left_to_base_conversion_rate: &F64SharedRef, - right_to_base_conversion_rate: &F64SharedRef, - max_difference_ratio: f64, -) -> Option<(f64, f64)> { - let left_to_right_stored_conversion_rate = - (*left_to_right_stored_conversion_rate.read().await)?; - match *transaction_status { - TransactionStatus::Idle => (), - TransactionStatus::Submitted(submitted_at, _) - if Instant::now() - submitted_at > stall_timeout => - { - log::error!( - target: "bridge", - "Conversion rate update transaction has been lost and loop stalled. Restarting", - ); - - // we assume that our transaction has been lost - *transaction_status = TransactionStatus::Idle; - }, - TransactionStatus::Submitted(_, previous_left_to_right_stored_conversion_rate) => { - // we can't compare float values from different sources directly, so we only care - // whether the stored rate has been changed or not. If it has been changed, then we - // assume that our proposal has been accepted. 
- // - // float comparison is ok here, because we compare same-origin (stored in runtime - // storage) values and if they are different, it means that the value has actually been - // updated - #[allow(clippy::float_cmp)] - if previous_left_to_right_stored_conversion_rate == left_to_right_stored_conversion_rate - { - // the rate has not been changed => we won't submit any transactions until it is - // accepted, or the rate is changed by someone else - return None - } - - *transaction_status = TransactionStatus::Idle; - }, - } - - let left_to_base_conversion_rate = (*left_to_base_conversion_rate.read().await)?; - let right_to_base_conversion_rate = (*right_to_base_conversion_rate.read().await)?; - let actual_left_to_right_conversion_rate = - left_to_base_conversion_rate / right_to_base_conversion_rate; - - let rate_difference = - (actual_left_to_right_conversion_rate - left_to_right_stored_conversion_rate).abs(); - let rate_difference_ratio = rate_difference / left_to_right_stored_conversion_rate; - if rate_difference_ratio < max_difference_ratio { - return None - } - - Some((left_to_right_stored_conversion_rate, actual_left_to_right_conversion_rate)) -} - -/// Update Target -> Source tokens conversion rate, stored in the Source runtime storage. -pub async fn update_target_to_source_conversion_rate( - client: Client, - transaction_params: TransactionParams>, - updated_rate: f64, -) -> anyhow::Result<()> -where - Lane: SubstrateMessageLane, - Sign: TransactionSignScheme, - AccountIdOf: From< as Pair>::Public>, -{ - let genesis_hash = *client.genesis_hash(); - let signer_id = transaction_params.signer.public().into(); - let (spec_version, transaction_version) = client.simple_runtime_version().await?; - let call = - Lane::TargetToSourceChainConversionRateUpdateBuilder::build_update_conversion_rate_call( - updated_rate, - )?; - client - .submit_signed_extrinsic(signer_id, move |best_block_id, transaction_nonce| { - Ok(Bytes( - Sign::sign_transaction(SignParam { - spec_version, - transaction_version, - genesis_hash, - signer: transaction_params.signer, - era: TransactionEra::new(best_block_id, transaction_params.mortality), - unsigned: UnsignedTransaction::new(call.into(), transaction_nonce).into(), - })? 
- .encode(), - )) - }) - .await - .map(drop) - .map_err(|err| anyhow::format_err!("{:?}", err)) -} - -#[cfg(test)] -mod tests { - use super::*; - use async_std::sync::{Arc, RwLock}; - - const TEST_STALL_TIMEOUT: Duration = Duration::from_secs(60); - - fn test_maybe_select_new_conversion_rate( - mut transaction_status: TransactionStatus, - stored_conversion_rate: Option, - left_to_base_conversion_rate: Option, - right_to_base_conversion_rate: Option, - max_difference_ratio: f64, - ) -> (Option<(f64, f64)>, TransactionStatus) { - let stored_conversion_rate = Arc::new(RwLock::new(stored_conversion_rate)); - let left_to_base_conversion_rate = Arc::new(RwLock::new(left_to_base_conversion_rate)); - let right_to_base_conversion_rate = Arc::new(RwLock::new(right_to_base_conversion_rate)); - let result = async_std::task::block_on(maybe_select_new_conversion_rate( - TEST_STALL_TIMEOUT, - &mut transaction_status, - &stored_conversion_rate, - &left_to_base_conversion_rate, - &right_to_base_conversion_rate, - max_difference_ratio, - )); - (result, transaction_status) - } - - #[test] - fn rate_is_not_updated_when_transaction_is_submitted() { - let status = TransactionStatus::Submitted(Instant::now(), 10.0); - assert_eq!( - test_maybe_select_new_conversion_rate(status, Some(10.0), Some(1.0), Some(1.0), 0.0), - (None, status), - ); - } - - #[test] - fn transaction_state_is_changed_to_idle_when_stored_rate_shanges() { - assert_eq!( - test_maybe_select_new_conversion_rate( - TransactionStatus::Submitted(Instant::now(), 1.0), - Some(10.0), - Some(1.0), - Some(1.0), - 100.0 - ), - (None, TransactionStatus::Idle), - ); - } - - #[test] - fn transaction_is_not_submitted_when_left_to_base_rate_is_unknown() { - assert_eq!( - test_maybe_select_new_conversion_rate( - TransactionStatus::Idle, - Some(10.0), - None, - Some(1.0), - 0.0 - ), - (None, TransactionStatus::Idle), - ); - } - - #[test] - fn transaction_is_not_submitted_when_right_to_base_rate_is_unknown() { - assert_eq!( - test_maybe_select_new_conversion_rate( - TransactionStatus::Idle, - Some(10.0), - Some(1.0), - None, - 0.0 - ), - (None, TransactionStatus::Idle), - ); - } - - #[test] - fn transaction_is_not_submitted_when_stored_rate_is_unknown() { - assert_eq!( - test_maybe_select_new_conversion_rate( - TransactionStatus::Idle, - None, - Some(1.0), - Some(1.0), - 0.0 - ), - (None, TransactionStatus::Idle), - ); - } - - #[test] - fn transaction_is_not_submitted_when_difference_is_below_threshold() { - assert_eq!( - test_maybe_select_new_conversion_rate( - TransactionStatus::Idle, - Some(1.0), - Some(1.0), - Some(1.01), - 0.02 - ), - (None, TransactionStatus::Idle), - ); - } - - #[test] - fn transaction_is_submitted_when_difference_is_above_threshold() { - let left_to_right_stored_conversion_rate = 1.0; - let left_to_base_conversion_rate = 18f64; - let right_to_base_conversion_rate = 180f64; - - assert!(left_to_base_conversion_rate < right_to_base_conversion_rate); - - assert_eq!( - test_maybe_select_new_conversion_rate( - TransactionStatus::Idle, - Some(left_to_right_stored_conversion_rate), - Some(left_to_base_conversion_rate), - Some(right_to_base_conversion_rate), - 0.02 - ), - ( - Some(( - left_to_right_stored_conversion_rate, - left_to_base_conversion_rate / right_to_base_conversion_rate, - )), - TransactionStatus::Idle - ), - ); - } - - #[test] - fn transaction_expires() { - let status = TransactionStatus::Submitted(Instant::now() - TEST_STALL_TIMEOUT / 2, 10.0); - assert_eq!( - test_maybe_select_new_conversion_rate(status, Some(10.0), Some(1.0), 
Some(1.0), 0.0), - (None, status), - ); - - let status = TransactionStatus::Submitted(Instant::now() - TEST_STALL_TIMEOUT * 2, 10.0); - assert_eq!( - test_maybe_select_new_conversion_rate(status, Some(10.0), Some(1.0), Some(1.0), 0.0), - (Some((10.0, 1.0)), TransactionStatus::Idle), - ); - } -} diff --git a/polkadot/bridges/relays/lib-substrate-relay/src/error.rs b/polkadot/bridges/relays/lib-substrate-relay/src/error.rs deleted file mode 100644 index 9402d55e379..00000000000 --- a/polkadot/bridges/relays/lib-substrate-relay/src/error.rs +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Relay errors. - -use relay_substrate_client as client; -use sp_finality_grandpa::AuthorityList; -use sp_runtime::traits::MaybeDisplay; -use std::fmt::Debug; -use thiserror::Error; - -/// Relay errors. -#[derive(Error, Debug)] -pub enum Error { - /// Failed to submit signed extrinsic from to the target chain. - #[error("Failed to submit {0} transaction: {1:?}")] - SubmitTransaction(&'static str, client::Error), - /// Failed subscribe to justification stream of the source chain. - #[error("Failed to subscribe to {0} justifications: {1:?}")] - Subscribe(&'static str, client::Error), - /// Failed subscribe to read justification from the source chain (client error). - #[error("Failed to read {0} justification from the stream: {1}")] - ReadJustification(&'static str, client::Error), - /// Failed subscribe to read justification from the source chain (stream ended). - #[error("Failed to read {0} justification from the stream: stream has ended unexpectedly")] - ReadJustificationStreamEnded(&'static str), - /// Failed subscribe to decode justification from the source chain. - #[error("Failed to decode {0} justification: {1:?}")] - DecodeJustification(&'static str, codec::Error), - /// GRANDPA authorities read from the source chain are invalid. - #[error("Read invalid {0} authorities set: {1:?}")] - ReadInvalidAuthorities(&'static str, AuthorityList), - /// Failed to guess initial GRANDPA authorities at the given header of the source chain. - #[error("Failed to guess initial {0} GRANDPA authorities set id: checked all possible ids in range [0; {1}]")] - GuessInitialAuthorities(&'static str, HeaderNumber), - /// Failed to retrieve GRANDPA authorities at the given header from the source chain. - #[error("Failed to retrive {0} GRANDPA authorities set at header {1}: {2:?}")] - RetrieveAuthorities(&'static str, Hash, client::Error), - /// Failed to decode GRANDPA authorities at the given header of the source chain. - #[error("Failed to decode {0} GRANDPA authorities set at header {1}: {2:?}")] - DecodeAuthorities(&'static str, Hash, codec::Error), - /// Failed to retrieve header by the hash from the source chain. 
- #[error("Failed to retrieve {0} header with hash {1}: {:?}")] - RetrieveHeader(&'static str, Hash, client::Error), - /// Failed to retrieve best finalized source header hash from the target chain. - #[error("Failed to retrieve best finalized {0} header from the target chain: {1}")] - RetrieveBestFinalizedHeaderHash(&'static str, client::Error), -} diff --git a/polkadot/bridges/relays/lib-substrate-relay/src/finality_guards.rs b/polkadot/bridges/relays/lib-substrate-relay/src/finality_guards.rs deleted file mode 100644 index a3e69afe1b1..00000000000 --- a/polkadot/bridges/relays/lib-substrate-relay/src/finality_guards.rs +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Tools for starting guards of finality relays. - -use crate::TransactionParams; - -use relay_substrate_client::{ - AccountIdOf, AccountKeyPairOf, ChainWithBalances, TransactionSignScheme, -}; -use sp_core::Pair; - -/// Start finality relay guards. -pub async fn start>( - target_client: &relay_substrate_client::Client, - transaction_params: &TransactionParams, - enable_version_guard: bool, - maximal_balance_decrease_per_day: C::Balance, -) -> relay_substrate_client::Result<()> -where - AccountIdOf: From< as Pair>::Public>, -{ - if enable_version_guard { - relay_substrate_client::guard::abort_on_spec_version_change( - target_client.clone(), - target_client.simple_runtime_version().await?.0, - ); - } - relay_substrate_client::guard::abort_when_account_balance_decreased( - target_client.clone(), - transaction_params.signer.public().into(), - maximal_balance_decrease_per_day, - ); - Ok(()) -} diff --git a/polkadot/bridges/relays/lib-substrate-relay/src/finality_pipeline.rs b/polkadot/bridges/relays/lib-substrate-relay/src/finality_pipeline.rs deleted file mode 100644 index 3daf8d11440..00000000000 --- a/polkadot/bridges/relays/lib-substrate-relay/src/finality_pipeline.rs +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Types and functions intended to ease adding of new Substrate -> Substrate -//! finality proofs synchronization pipelines. 
- -use crate::{ - finality_source::SubstrateFinalitySource, finality_target::SubstrateFinalityTarget, - TransactionParams, -}; - -use async_trait::async_trait; -use bp_header_chain::justification::GrandpaJustification; -use finality_relay::FinalitySyncPipeline; -use pallet_bridge_grandpa::{Call as BridgeGrandpaCall, Config as BridgeGrandpaConfig}; -use relay_substrate_client::{ - transaction_stall_timeout, AccountIdOf, AccountKeyPairOf, BlockNumberOf, CallOf, Chain, - ChainWithGrandpa, Client, HashOf, HeaderOf, SyncHeader, TransactionSignScheme, -}; -use relay_utils::metrics::MetricsParams; -use sp_core::Pair; -use std::{fmt::Debug, marker::PhantomData}; - -/// Default limit of recent finality proofs. -/// -/// Finality delay of 4096 blocks is unlikely to happen in practice in -/// Substrate+GRANDPA based chains (good to know). -pub(crate) const RECENT_FINALITY_PROOFS_LIMIT: usize = 4096; - -/// Substrate -> Substrate finality proofs synchronization pipeline. -#[async_trait] -pub trait SubstrateFinalitySyncPipeline: 'static + Clone + Debug + Send + Sync { - /// Headers of this chain are submitted to the `TargetChain`. - type SourceChain: ChainWithGrandpa; - /// Headers of the `SourceChain` are submitted to this chain. - type TargetChain: Chain; - - /// How submit finality proof call is built? - type SubmitFinalityProofCallBuilder: SubmitFinalityProofCallBuilder; - /// Scheme used to sign target chain transactions. - type TransactionSignScheme: TransactionSignScheme; - - /// Add relay guards if required. - async fn start_relay_guards( - _target_client: &Client, - _transaction_params: &TransactionParams>, - _enable_version_guard: bool, - ) -> relay_substrate_client::Result<()> { - Ok(()) - } -} - -/// Adapter that allows all `SubstrateFinalitySyncPipeline` to act as `FinalitySyncPipeline`. -#[derive(Clone, Debug)] -pub struct FinalitySyncPipelineAdapter { - _phantom: PhantomData
<P>
, -} - -impl<P: SubstrateFinalitySyncPipeline> FinalitySyncPipeline for FinalitySyncPipelineAdapter<P>
{ - const SOURCE_NAME: &'static str = P::SourceChain::NAME; - const TARGET_NAME: &'static str = P::TargetChain::NAME; - - type Hash = HashOf; - type Number = BlockNumberOf; - type Header = relay_substrate_client::SyncHeader>; - type FinalityProof = GrandpaJustification>; -} - -/// Different ways of building `submit_finality_proof` calls. -pub trait SubmitFinalityProofCallBuilder { - /// Given source chain header and its finality proofs, build call of `submit_finality_proof` - /// function of bridge GRANDPA module at the target chain. - fn build_submit_finality_proof_call( - header: SyncHeader>, - proof: GrandpaJustification>, - ) -> CallOf; -} - -/// Building `submit_finality_proof` call when you have direct access to the target -/// chain runtime. -pub struct DirectSubmitFinalityProofCallBuilder { - _phantom: PhantomData<(P, R, I)>, -} - -impl SubmitFinalityProofCallBuilder
<P>
for DirectSubmitFinalityProofCallBuilder<P, R, I> -where - P: SubstrateFinalitySyncPipeline, - R: BridgeGrandpaConfig<I>, - I: 'static, - R::BridgedChain: bp_runtime::Chain

>, - CallOf: From>, -{ - fn build_submit_finality_proof_call( - header: SyncHeader>, - proof: GrandpaJustification>, - ) -> CallOf { - BridgeGrandpaCall::::submit_finality_proof { - finality_target: Box::new(header.into_inner()), - justification: proof, - } - .into() - } -} - -/// Macro that generates `SubmitFinalityProofCallBuilder` implementation for the case when -/// you only have an access to the mocked version of target chain runtime. In this case you -/// should provide "name" of the call variant for the bridge GRANDPA calls and the "name" of -/// the variant for the `submit_finality_proof` call within that first option. -#[rustfmt::skip] -#[macro_export] -macro_rules! generate_mocked_submit_finality_proof_call_builder { - ($pipeline:ident, $mocked_builder:ident, $bridge_grandpa:path, $submit_finality_proof:path) => { - pub struct $mocked_builder; - - impl $crate::finality_pipeline::SubmitFinalityProofCallBuilder<$pipeline> - for $mocked_builder - { - fn build_submit_finality_proof_call( - header: relay_substrate_client::SyncHeader< - relay_substrate_client::HeaderOf< - <$pipeline as $crate::finality_pipeline::SubstrateFinalitySyncPipeline>::SourceChain - > - >, - proof: bp_header_chain::justification::GrandpaJustification< - relay_substrate_client::HeaderOf< - <$pipeline as $crate::finality_pipeline::SubstrateFinalitySyncPipeline>::SourceChain - > - >, - ) -> relay_substrate_client::CallOf< - <$pipeline as $crate::finality_pipeline::SubstrateFinalitySyncPipeline>::TargetChain - > { - $bridge_grandpa($submit_finality_proof(Box::new(header.into_inner()), proof)) - } - } - }; -} - -/// Run Substrate-to-Substrate finality sync loop. -pub async fn run( - source_client: Client, - target_client: Client, - only_mandatory_headers: bool, - transaction_params: TransactionParams>, - metrics_params: MetricsParams, -) -> anyhow::Result<()> -where - AccountIdOf: From< as Pair>::Public>, - P::TransactionSignScheme: TransactionSignScheme, -{ - log::info!( - target: "bridge", - "Starting {} -> {} finality proof relay", - P::SourceChain::NAME, - P::TargetChain::NAME, - ); - - finality_relay::run( - SubstrateFinalitySource::
<P>
::new(source_client, None), - SubstrateFinalityTarget::
<P>
::new(target_client, transaction_params.clone()), - finality_relay::FinalitySyncParams { - tick: std::cmp::max( - P::SourceChain::AVERAGE_BLOCK_INTERVAL, - P::TargetChain::AVERAGE_BLOCK_INTERVAL, - ), - recent_finality_proofs_limit: RECENT_FINALITY_PROOFS_LIMIT, - stall_timeout: transaction_stall_timeout( - transaction_params.mortality, - P::TargetChain::AVERAGE_BLOCK_INTERVAL, - crate::STALL_TIMEOUT, - ), - only_mandatory_headers, - }, - metrics_params, - futures::future::pending(), - ) - .await - .map_err(|e| anyhow::format_err!("{}", e)) -} diff --git a/polkadot/bridges/relays/lib-substrate-relay/src/finality_source.rs b/polkadot/bridges/relays/lib-substrate-relay/src/finality_source.rs deleted file mode 100644 index 804d3212930..00000000000 --- a/polkadot/bridges/relays/lib-substrate-relay/src/finality_source.rs +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Default generic implementation of finality source for basic Substrate client. - -use crate::finality_pipeline::{FinalitySyncPipelineAdapter, SubstrateFinalitySyncPipeline}; - -use async_std::sync::{Arc, Mutex}; -use async_trait::async_trait; -use bp_header_chain::justification::GrandpaJustification; -use codec::Decode; -use finality_relay::SourceClient; -use futures::stream::{unfold, Stream, StreamExt}; -use relay_substrate_client::{ - BlockNumberOf, BlockWithJustification, Chain, Client, Error, HeaderOf, -}; -use relay_utils::relay_loop::Client as RelayClient; -use sp_runtime::traits::Header as HeaderT; -use std::pin::Pin; - -/// Shared updatable reference to the maximal header number that we want to sync from the source. -pub type RequiredHeaderNumberRef = Arc::BlockNumber>>; - -/// Substrate finality proofs stream. -pub type SubstrateFinalityProofsStream
<P>
= Pin< - Box< - dyn Stream< - Item = GrandpaJustification< - HeaderOf<
<P as SubstrateFinalitySyncPipeline>
::SourceChain>, - >, - > + Send, - >, ->; - -/// Substrate node as finality source. -pub struct SubstrateFinalitySource { - client: Client, - maximal_header_number: Option>, -} - -impl SubstrateFinalitySource
<P>
{ - /// Create new headers source using given client. - pub fn new( - client: Client, - maximal_header_number: Option>, - ) -> Self { - SubstrateFinalitySource { client, maximal_header_number } - } - - /// Returns reference to the underlying RPC client. - pub fn client(&self) -> &Client { - &self.client - } - - /// Returns best finalized block number. - pub async fn on_chain_best_finalized_block_number( - &self, - ) -> Result, Error> { - // we **CAN** continue to relay finality proofs if source node is out of sync, because - // target node may be missing proofs that are already available at the source - let finalized_header_hash = self.client.best_finalized_header_hash().await?; - let finalized_header = self.client.header_by_hash(finalized_header_hash).await?; - Ok(*finalized_header.number()) - } -} - -impl Clone for SubstrateFinalitySource
<P>
{ - fn clone(&self) -> Self { - SubstrateFinalitySource { - client: self.client.clone(), - maximal_header_number: self.maximal_header_number.clone(), - } - } -} - -#[async_trait] -impl RelayClient for SubstrateFinalitySource
<P>
{ - type Error = Error; - - async fn reconnect(&mut self) -> Result<(), Error> { - self.client.reconnect().await - } -} - -#[async_trait] -impl SourceClient> - for SubstrateFinalitySource
<P>
-{ - type FinalityProofsStream = SubstrateFinalityProofsStream
<P>
; - - async fn best_finalized_block_number(&self) -> Result, Error> { - let mut finalized_header_number = self.on_chain_best_finalized_block_number().await?; - // never return block number larger than requested. This way we'll never sync headers - // past `maximal_header_number` - if let Some(ref maximal_header_number) = self.maximal_header_number { - let maximal_header_number = *maximal_header_number.lock().await; - if finalized_header_number > maximal_header_number { - finalized_header_number = maximal_header_number; - } - } - Ok(finalized_header_number) - } - - async fn header_and_finality_proof( - &self, - number: BlockNumberOf, - ) -> Result< - ( - relay_substrate_client::SyncHeader>, - Option>>, - ), - Error, - > { - let header_hash = self.client.block_hash_by_number(number).await?; - let signed_block = self.client.get_block(Some(header_hash)).await?; - - let justification = signed_block - .justification() - .map(|raw_justification| { - GrandpaJustification::>::decode( - &mut raw_justification.as_slice(), - ) - }) - .transpose() - .map_err(Error::ResponseParseFailed)?; - - Ok((signed_block.header().into(), justification)) - } - - async fn finality_proofs(&self) -> Result { - Ok(unfold( - self.client.clone().subscribe_justifications().await?, - move |subscription| async move { - loop { - let log_error = |err| { - log::error!( - target: "bridge", - "Failed to read justification target from the {} justifications stream: {:?}", - P::SourceChain::NAME, - err, - ); - }; - - let next_justification = subscription - .next() - .await - .map_err(|err| log_error(err.to_string())) - .ok()??; - - let decoded_justification = - GrandpaJustification::>::decode( - &mut &next_justification[..], - ); - - let justification = match decoded_justification { - Ok(j) => j, - Err(err) => { - log_error(format!("decode failed with error {:?}", err)); - continue - }, - }; - - return Some((justification, subscription)) - } - }, - ) - .boxed()) - } -} diff --git a/polkadot/bridges/relays/lib-substrate-relay/src/finality_target.rs b/polkadot/bridges/relays/lib-substrate-relay/src/finality_target.rs deleted file mode 100644 index 4c581417104..00000000000 --- a/polkadot/bridges/relays/lib-substrate-relay/src/finality_target.rs +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate client as Substrate finality proof target. The chain we connect to should have -//! bridge GRANDPA pallet deployed and provide `FinalityApi` to allow bridging -//! with chain. 
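// A minimal usage sketch (not part of the original file), assuming some pipeline type `P` and
// an already-connected target-chain client. It only uses `SubstrateFinalityTarget` and
// `TransactionParams` as declared below: construct the target and refuse to relay while the
// bridge GRANDPA pallet at the target chain is halted.
async fn ensure_target_ready<P: SubstrateFinalitySyncPipeline>(
	client: Client<P::TargetChain>,
	transaction_params: TransactionParams<AccountKeyPairOf<P::TransactionSignScheme>>,
) -> Result<(), Error> {
	let target = SubstrateFinalityTarget::<P>::new(client, transaction_params);
	target.ensure_pallet_active().await
}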
- -use crate::{ - finality_pipeline::{ - FinalitySyncPipelineAdapter, SubmitFinalityProofCallBuilder, SubstrateFinalitySyncPipeline, - }, - TransactionParams, -}; - -use async_trait::async_trait; -use bp_header_chain::{justification::GrandpaJustification, storage_keys::is_halted_key}; -use codec::Encode; -use finality_relay::TargetClient; -use relay_substrate_client::{ - AccountIdOf, AccountKeyPairOf, Chain, ChainWithGrandpa, Client, Error, HeaderIdOf, HeaderOf, - SignParam, SyncHeader, TransactionEra, TransactionSignScheme, UnsignedTransaction, -}; -use relay_utils::relay_loop::Client as RelayClient; -use sp_core::{Bytes, Pair}; - -/// Substrate client as Substrate finality target. -pub struct SubstrateFinalityTarget { - client: Client, - transaction_params: TransactionParams>, -} - -impl SubstrateFinalityTarget
<P>
{ - /// Create new Substrate headers target. - pub fn new( - client: Client, - transaction_params: TransactionParams>, - ) -> Self { - SubstrateFinalityTarget { client, transaction_params } - } - - /// Ensure that the GRANDPA pallet at target chain is active. - pub async fn ensure_pallet_active(&self) -> Result<(), Error> { - let is_halted = self - .client - .storage_value(is_halted_key(P::SourceChain::WITH_CHAIN_GRANDPA_PALLET_NAME), None) - .await?; - if is_halted.unwrap_or(false) { - Err(Error::BridgePalletIsHalted) - } else { - Ok(()) - } - } -} - -impl Clone for SubstrateFinalityTarget
<P>
{ - fn clone(&self) -> Self { - SubstrateFinalityTarget { - client: self.client.clone(), - transaction_params: self.transaction_params.clone(), - } - } -} - -#[async_trait] -impl RelayClient for SubstrateFinalityTarget
<P>
{ - type Error = Error; - - async fn reconnect(&mut self) -> Result<(), Error> { - self.client.reconnect().await - } -} - -#[async_trait] -impl TargetClient> - for SubstrateFinalityTarget
<P>
-where - AccountIdOf: From< as Pair>::Public>, - P::TransactionSignScheme: TransactionSignScheme, -{ - async fn best_finalized_source_block_id(&self) -> Result, Error> { - // we can't continue to relay finality if target node is out of sync, because - // it may have already received (some of) headers that we're going to relay - self.client.ensure_synced().await?; - // we can't relay finality if GRANDPA pallet at target chain is halted - self.ensure_pallet_active().await?; - - Ok(crate::messages_source::read_client_state::( - &self.client, - None, - P::SourceChain::BEST_FINALIZED_HEADER_ID_METHOD, - ) - .await? - .best_finalized_peer_at_best_self) - } - - async fn submit_finality_proof( - &self, - header: SyncHeader>, - proof: GrandpaJustification>, - ) -> Result<(), Error> { - let genesis_hash = *self.client.genesis_hash(); - let transaction_params = self.transaction_params.clone(); - let call = - P::SubmitFinalityProofCallBuilder::build_submit_finality_proof_call(header, proof); - let (spec_version, transaction_version) = self.client.simple_runtime_version().await?; - self.client - .submit_signed_extrinsic( - self.transaction_params.signer.public().into(), - move |best_block_id, transaction_nonce| { - Ok(Bytes( - P::TransactionSignScheme::sign_transaction(SignParam { - spec_version, - transaction_version, - genesis_hash, - signer: transaction_params.signer.clone(), - era: TransactionEra::new(best_block_id, transaction_params.mortality), - unsigned: UnsignedTransaction::new(call.into(), transaction_nonce), - })? - .encode(), - )) - }, - ) - .await - .map(drop) - } -} diff --git a/polkadot/bridges/relays/lib-substrate-relay/src/headers_initialize.rs b/polkadot/bridges/relays/lib-substrate-relay/src/headers_initialize.rs deleted file mode 100644 index 0e1371c53c8..00000000000 --- a/polkadot/bridges/relays/lib-substrate-relay/src/headers_initialize.rs +++ /dev/null @@ -1,282 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Initialize Substrate -> Substrate headers bridge. -//! -//! Initialization is a transaction that calls `initialize()` function of the -//! `pallet-bridge-grandpa` pallet. This transaction brings initial header -//! and authorities set from source to target chain. The headers sync starts -//! with this header. 
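// A hedged sketch (not part of the original file) of how this module's `initialize` entrypoint
// was typically driven. `source_client`, `target_client`, `target_signer_account` and the
// `encode_and_sign_initialize_call` helper are hypothetical: in the real relays the closure
// wrapped the prepared `InitializationData` into the target runtime's bridge GRANDPA
// `initialize` call (sometimes behind `sudo`), then signed and SCALE-encoded it.
initialize(
	source_client,
	target_client,
	target_signer_account,
	move |transaction_nonce, init_data| {
		// hypothetical helper: build, sign and SCALE-encode the `initialize` call
		encode_and_sign_initialize_call(transaction_nonce, init_data)
	},
)
.await;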
- -use crate::error::Error; - -use bp_header_chain::{ - find_grandpa_authorities_scheduled_change, - justification::{verify_justification, GrandpaJustification}, - InitializationData, -}; -use codec::Decode; -use finality_grandpa::voter_set::VoterSet; -use num_traits::{One, Zero}; -use relay_substrate_client::{ - BlockNumberOf, Chain, ChainWithGrandpa, Client, Error as SubstrateError, HashOf, -}; -use sp_core::Bytes; -use sp_finality_grandpa::AuthorityList as GrandpaAuthoritiesSet; -use sp_runtime::traits::Header as HeaderT; - -/// Submit headers-bridge initialization transaction. -pub async fn initialize( - source_client: Client, - target_client: Client, - target_transactions_signer: TargetChain::AccountId, - prepare_initialize_transaction: impl FnOnce( - TargetChain::Index, - InitializationData, - ) -> Result - + Send - + 'static, -) { - let result = do_initialize( - source_client, - target_client, - target_transactions_signer, - prepare_initialize_transaction, - ) - .await; - - match result { - Ok(Some(tx_hash)) => log::info!( - target: "bridge", - "Successfully submitted {}-headers bridge initialization transaction to {}: {:?}", - SourceChain::NAME, - TargetChain::NAME, - tx_hash, - ), - Ok(None) => (), - Err(err) => log::error!( - target: "bridge", - "Failed to submit {}-headers bridge initialization transaction to {}: {:?}", - SourceChain::NAME, - TargetChain::NAME, - err, - ), - } -} - -/// Craft and submit initialization transaction, returning any error that may occur. -async fn do_initialize( - source_client: Client, - target_client: Client, - target_transactions_signer: TargetChain::AccountId, - prepare_initialize_transaction: impl FnOnce( - TargetChain::Index, - InitializationData, - ) -> Result - + Send - + 'static, -) -> Result< - Option, - Error::Number>, -> { - let is_initialized = is_initialized::(&target_client).await?; - if is_initialized { - log::info!( - target: "bridge", - "{}-headers bridge at {} is already initialized. Skipping", - SourceChain::NAME, - TargetChain::NAME, - ); - return Ok(None) - } - - let initialization_data = prepare_initialization_data(source_client).await?; - log::info!( - target: "bridge", - "Prepared initialization data for {}-headers bridge at {}: {:?}", - SourceChain::NAME, - TargetChain::NAME, - initialization_data, - ); - - let initialization_tx_hash = target_client - .submit_signed_extrinsic(target_transactions_signer, move |_, transaction_nonce| { - prepare_initialize_transaction(transaction_nonce, initialization_data) - }) - .await - .map_err(|err| Error::SubmitTransaction(TargetChain::NAME, err))?; - Ok(Some(initialization_tx_hash)) -} - -/// Returns `Ok(true)` if bridge has already been initialized. -async fn is_initialized( - target_client: &Client, -) -> Result, BlockNumberOf>> { - Ok(target_client - .raw_storage_value( - bp_header_chain::storage_keys::best_finalized_hash_key( - SourceChain::WITH_CHAIN_GRANDPA_PALLET_NAME, - ), - None, - ) - .await - .map_err(|err| Error::RetrieveBestFinalizedHeaderHash(SourceChain::NAME, err))? - .is_some()) -} - -/// Prepare initialization data for the GRANDPA verifier pallet. -async fn prepare_initialization_data( - source_client: Client, -) -> Result< - InitializationData, - Error::Number>, -> { - // In ideal world we just need to get best finalized header and then to read GRANDPA authorities - // set (`pallet_grandpa::CurrentSetId` + `GrandpaApi::grandpa_authorities()`) at this header. - // - // But now there are problems with this approach - `CurrentSetId` may return invalid value. 
So - // here we're waiting for the next justification, read the authorities set and then try to - // figure out the set id with bruteforce. - let justifications = source_client - .subscribe_justifications() - .await - .map_err(|err| Error::Subscribe(SourceChain::NAME, err))?; - // Read next justification - the header that it finalizes will be used as initial header. - let justification = justifications - .next() - .await - .map_err(|e| Error::ReadJustification(SourceChain::NAME, e)) - .and_then(|justification| { - justification.ok_or(Error::ReadJustificationStreamEnded(SourceChain::NAME)) - })?; - - // Read initial header. - let justification: GrandpaJustification = - Decode::decode(&mut &justification.0[..]) - .map_err(|err| Error::DecodeJustification(SourceChain::NAME, err))?; - - let (initial_header_hash, initial_header_number) = - (justification.commit.target_hash, justification.commit.target_number); - - let initial_header = source_header(&source_client, initial_header_hash).await?; - log::trace!(target: "bridge", "Selected {} initial header: {}/{}", - SourceChain::NAME, - initial_header_number, - initial_header_hash, - ); - - // Read GRANDPA authorities set at initial header. - let initial_authorities_set = - source_authorities_set(&source_client, initial_header_hash).await?; - log::trace!(target: "bridge", "Selected {} initial authorities set: {:?}", - SourceChain::NAME, - initial_authorities_set, - ); - - // If initial header changes the GRANDPA authorities set, then we need previous authorities - // to verify justification. - let mut authorities_for_verification = initial_authorities_set.clone(); - let scheduled_change = find_grandpa_authorities_scheduled_change(&initial_header); - assert!( - scheduled_change.as_ref().map(|c| c.delay.is_zero()).unwrap_or(true), - "GRANDPA authorities change at {} scheduled to happen in {:?} blocks. We expect\ - regular hange to have zero delay", - initial_header_hash, - scheduled_change.as_ref().map(|c| c.delay), - ); - let schedules_change = scheduled_change.is_some(); - if schedules_change { - authorities_for_verification = - source_authorities_set(&source_client, *initial_header.parent_hash()).await?; - log::trace!( - target: "bridge", - "Selected {} header is scheduling GRANDPA authorities set changes. Using previous set: {:?}", - SourceChain::NAME, - authorities_for_verification, - ); - } - - // Now let's try to guess authorities set id by verifying justification. 
- let mut initial_authorities_set_id = 0; - let mut min_possible_block_number = SourceChain::BlockNumber::zero(); - let authorities_for_verification = VoterSet::new(authorities_for_verification.clone()) - .ok_or(Error::ReadInvalidAuthorities(SourceChain::NAME, authorities_for_verification))?; - loop { - log::trace!( - target: "bridge", "Trying {} GRANDPA authorities set id: {}", - SourceChain::NAME, - initial_authorities_set_id, - ); - - let is_valid_set_id = verify_justification::( - (initial_header_hash, initial_header_number), - initial_authorities_set_id, - &authorities_for_verification, - &justification, - ) - .is_ok(); - - if is_valid_set_id { - break - } - - initial_authorities_set_id += 1; - min_possible_block_number += One::one(); - if min_possible_block_number > initial_header_number { - // there can't be more authorities set changes than headers => if we have reached - // `initial_block_number` and still have not found correct value of - // `initial_authorities_set_id`, then something else is broken => fail - return Err(Error::GuessInitialAuthorities(SourceChain::NAME, initial_header_number)) - } - } - - Ok(InitializationData { - header: Box::new(initial_header), - authority_list: initial_authorities_set, - set_id: if schedules_change { - initial_authorities_set_id + 1 - } else { - initial_authorities_set_id - }, - is_halted: false, - }) -} - -/// Read header by hash from the source client. -async fn source_header( - source_client: &Client, - header_hash: SourceChain::Hash, -) -> Result::Number>> -{ - source_client - .header_by_hash(header_hash) - .await - .map_err(|err| Error::RetrieveHeader(SourceChain::NAME, header_hash, err)) -} - -/// Read GRANDPA authorities set at given header. -async fn source_authorities_set( - source_client: &Client, - header_hash: SourceChain::Hash, -) -> Result::Number>> -{ - let raw_authorities_set = source_client - .grandpa_authorities_set(header_hash) - .await - .map_err(|err| Error::RetrieveAuthorities(SourceChain::NAME, header_hash, err))?; - GrandpaAuthoritiesSet::decode(&mut &raw_authorities_set[..]) - .map_err(|err| Error::DecodeAuthorities(SourceChain::NAME, header_hash, err)) -} diff --git a/polkadot/bridges/relays/lib-substrate-relay/src/helpers.rs b/polkadot/bridges/relays/lib-substrate-relay/src/helpers.rs deleted file mode 100644 index 80359b1c2a9..00000000000 --- a/polkadot/bridges/relays/lib-substrate-relay/src/helpers.rs +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate relay helpers - -use relay_utils::metrics::{FloatJsonValueMetric, PrometheusError, StandaloneMetric}; - -/// Creates standalone token price metric. 
-pub fn token_price_metric(token_id: &str) -> Result { - FloatJsonValueMetric::new( - format!("https://api.coingecko.com/api/v3/simple/price?ids={}&vs_currencies=btc", token_id), - format!("$.{}.btc", token_id), - format!("{}_to_base_conversion_rate", token_id.replace('-', "_")), - format!("Rate used to convert from {} to some BASE tokens", token_id.to_uppercase()), - ) -} - -/// Compute conversion rate between two tokens immediately, without spawning any metrics. -/// -/// Returned rate may be used in expression: `from_tokens * rate -> to_tokens`. -pub async fn tokens_conversion_rate_from_metrics( - from_token_id: &str, - to_token_id: &str, -) -> anyhow::Result { - let from_token_metric = token_price_metric(from_token_id)?; - from_token_metric.update().await; - let to_token_metric = token_price_metric(to_token_id)?; - to_token_metric.update().await; - - let from_token_value = *from_token_metric.shared_value_ref().read().await; - let to_token_value = *to_token_metric.shared_value_ref().read().await; - // `FloatJsonValueMetric` guarantees that the value is positive && normal, so no additional - // checks required here - match (from_token_value, to_token_value) { - (Some(from_token_value), Some(to_token_value)) => - Ok(tokens_conversion_rate(from_token_value, to_token_value)), - _ => Err(anyhow::format_err!( - "Failed to compute conversion rate from {} to {}", - from_token_id, - to_token_id, - )), - } -} - -/// Compute conversion rate between two tokens, given token prices. -/// -/// Returned rate may be used in expression: `from_tokens * rate -> to_tokens`. -/// -/// Both prices are assumed to be normal and non-negative. -pub fn tokens_conversion_rate(from_token_value: f64, to_token_value: f64) -> f64 { - from_token_value / to_token_value -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn rialto_to_millau_conversion_rate_is_correct() { - let rialto_price = 18.18; - let millau_price = 136.35; - assert!(rialto_price < millau_price); - - let conversion_rate = tokens_conversion_rate(rialto_price, millau_price); - let rialto_amount = 100.0; - let millau_amount = rialto_amount * conversion_rate; - assert!( - rialto_amount > millau_amount, - "{} RLT * {} = {} MLU", - rialto_amount, - conversion_rate, - millau_amount, - ); - } - - #[test] - fn millau_to_rialto_conversion_rate_is_correct() { - let rialto_price = 18.18; - let millau_price = 136.35; - assert!(rialto_price < millau_price); - - let conversion_rate = tokens_conversion_rate(millau_price, rialto_price); - let millau_amount = 100.0; - let rialto_amount = millau_amount * conversion_rate; - assert!( - rialto_amount > millau_amount, - "{} MLU * {} = {} RLT", - millau_amount, - conversion_rate, - rialto_amount, - ); - } -} diff --git a/polkadot/bridges/relays/lib-substrate-relay/src/lib.rs b/polkadot/bridges/relays/lib-substrate-relay/src/lib.rs deleted file mode 100644 index 27d91147c2d..00000000000 --- a/polkadot/bridges/relays/lib-substrate-relay/src/lib.rs +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! The library of substrate relay. contains some public codes to provide to substrate relay. - -#![warn(missing_docs)] - -use std::time::Duration; - -pub mod conversion_rate_update; -pub mod error; -pub mod finality_guards; -pub mod finality_pipeline; -pub mod finality_source; -pub mod finality_target; -pub mod headers_initialize; -pub mod helpers; -pub mod messages_lane; -pub mod messages_metrics; -pub mod messages_source; -pub mod messages_target; -pub mod on_demand_headers; - -/// Default relay loop stall timeout. If transactions generated by relay are immortal, then -/// this timeout is used. -/// -/// There are no any strict requirements on block time in Substrate. But we assume here that all -/// Substrate-based chains will be designed to produce relatively fast (compared to the slowest -/// blockchains) blocks. So 1 hour seems to be a good guess for (even congested) chains to mine -/// transaction, or remove it from the pool. -pub const STALL_TIMEOUT: Duration = Duration::from_secs(60 * 60); - -/// Transaction creation parameters. -#[derive(Clone, Debug)] -pub struct TransactionParams { - /// Transactions author. - pub signer: TS, - /// Transactions mortality. - pub mortality: Option, -} diff --git a/polkadot/bridges/relays/lib-substrate-relay/src/messages_lane.rs b/polkadot/bridges/relays/lib-substrate-relay/src/messages_lane.rs deleted file mode 100644 index 775249e9e23..00000000000 --- a/polkadot/bridges/relays/lib-substrate-relay/src/messages_lane.rs +++ /dev/null @@ -1,513 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Tools for supporting message lanes between two Substrate-based chains. 
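// Hedged sketch, not part of the original sources: how the `TransactionParams` and
// `STALL_TIMEOUT` items from lib.rs above are typically combined by a relay. The
// `relayer_pair` value is an assumption used only for illustration:
//
//     let transaction_params = TransactionParams {
//         signer: relayer_pair,   // key pair that signs the relay transactions
//         mortality: Some(64),    // valid for ~64 blocks; `None` makes transactions
//                                 // immortal and the one-hour STALL_TIMEOUT applies
//     };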
- -use crate::{ - conversion_rate_update::UpdateConversionRateCallBuilder, - messages_metrics::StandaloneMessagesMetrics, - messages_source::{SubstrateMessagesProof, SubstrateMessagesSource}, - messages_target::{SubstrateMessagesDeliveryProof, SubstrateMessagesTarget}, - on_demand_headers::OnDemandHeadersRelay, - TransactionParams, STALL_TIMEOUT, -}; - -use bp_messages::{LaneId, MessageNonce}; -use bp_runtime::{AccountIdOf, Chain as _}; -use bridge_runtime_common::messages::{ - source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, -}; -use codec::Encode; -use frame_support::weights::{GetDispatchInfo, Weight}; -use messages_relay::{message_lane::MessageLane, relay_strategy::RelayStrategy}; -use pallet_bridge_messages::{Call as BridgeMessagesCall, Config as BridgeMessagesConfig}; -use relay_substrate_client::{ - transaction_stall_timeout, AccountKeyPairOf, BalanceOf, BlockNumberOf, CallOf, Chain, - ChainWithMessages, Client, HashOf, TransactionSignScheme, -}; -use relay_utils::metrics::MetricsParams; -use sp_core::Pair; -use std::{fmt::Debug, marker::PhantomData}; - -/// Substrate -> Substrate messages synchronization pipeline. -pub trait SubstrateMessageLane: 'static + Clone + Debug + Send + Sync { - /// Name of the source -> target tokens conversion rate parameter. - /// - /// The parameter is stored at the target chain and the storage key is computed using - /// `bp_runtime::storage_parameter_key` function. If value is unknown, it is assumed - /// to be 1. - const SOURCE_TO_TARGET_CONVERSION_RATE_PARAMETER_NAME: Option<&'static str>; - /// Name of the target -> source tokens conversion rate parameter. - /// - /// The parameter is stored at the source chain and the storage key is computed using - /// `bp_runtime::storage_parameter_key` function. If value is unknown, it is assumed - /// to be 1. - const TARGET_TO_SOURCE_CONVERSION_RATE_PARAMETER_NAME: Option<&'static str>; - - /// Name of the source chain fee multiplier parameter. - /// - /// The parameter is stored at the target chain and the storage key is computed using - /// `bp_runtime::storage_parameter_key` function. If value is unknown, it is assumed - /// to be 1. - const SOURCE_FEE_MULTIPLIER_PARAMETER_NAME: Option<&'static str>; - /// Name of the target chain fee multiplier parameter. - /// - /// The parameter is stored at the source chain and the storage key is computed using - /// `bp_runtime::storage_parameter_key` function. If value is unknown, it is assumed - /// to be 1. - const TARGET_FEE_MULTIPLIER_PARAMETER_NAME: Option<&'static str>; - - /// Name of the transaction payment pallet, deployed at the source chain. - const AT_SOURCE_TRANSACTION_PAYMENT_PALLET_NAME: Option<&'static str>; - /// Name of the transaction payment pallet, deployed at the target chain. - const AT_TARGET_TRANSACTION_PAYMENT_PALLET_NAME: Option<&'static str>; - - /// Messages of this chain are relayed to the `TargetChain`. - type SourceChain: ChainWithMessages; - /// Messages from the `SourceChain` are dispatched on this chain. - type TargetChain: ChainWithMessages; - - /// Scheme used to sign source chain transactions. - type SourceTransactionSignScheme: TransactionSignScheme; - /// Scheme used to sign target chain transactions. - type TargetTransactionSignScheme: TransactionSignScheme; - - /// How receive messages proof call is built? - type ReceiveMessagesProofCallBuilder: ReceiveMessagesProofCallBuilder; - /// How receive messages delivery proof call is built? 
- type ReceiveMessagesDeliveryProofCallBuilder: ReceiveMessagesDeliveryProofCallBuilder; - - /// `TargetChain` tokens to `SourceChain` tokens conversion rate update builder. - /// - /// If not applicable to this bridge, you may use `()` here. - type TargetToSourceChainConversionRateUpdateBuilder: UpdateConversionRateCallBuilder< - Self::SourceChain, - >; - - /// Message relay strategy. - type RelayStrategy: RelayStrategy; -} - -/// Adapter that allows all `SubstrateMessageLane` to act as `MessageLane`. -#[derive(Clone, Debug)] -pub(crate) struct MessageLaneAdapter { - _phantom: PhantomData
<P>
, -} - -impl<P: SubstrateMessageLane> MessageLane for MessageLaneAdapter
<P>
{ - const SOURCE_NAME: &'static str = P::SourceChain::NAME; - const TARGET_NAME: &'static str = P::TargetChain::NAME; - - type MessagesProof = SubstrateMessagesProof; - type MessagesReceivingProof = SubstrateMessagesDeliveryProof; - - type SourceChainBalance = BalanceOf; - type SourceHeaderNumber = BlockNumberOf; - type SourceHeaderHash = HashOf; - - type TargetHeaderNumber = BlockNumberOf; - type TargetHeaderHash = HashOf; -} - -/// Substrate <-> Substrate messages relay parameters. -pub struct MessagesRelayParams { - /// Messages source client. - pub source_client: Client, - /// Source transaction params. - pub source_transaction_params: - TransactionParams>, - /// Messages target client. - pub target_client: Client, - /// Target transaction params. - pub target_transaction_params: - TransactionParams>, - /// Optional on-demand source to target headers relay. - pub source_to_target_headers_relay: Option>, - /// Optional on-demand target to source headers relay. - pub target_to_source_headers_relay: Option>, - /// Identifier of lane that needs to be served. - pub lane_id: LaneId, - /// Metrics parameters. - pub metrics_params: MetricsParams, - /// Pre-registered standalone metrics. - pub standalone_metrics: Option>, - /// Relay strategy. - pub relay_strategy: P::RelayStrategy, -} - -/// Run Substrate-to-Substrate messages sync loop. -pub async fn run(params: MessagesRelayParams
<P>
) -> anyhow::Result<()> -where - AccountIdOf: - From< as Pair>::Public>, - AccountIdOf: - From< as Pair>::Public>, - BalanceOf: TryFrom>, - P::SourceTransactionSignScheme: TransactionSignScheme, - P::TargetTransactionSignScheme: TransactionSignScheme, -{ - let source_client = params.source_client; - let target_client = params.target_client; - let stall_timeout = relay_substrate_client::bidirectional_transaction_stall_timeout( - params.source_transaction_params.mortality, - params.target_transaction_params.mortality, - P::SourceChain::AVERAGE_BLOCK_INTERVAL, - P::TargetChain::AVERAGE_BLOCK_INTERVAL, - STALL_TIMEOUT, - ); - let relayer_id_at_source: AccountIdOf = - params.source_transaction_params.signer.public().into(); - - // 2/3 is reserved for proofs and tx overhead - let max_messages_size_in_single_batch = P::TargetChain::max_extrinsic_size() / 3; - // we don't know exact weights of the Polkadot runtime. So to guess weights we'll be using - // weights from Rialto and then simply dividing it by x2. - let (max_messages_in_single_batch, max_messages_weight_in_single_batch) = - crate::messages_lane::select_delivery_transaction_limits::< - ::WeightInfo, - >( - P::TargetChain::max_extrinsic_weight(), - P::SourceChain::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, - ); - let (max_messages_in_single_batch, max_messages_weight_in_single_batch) = - (max_messages_in_single_batch / 2, max_messages_weight_in_single_batch / 2); - - let standalone_metrics = params.standalone_metrics.map(Ok).unwrap_or_else(|| { - crate::messages_metrics::standalone_metrics::
<P>
( - source_client.clone(), - target_client.clone(), - ) - })?; - - log::info!( - target: "bridge", - "Starting {} -> {} messages relay.\n\t\ - {} relayer account id: {:?}\n\t\ - Max messages in single transaction: {}\n\t\ - Max messages size in single transaction: {}\n\t\ - Max messages weight in single transaction: {}\n\t\ - Tx mortality: {:?} (~{}m)/{:?} (~{}m)\n\t\ - Stall timeout: {:?}", - P::SourceChain::NAME, - P::TargetChain::NAME, - P::SourceChain::NAME, - relayer_id_at_source, - max_messages_in_single_batch, - max_messages_size_in_single_batch, - max_messages_weight_in_single_batch, - params.source_transaction_params.mortality, - transaction_stall_timeout( - params.source_transaction_params.mortality, - P::SourceChain::AVERAGE_BLOCK_INTERVAL, - STALL_TIMEOUT, - ).as_secs_f64() / 60.0f64, - params.target_transaction_params.mortality, - transaction_stall_timeout( - params.target_transaction_params.mortality, - P::TargetChain::AVERAGE_BLOCK_INTERVAL, - STALL_TIMEOUT, - ).as_secs_f64() / 60.0f64, - stall_timeout, - ); - - messages_relay::message_lane_loop::run( - messages_relay::message_lane_loop::Params { - lane: params.lane_id, - source_tick: P::SourceChain::AVERAGE_BLOCK_INTERVAL, - target_tick: P::TargetChain::AVERAGE_BLOCK_INTERVAL, - reconnect_delay: relay_utils::relay_loop::RECONNECT_DELAY, - stall_timeout, - delivery_params: messages_relay::message_lane_loop::MessageDeliveryParams { - max_unrewarded_relayer_entries_at_target: - P::SourceChain::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, - max_unconfirmed_nonces_at_target: - P::SourceChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, - max_messages_in_single_batch, - max_messages_weight_in_single_batch, - max_messages_size_in_single_batch, - relay_strategy: params.relay_strategy, - }, - }, - SubstrateMessagesSource::
<P>
::new( - source_client.clone(), - target_client.clone(), - params.lane_id, - params.source_transaction_params, - params.target_to_source_headers_relay, - ), - SubstrateMessagesTarget::
<P>
::new( - target_client, - source_client, - params.lane_id, - relayer_id_at_source, - params.target_transaction_params, - standalone_metrics.clone(), - params.source_to_target_headers_relay, - ), - standalone_metrics.register_and_spawn(params.metrics_params)?, - futures::future::pending(), - ) - .await - .map_err(Into::into) -} - -/// Different ways of building `receive_messages_proof` calls. -pub trait ReceiveMessagesProofCallBuilder { - /// Given messages proof, build call of `receive_messages_proof` function of bridge - /// messages module at the target chain. - fn build_receive_messages_proof_call( - relayer_id_at_source: AccountIdOf, - proof: SubstrateMessagesProof, - messages_count: u32, - dispatch_weight: Weight, - trace_call: bool, - ) -> CallOf; -} - -/// Building `receive_messages_proof` call when you have direct access to the target -/// chain runtime. -pub struct DirectReceiveMessagesProofCallBuilder { - _phantom: PhantomData<(P, R, I)>, -} - -impl ReceiveMessagesProofCallBuilder
<P>
for DirectReceiveMessagesProofCallBuilder -where - P: SubstrateMessageLane, - R: BridgeMessagesConfig>, - I: 'static, - R::SourceHeaderChain: bp_messages::target_chain::SourceHeaderChain< - R::InboundMessageFee, - MessagesProof = FromBridgedChainMessagesProof>, - >, - CallOf: From> + GetDispatchInfo, -{ - fn build_receive_messages_proof_call( - relayer_id_at_source: AccountIdOf, - proof: SubstrateMessagesProof, - messages_count: u32, - dispatch_weight: Weight, - trace_call: bool, - ) -> CallOf { - let call: CallOf = BridgeMessagesCall::::receive_messages_proof { - relayer_id_at_bridged_chain: relayer_id_at_source, - proof: proof.1, - messages_count, - dispatch_weight, - } - .into(); - if trace_call { - // this trace isn't super-accurate, because limits are for transactions and we - // have a call here, but it provides required information - log::trace!( - target: "bridge", - "Prepared {} -> {} messages delivery call. Weight: {}/{}, size: {}/{}", - P::SourceChain::NAME, - P::TargetChain::NAME, - call.get_dispatch_info().weight, - P::TargetChain::max_extrinsic_weight(), - call.encode().len(), - P::TargetChain::max_extrinsic_size(), - ); - } - call - } -} - -/// Macro that generates `ReceiveMessagesProofCallBuilder` implementation for the case when -/// you only have an access to the mocked version of target chain runtime. In this case you -/// should provide "name" of the call variant for the bridge messages calls and the "name" of -/// the variant for the `receive_messages_proof` call within that first option. -#[rustfmt::skip] -#[macro_export] -macro_rules! generate_mocked_receive_message_proof_call_builder { - ($pipeline:ident, $mocked_builder:ident, $bridge_messages:path, $receive_messages_proof:path) => { - pub struct $mocked_builder; - - impl $crate::messages_lane::ReceiveMessagesProofCallBuilder<$pipeline> - for $mocked_builder - { - fn build_receive_messages_proof_call( - relayer_id_at_source: relay_substrate_client::AccountIdOf< - <$pipeline as $crate::messages_lane::SubstrateMessageLane>::SourceChain - >, - proof: $crate::messages_source::SubstrateMessagesProof< - <$pipeline as $crate::messages_lane::SubstrateMessageLane>::SourceChain - >, - messages_count: u32, - dispatch_weight: Weight, - _trace_call: bool, - ) -> relay_substrate_client::CallOf< - <$pipeline as $crate::messages_lane::SubstrateMessageLane>::TargetChain - > { - $bridge_messages($receive_messages_proof( - relayer_id_at_source, - proof.1, - messages_count, - dispatch_weight, - )) - } - } - }; -} - -/// Different ways of building `receive_messages_delivery_proof` calls. -pub trait ReceiveMessagesDeliveryProofCallBuilder { - /// Given messages delivery proof, build call of `receive_messages_delivery_proof` function of - /// bridge messages module at the source chain. - fn build_receive_messages_delivery_proof_call( - proof: SubstrateMessagesDeliveryProof, - trace_call: bool, - ) -> CallOf; -} - -/// Building `receive_messages_delivery_proof` call when you have direct access to the source -/// chain runtime. -pub struct DirectReceiveMessagesDeliveryProofCallBuilder { - _phantom: PhantomData<(P, R, I)>, -} - -impl ReceiveMessagesDeliveryProofCallBuilder
<P>
- for DirectReceiveMessagesDeliveryProofCallBuilder -where - P: SubstrateMessageLane, - R: BridgeMessagesConfig, - I: 'static, - R::TargetHeaderChain: bp_messages::source_chain::TargetHeaderChain< - R::OutboundPayload, - R::AccountId, - MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof>, - >, - CallOf: From> + GetDispatchInfo, -{ - fn build_receive_messages_delivery_proof_call( - proof: SubstrateMessagesDeliveryProof, - trace_call: bool, - ) -> CallOf { - let call: CallOf = - BridgeMessagesCall::::receive_messages_delivery_proof { - proof: proof.1, - relayers_state: proof.0, - } - .into(); - if trace_call { - // this trace isn't super-accurate, because limits are for transactions and we - // have a call here, but it provides required information - log::trace!( - target: "bridge", - "Prepared {} -> {} delivery confirmation transaction. Weight: {}/{}, size: {}/{}", - P::TargetChain::NAME, - P::SourceChain::NAME, - call.get_dispatch_info().weight, - P::SourceChain::max_extrinsic_weight(), - call.encode().len(), - P::SourceChain::max_extrinsic_size(), - ); - } - call - } -} - -/// Macro that generates `ReceiveMessagesDeliveryProofCallBuilder` implementation for the case when -/// you only have an access to the mocked version of source chain runtime. In this case you -/// should provide "name" of the call variant for the bridge messages calls and the "name" of -/// the variant for the `receive_messages_delivery_proof` call within that first option. -#[rustfmt::skip] -#[macro_export] -macro_rules! generate_mocked_receive_message_delivery_proof_call_builder { - ($pipeline:ident, $mocked_builder:ident, $bridge_messages:path, $receive_messages_delivery_proof:path) => { - pub struct $mocked_builder; - - impl $crate::messages_lane::ReceiveMessagesDeliveryProofCallBuilder<$pipeline> - for $mocked_builder - { - fn build_receive_messages_delivery_proof_call( - proof: $crate::messages_target::SubstrateMessagesDeliveryProof< - <$pipeline as $crate::messages_lane::SubstrateMessageLane>::TargetChain - >, - _trace_call: bool, - ) -> relay_substrate_client::CallOf< - <$pipeline as $crate::messages_lane::SubstrateMessageLane>::SourceChain - > { - $bridge_messages($receive_messages_delivery_proof(proof.1, proof.0)) - } - } - }; -} - -/// Returns maximal number of messages and their maximal cumulative dispatch weight, based -/// on given chain parameters. -pub fn select_delivery_transaction_limits( - max_extrinsic_weight: Weight, - max_unconfirmed_messages_at_inbound_lane: MessageNonce, -) -> (MessageNonce, Weight) { - // We may try to guess accurate value, based on maximal number of messages and per-message - // weight overhead, but the relay loop isn't using this info in a super-accurate way anyway. - // So just a rough guess: let's say 1/3 of max tx weight is for tx itself and the rest is - // for messages dispatch. - - // Another thing to keep in mind is that our runtimes (when this code was written) accept - // messages with dispatch weight <= max_extrinsic_weight/2. So we can't reserve less than - // that for dispatch. 
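// Worked example with made-up numbers, added for clarity (not taken from any real
// runtime): if `max_extrinsic_weight` is 900 units, the code below reserves 300 units
// for the delivery transaction itself and leaves 600 units (>= 900 / 2) for message
// dispatch. With a base delivery overhead of 50 units and a per-message overhead of
// 10 units, at most (300 - 50) / 10 = 25 messages fit into one delivery transaction,
// further capped by `max_unconfirmed_messages_at_inbound_lane`.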
- - let weight_for_delivery_tx = max_extrinsic_weight / 3; - let weight_for_messages_dispatch = max_extrinsic_weight - weight_for_delivery_tx; - - let delivery_tx_base_weight = W::receive_messages_proof_overhead() + - W::receive_messages_proof_outbound_lane_state_overhead(); - let delivery_tx_weight_rest = weight_for_delivery_tx - delivery_tx_base_weight; - let max_number_of_messages = std::cmp::min( - delivery_tx_weight_rest / W::receive_messages_proof_messages_overhead(1), - max_unconfirmed_messages_at_inbound_lane, - ); - - assert!( - max_number_of_messages > 0, - "Relay should fit at least one message in every delivery transaction", - ); - assert!( - weight_for_messages_dispatch >= max_extrinsic_weight / 2, - "Relay shall be able to deliver messages with dispatch weight = max_extrinsic_weight / 2", - ); - - (max_number_of_messages, weight_for_messages_dispatch) -} - -#[cfg(test)] -mod tests { - use super::*; - use bp_runtime::Chain; - - type RialtoToMillauMessagesWeights = - pallet_bridge_messages::weights::MillauWeight; - - #[test] - fn select_delivery_transaction_limits_works() { - let (max_count, max_weight) = - select_delivery_transaction_limits::( - bp_millau::Millau::max_extrinsic_weight(), - bp_rialto::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, - ); - assert_eq!( - (max_count, max_weight), - // We don't actually care about these values, so feel free to update them whenever test - // fails. The only thing to do before that is to ensure that new values looks sane: - // i.e. weight reserved for messages dispatch allows dispatch of non-trivial messages. - // - // Any significant change in this values should attract additional attention. - (958, 216_583_333_334), - ); - } -} diff --git a/polkadot/bridges/relays/lib-substrate-relay/src/messages_metrics.rs b/polkadot/bridges/relays/lib-substrate-relay/src/messages_metrics.rs deleted file mode 100644 index 918449e509b..00000000000 --- a/polkadot/bridges/relays/lib-substrate-relay/src/messages_metrics.rs +++ /dev/null @@ -1,389 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Tools for supporting message lanes between two Substrate-based chains. 
- -use crate::{helpers::tokens_conversion_rate, messages_lane::SubstrateMessageLane}; - -use codec::Decode; -use frame_system::AccountInfo; -use pallet_balances::AccountData; -use relay_substrate_client::{ - metrics::{ - FixedU128OrOne, FloatStorageValue, FloatStorageValueMetric, StorageProofOverheadMetric, - }, - AccountIdOf, BalanceOf, Chain, ChainWithBalances, Client, Error as SubstrateError, IndexOf, -}; -use relay_utils::metrics::{ - FloatJsonValueMetric, GlobalMetrics, MetricsParams, PrometheusError, StandaloneMetric, -}; -use sp_core::storage::StorageData; -use sp_runtime::{FixedPointNumber, FixedU128}; -use std::{fmt::Debug, marker::PhantomData}; - -/// Name of the `NextFeeMultiplier` storage value within the transaction payment pallet. -const NEXT_FEE_MULTIPLIER_VALUE_NAME: &str = "NextFeeMultiplier"; - -/// Shared references to the standalone metrics of the message lane relay loop. -#[derive(Debug, Clone)] -pub struct StandaloneMessagesMetrics { - /// Global metrics. - pub global: GlobalMetrics, - /// Storage chain proof overhead metric. - pub source_storage_proof_overhead: StorageProofOverheadMetric, - /// Target chain proof overhead metric. - pub target_storage_proof_overhead: StorageProofOverheadMetric, - /// Source tokens to base conversion rate metric. - pub source_to_base_conversion_rate: Option, - /// Target tokens to base conversion rate metric. - pub target_to_base_conversion_rate: Option, - /// Source tokens to target tokens conversion rate metric. This rate is stored by the target - /// chain. - pub source_to_target_conversion_rate: Option>, - /// Target tokens to source tokens conversion rate metric. This rate is stored by the source - /// chain. - pub target_to_source_conversion_rate: Option>, - - /// Actual source chain fee multiplier. - pub source_fee_multiplier: Option>, - /// Source chain fee multiplier, stored at the target chain. - pub source_fee_multiplier_at_target: Option>, - /// Actual target chain fee multiplier. - pub target_fee_multiplier: Option>, - /// Target chain fee multiplier, stored at the target chain. - pub target_fee_multiplier_at_source: Option>, -} - -impl StandaloneMessagesMetrics { - /// Swap source and target sides. - pub fn reverse(self) -> StandaloneMessagesMetrics { - StandaloneMessagesMetrics { - global: self.global, - source_storage_proof_overhead: self.target_storage_proof_overhead, - target_storage_proof_overhead: self.source_storage_proof_overhead, - source_to_base_conversion_rate: self.target_to_base_conversion_rate, - target_to_base_conversion_rate: self.source_to_base_conversion_rate, - source_to_target_conversion_rate: self.target_to_source_conversion_rate, - target_to_source_conversion_rate: self.source_to_target_conversion_rate, - source_fee_multiplier: self.target_fee_multiplier, - source_fee_multiplier_at_target: self.target_fee_multiplier_at_source, - target_fee_multiplier: self.source_fee_multiplier, - target_fee_multiplier_at_source: self.source_fee_multiplier_at_target, - } - } - - /// Register all metrics in the registry. 
- pub fn register_and_spawn( - self, - metrics: MetricsParams, - ) -> Result { - self.global.register_and_spawn(&metrics.registry)?; - self.source_storage_proof_overhead.register_and_spawn(&metrics.registry)?; - self.target_storage_proof_overhead.register_and_spawn(&metrics.registry)?; - if let Some(m) = self.source_to_base_conversion_rate { - m.register_and_spawn(&metrics.registry)?; - } - if let Some(m) = self.target_to_base_conversion_rate { - m.register_and_spawn(&metrics.registry)?; - } - if let Some(m) = self.target_to_source_conversion_rate { - m.register_and_spawn(&metrics.registry)?; - } - if let Some(m) = self.source_fee_multiplier { - m.register_and_spawn(&metrics.registry)?; - } - if let Some(m) = self.source_fee_multiplier_at_target { - m.register_and_spawn(&metrics.registry)?; - } - if let Some(m) = self.target_fee_multiplier { - m.register_and_spawn(&metrics.registry)?; - } - if let Some(m) = self.target_fee_multiplier_at_source { - m.register_and_spawn(&metrics.registry)?; - } - Ok(metrics) - } - - /// Return conversion rate from target to source tokens. - pub async fn target_to_source_conversion_rate(&self) -> Option { - let from_token_value = - (*self.target_to_base_conversion_rate.as_ref()?.shared_value_ref().read().await)?; - let to_token_value = - (*self.source_to_base_conversion_rate.as_ref()?.shared_value_ref().read().await)?; - Some(tokens_conversion_rate(from_token_value, to_token_value)) - } -} - -/// Create symmetric standalone metrics for the message lane relay loop. -/// -/// All metrics returned by this function are exposed by loops that are serving given lane (`P`) -/// and by loops that are serving reverse lane (`P` with swapped `TargetChain` and `SourceChain`). -/// We assume that either conversion rate parameters have values in the storage, or they are -/// initialized with 1:1. 
-pub fn standalone_metrics( - source_client: Client, - target_client: Client, -) -> anyhow::Result> { - Ok(StandaloneMessagesMetrics { - global: GlobalMetrics::new()?, - source_storage_proof_overhead: StorageProofOverheadMetric::new( - source_client.clone(), - format!("{}_storage_proof_overhead", P::SourceChain::NAME.to_lowercase()), - format!("{} storage proof overhead", P::SourceChain::NAME), - )?, - target_storage_proof_overhead: StorageProofOverheadMetric::new( - target_client.clone(), - format!("{}_storage_proof_overhead", P::TargetChain::NAME.to_lowercase()), - format!("{} storage proof overhead", P::TargetChain::NAME), - )?, - source_to_base_conversion_rate: P::SourceChain::TOKEN_ID - .map(|source_chain_token_id| { - crate::helpers::token_price_metric(source_chain_token_id).map(Some) - }) - .unwrap_or(Ok(None))?, - target_to_base_conversion_rate: P::TargetChain::TOKEN_ID - .map(|target_chain_token_id| { - crate::helpers::token_price_metric(target_chain_token_id).map(Some) - }) - .unwrap_or(Ok(None))?, - source_to_target_conversion_rate: P::SOURCE_TO_TARGET_CONVERSION_RATE_PARAMETER_NAME - .map(bp_runtime::storage_parameter_key) - .map(|key| { - FloatStorageValueMetric::new( - FixedU128OrOne::default(), - target_client.clone(), - key, - format!( - "{}_{}_to_{}_conversion_rate", - P::TargetChain::NAME, - P::SourceChain::NAME, - P::TargetChain::NAME - ), - format!( - "{} to {} tokens conversion rate (used by {})", - P::SourceChain::NAME, - P::TargetChain::NAME, - P::TargetChain::NAME - ), - ) - .map(Some) - }) - .unwrap_or(Ok(None))?, - target_to_source_conversion_rate: P::TARGET_TO_SOURCE_CONVERSION_RATE_PARAMETER_NAME - .map(bp_runtime::storage_parameter_key) - .map(|key| { - FloatStorageValueMetric::new( - FixedU128OrOne::default(), - source_client.clone(), - key, - format!( - "{}_{}_to_{}_conversion_rate", - P::SourceChain::NAME, - P::TargetChain::NAME, - P::SourceChain::NAME - ), - format!( - "{} to {} tokens conversion rate (used by {})", - P::TargetChain::NAME, - P::SourceChain::NAME, - P::SourceChain::NAME - ), - ) - .map(Some) - }) - .unwrap_or(Ok(None))?, - source_fee_multiplier: P::AT_SOURCE_TRANSACTION_PAYMENT_PALLET_NAME - .map(|pallet| bp_runtime::storage_value_key(pallet, NEXT_FEE_MULTIPLIER_VALUE_NAME)) - .map(|key| { - log::trace!(target: "bridge", "{}_fee_multiplier", P::SourceChain::NAME); - FloatStorageValueMetric::new( - FixedU128OrOne::default(), - source_client.clone(), - key, - format!("{}_fee_multiplier", P::SourceChain::NAME,), - format!("{} fee multiplier", P::SourceChain::NAME,), - ) - .map(Some) - }) - .unwrap_or(Ok(None))?, - source_fee_multiplier_at_target: P::SOURCE_FEE_MULTIPLIER_PARAMETER_NAME - .map(bp_runtime::storage_parameter_key) - .map(|key| { - FloatStorageValueMetric::new( - FixedU128OrOne::default(), - target_client.clone(), - key, - format!("{}_{}_fee_multiplier", P::TargetChain::NAME, P::SourceChain::NAME,), - format!( - "{} fee multiplier stored at {}", - P::SourceChain::NAME, - P::TargetChain::NAME, - ), - ) - .map(Some) - }) - .unwrap_or(Ok(None))?, - target_fee_multiplier: P::AT_TARGET_TRANSACTION_PAYMENT_PALLET_NAME - .map(|pallet| bp_runtime::storage_value_key(pallet, NEXT_FEE_MULTIPLIER_VALUE_NAME)) - .map(|key| { - log::trace!(target: "bridge", "{}_fee_multiplier", P::TargetChain::NAME); - FloatStorageValueMetric::new( - FixedU128OrOne::default(), - target_client, - key, - format!("{}_fee_multiplier", P::TargetChain::NAME,), - format!("{} fee multiplier", P::TargetChain::NAME,), - ) - .map(Some) - }) - .unwrap_or(Ok(None))?, - 
target_fee_multiplier_at_source: P::TARGET_FEE_MULTIPLIER_PARAMETER_NAME - .map(bp_runtime::storage_parameter_key) - .map(|key| { - FloatStorageValueMetric::new( - FixedU128OrOne::default(), - source_client, - key, - format!("{}_{}_fee_multiplier", P::SourceChain::NAME, P::TargetChain::NAME,), - format!( - "{} fee multiplier stored at {}", - P::TargetChain::NAME, - P::SourceChain::NAME, - ), - ) - .map(Some) - }) - .unwrap_or(Ok(None))?, - }) -} - -/// Add relay accounts balance metrics. -pub async fn add_relay_balances_metrics( - client: Client, - metrics: MetricsParams, - relay_account_id: Option>, - messages_pallet_owner_account_id: Option>, -) -> anyhow::Result -where - BalanceOf: Into + std::fmt::Debug, -{ - if relay_account_id.is_none() && messages_pallet_owner_account_id.is_none() { - return Ok(metrics) - } - - // if `tokenDecimals` is missing from system properties, we'll be using - let token_decimals = client - .token_decimals() - .await? - .map(|token_decimals| { - log::info!(target: "bridge", "Read `tokenDecimals` for {}: {}", C::NAME, token_decimals); - token_decimals - }) - .unwrap_or_else(|| { - // turns out it is normal not to have this property - e.g. when polkadot binary is - // started using `polkadot-local` chain. Let's use minimal nominal here - log::info!(target: "bridge", "Using default (zero) `tokenDecimals` value for {}", C::NAME); - 0 - }); - let token_decimals = u32::try_from(token_decimals).map_err(|e| { - anyhow::format_err!( - "Token decimals value ({}) of {} doesn't fit into u32: {:?}", - token_decimals, - C::NAME, - e, - ) - })?; - if let Some(relay_account_id) = relay_account_id { - let relay_account_balance_metric = FloatStorageValueMetric::new( - FreeAccountBalance:: { token_decimals, _phantom: Default::default() }, - client.clone(), - C::account_info_storage_key(&relay_account_id), - format!("at_{}_relay_balance", C::NAME), - format!("Balance of the relay account at the {}", C::NAME), - )?; - relay_account_balance_metric.register_and_spawn(&metrics.registry)?; - } - if let Some(messages_pallet_owner_account_id) = messages_pallet_owner_account_id { - let pallet_owner_account_balance_metric = FloatStorageValueMetric::new( - FreeAccountBalance:: { token_decimals, _phantom: Default::default() }, - client.clone(), - C::account_info_storage_key(&messages_pallet_owner_account_id), - format!("at_{}_messages_pallet_owner_balance", C::NAME), - format!("Balance of the messages pallet owner at the {}", C::NAME), - )?; - pallet_owner_account_balance_metric.register_and_spawn(&metrics.registry)?; - } - Ok(metrics) -} - -/// Adapter for `FloatStorageValueMetric` to decode account free balance. -#[derive(Clone, Debug)] -struct FreeAccountBalance { - token_decimals: u32, - _phantom: PhantomData, -} - -impl FloatStorageValue for FreeAccountBalance -where - C: Chain, - BalanceOf: Into, -{ - type Value = FixedU128; - - fn decode( - &self, - maybe_raw_value: Option, - ) -> Result, SubstrateError> { - maybe_raw_value - .map(|raw_value| { - AccountInfo::, AccountData>>::decode(&mut &raw_value.0[..]) - .map_err(SubstrateError::ResponseParseFailed) - .map(|account_data| { - convert_to_token_balance(account_data.data.free.into(), self.token_decimals) - }) - }) - .transpose() - } -} - -/// Convert from raw `u128` balance (nominated in smallest chain token units) to the float regular -/// tokens value. 
-fn convert_to_token_balance(balance: u128, token_decimals: u32) -> FixedU128 { - FixedU128::from_inner(balance.saturating_mul(FixedU128::DIV / 10u128.pow(token_decimals))) -} - -#[cfg(test)] -mod tests { - use super::*; - use frame_support::storage::generator::StorageValue; - use sp_core::storage::StorageKey; - - #[test] - fn token_decimals_used_properly() { - let plancks = 425_000_000_000; - let token_decimals = 10; - let dots = convert_to_token_balance(plancks, token_decimals); - assert_eq!(dots, FixedU128::saturating_from_rational(425, 10)); - } - - #[test] - fn next_fee_multiplier_storage_key_is_correct() { - assert_eq!( - bp_runtime::storage_value_key("TransactionPayment", NEXT_FEE_MULTIPLIER_VALUE_NAME), - StorageKey(pallet_transaction_payment::NextFeeMultiplier::::storage_value_final_key().to_vec()), - ); - } -} diff --git a/polkadot/bridges/relays/lib-substrate-relay/src/messages_source.rs b/polkadot/bridges/relays/lib-substrate-relay/src/messages_source.rs deleted file mode 100644 index 77dd2aed05b..00000000000 --- a/polkadot/bridges/relays/lib-substrate-relay/src/messages_source.rs +++ /dev/null @@ -1,652 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate client as Substrate messages source. The chain we connect to should have -//! runtime that implements `HeaderApi` to allow bridging with -//! chain. - -use crate::{ - messages_lane::{ - MessageLaneAdapter, ReceiveMessagesDeliveryProofCallBuilder, SubstrateMessageLane, - }, - messages_target::SubstrateMessagesDeliveryProof, - on_demand_headers::OnDemandHeadersRelay, - TransactionParams, -}; - -use async_trait::async_trait; -use bp_messages::{ - storage_keys::{operating_mode_key, outbound_lane_data_key}, - LaneId, MessageNonce, OperatingMode, OutboundLaneData, UnrewardedRelayersState, -}; -use bridge_runtime_common::messages::{ - source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, -}; -use codec::{Decode, Encode}; -use frame_support::weights::Weight; -use messages_relay::{ - message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}, - message_lane_loop::{ - ClientState, MessageDetails, MessageDetailsMap, MessageProofParameters, SourceClient, - SourceClientState, - }, -}; -use num_traits::{Bounded, Zero}; -use relay_substrate_client::{ - AccountIdOf, AccountKeyPairOf, BalanceOf, BlockNumberOf, Chain, ChainWithMessages, Client, - Error as SubstrateError, HashOf, HeaderIdOf, IndexOf, SignParam, TransactionEra, - TransactionSignScheme, UnsignedTransaction, -}; -use relay_utils::{relay_loop::Client as RelayClient, HeaderId}; -use sp_core::{Bytes, Pair}; -use sp_runtime::{traits::Header as HeaderT, DeserializeOwned}; -use std::ops::RangeInclusive; - -/// Intermediate message proof returned by the source Substrate node. 
Includes everything -/// required to submit to the target node: cumulative dispatch weight of bundled messages and -/// the proof itself. -pub type SubstrateMessagesProof = (Weight, FromBridgedChainMessagesProof>); - -/// Substrate client as Substrate messages source. -pub struct SubstrateMessagesSource { - source_client: Client, - target_client: Client, - lane_id: LaneId, - transaction_params: TransactionParams>, - target_to_source_headers_relay: Option>, -} - -impl SubstrateMessagesSource
<P>
{ - /// Create new Substrate headers source. - pub fn new( - source_client: Client, - target_client: Client, - lane_id: LaneId, - transaction_params: TransactionParams>, - target_to_source_headers_relay: Option>, - ) -> Self { - SubstrateMessagesSource { - source_client, - target_client, - lane_id, - transaction_params, - target_to_source_headers_relay, - } - } - - /// Read outbound lane state from the on-chain storage at given block. - async fn outbound_lane_data( - &self, - id: SourceHeaderIdOf>, - ) -> Result, SubstrateError> { - self.source_client - .storage_value( - outbound_lane_data_key( - P::TargetChain::WITH_CHAIN_MESSAGES_PALLET_NAME, - &self.lane_id, - ), - Some(id.1), - ) - .await - } - - /// Ensure that the messages pallet at source chain is active. - async fn ensure_pallet_active(&self) -> Result<(), SubstrateError> { - ensure_messages_pallet_active::(&self.source_client).await - } -} - -impl Clone for SubstrateMessagesSource
<P>
{ - fn clone(&self) -> Self { - Self { - source_client: self.source_client.clone(), - target_client: self.target_client.clone(), - lane_id: self.lane_id, - transaction_params: self.transaction_params.clone(), - target_to_source_headers_relay: self.target_to_source_headers_relay.clone(), - } - } -} - -#[async_trait] -impl RelayClient for SubstrateMessagesSource
<P>
{ - type Error = SubstrateError; - - async fn reconnect(&mut self) -> Result<(), SubstrateError> { - self.source_client.reconnect().await?; - self.target_client.reconnect().await - } -} - -#[async_trait] -impl SourceClient> for SubstrateMessagesSource
<P>
-where - AccountIdOf: - From< as Pair>::Public>, - P::SourceTransactionSignScheme: TransactionSignScheme, -{ - async fn state(&self) -> Result>, SubstrateError> { - // we can't continue to deliver confirmations if source node is out of sync, because - // it may have already received confirmations that we're going to deliver - self.source_client.ensure_synced().await?; - // we can't relay confirmations if messages pallet at source chain is halted - self.ensure_pallet_active().await?; - - read_client_state( - &self.source_client, - Some(&self.target_client), - P::TargetChain::BEST_FINALIZED_HEADER_ID_METHOD, - ) - .await - } - - async fn latest_generated_nonce( - &self, - id: SourceHeaderIdOf>, - ) -> Result<(SourceHeaderIdOf>, MessageNonce), SubstrateError> { - // lane data missing from the storage is fine until first message is sent - let latest_generated_nonce = self - .outbound_lane_data(id) - .await? - .map(|data| data.latest_generated_nonce) - .unwrap_or(0); - Ok((id, latest_generated_nonce)) - } - - async fn latest_confirmed_received_nonce( - &self, - id: SourceHeaderIdOf>, - ) -> Result<(SourceHeaderIdOf>, MessageNonce), SubstrateError> { - // lane data missing from the storage is fine until first message is sent - let latest_received_nonce = self - .outbound_lane_data(id) - .await? - .map(|data| data.latest_received_nonce) - .unwrap_or(0); - Ok((id, latest_received_nonce)) - } - - async fn generated_message_details( - &self, - id: SourceHeaderIdOf>, - nonces: RangeInclusive, - ) -> Result< - MessageDetailsMap< as MessageLane>::SourceChainBalance>, - SubstrateError, - > { - let encoded_response = self - .source_client - .state_call( - P::TargetChain::TO_CHAIN_MESSAGE_DETAILS_METHOD.into(), - Bytes((self.lane_id, nonces.start(), nonces.end()).encode()), - Some(id.1), - ) - .await?; - - make_message_details_map::( - Decode::decode(&mut &encoded_response.0[..]) - .map_err(SubstrateError::ResponseParseFailed)?, - nonces, - ) - } - - async fn prove_messages( - &self, - id: SourceHeaderIdOf>, - nonces: RangeInclusive, - proof_parameters: MessageProofParameters, - ) -> Result< - ( - SourceHeaderIdOf>, - RangeInclusive, - as MessageLane>::MessagesProof, - ), - SubstrateError, - > { - let mut storage_keys = - Vec::with_capacity(nonces.end().saturating_sub(*nonces.start()) as usize + 1); - let mut message_nonce = *nonces.start(); - while message_nonce <= *nonces.end() { - let message_key = bp_messages::storage_keys::message_key( - P::TargetChain::WITH_CHAIN_MESSAGES_PALLET_NAME, - &self.lane_id, - message_nonce, - ); - storage_keys.push(message_key); - message_nonce += 1; - } - if proof_parameters.outbound_state_proof_required { - storage_keys.push(bp_messages::storage_keys::outbound_lane_data_key( - P::TargetChain::WITH_CHAIN_MESSAGES_PALLET_NAME, - &self.lane_id, - )); - } - - let proof = self - .source_client - .prove_storage(storage_keys, id.1) - .await? 
- .iter_nodes() - .collect(); - let proof = FromBridgedChainMessagesProof { - bridged_header_hash: id.1, - storage_proof: proof, - lane: self.lane_id, - nonces_start: *nonces.start(), - nonces_end: *nonces.end(), - }; - Ok((id, nonces, (proof_parameters.dispatch_weight, proof))) - } - - async fn submit_messages_receiving_proof( - &self, - _generated_at_block: TargetHeaderIdOf>, - proof: as MessageLane>::MessagesReceivingProof, - ) -> Result<(), SubstrateError> { - let genesis_hash = *self.source_client.genesis_hash(); - let transaction_params = self.transaction_params.clone(); - let (spec_version, transaction_version) = - self.source_client.simple_runtime_version().await?; - self.source_client - .submit_signed_extrinsic( - self.transaction_params.signer.public().into(), - move |best_block_id, transaction_nonce| { - make_messages_delivery_proof_transaction::
<P>
( - spec_version, - transaction_version, - &genesis_hash, - &transaction_params, - best_block_id, - transaction_nonce, - proof, - true, - ) - }, - ) - .await?; - Ok(()) - } - - async fn require_target_header_on_source(&self, id: TargetHeaderIdOf>) { - if let Some(ref target_to_source_headers_relay) = self.target_to_source_headers_relay { - target_to_source_headers_relay.require_finalized_header(id).await; - } - } - - async fn estimate_confirmation_transaction( - &self, - ) -> as MessageLane>::SourceChainBalance { - let runtime_version = match self.source_client.runtime_version().await { - Ok(v) => v, - Err(_) => return BalanceOf::::max_value(), - }; - async { - let dummy_tx = make_messages_delivery_proof_transaction::
<P>
( - runtime_version.spec_version, - runtime_version.transaction_version, - self.source_client.genesis_hash(), - &self.transaction_params, - HeaderId(Default::default(), Default::default()), - Zero::zero(), - prepare_dummy_messages_delivery_proof::(), - false, - )?; - self.source_client - .estimate_extrinsic_fee(dummy_tx) - .await - .map(|fee| fee.inclusion_fee()) - } - .await - .unwrap_or_else(|_| BalanceOf::::max_value()) - } -} - -/// Ensure that the messages pallet at source chain is active. -pub(crate) async fn ensure_messages_pallet_active( - client: &Client, -) -> Result<(), SubstrateError> -where - AtChain: ChainWithMessages, - WithChain: ChainWithMessages, -{ - let operating_mode = client - .storage_value(operating_mode_key(WithChain::WITH_CHAIN_MESSAGES_PALLET_NAME), None) - .await?; - let is_halted = operating_mode == Some(OperatingMode::Halted); - if is_halted { - Err(SubstrateError::BridgePalletIsHalted) - } else { - Ok(()) - } -} - -/// Make messages delivery proof transaction from given proof. -#[allow(clippy::too_many_arguments)] -fn make_messages_delivery_proof_transaction( - spec_version: u32, - transaction_version: u32, - source_genesis_hash: &HashOf, - source_transaction_params: &TransactionParams>, - source_best_block_id: HeaderIdOf, - transaction_nonce: IndexOf, - proof: SubstrateMessagesDeliveryProof, - trace_call: bool, -) -> Result -where - P::SourceTransactionSignScheme: TransactionSignScheme, -{ - let call = - P::ReceiveMessagesDeliveryProofCallBuilder::build_receive_messages_delivery_proof_call( - proof, trace_call, - ); - Ok(Bytes( - P::SourceTransactionSignScheme::sign_transaction(SignParam { - spec_version, - transaction_version, - genesis_hash: *source_genesis_hash, - signer: source_transaction_params.signer.clone(), - era: TransactionEra::new(source_best_block_id, source_transaction_params.mortality), - unsigned: UnsignedTransaction::new(call.into(), transaction_nonce), - })? - .encode(), - )) -} - -/// Prepare 'dummy' messages delivery proof that will compose the delivery confirmation transaction. -/// -/// We don't care about proof actually being the valid proof, because its validity doesn't -/// affect the call weight - we only care about its size. -fn prepare_dummy_messages_delivery_proof( -) -> SubstrateMessagesDeliveryProof { - let single_message_confirmation_size = bp_messages::InboundLaneData::<()>::encoded_size_hint( - SC::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, - 1, - 1, - ) - .unwrap_or(u32::MAX); - let proof_size = TC::STORAGE_PROOF_OVERHEAD.saturating_add(single_message_confirmation_size); - ( - UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 1, - total_messages: 1, - }, - FromBridgedChainMessagesDeliveryProof { - bridged_header_hash: Default::default(), - storage_proof: vec![vec![0; proof_size as usize]], - lane: Default::default(), - }, - ) -} - -/// Read best blocks from given client. -/// -/// This function assumes that the chain that is followed by the `self_client` has -/// bridge GRANDPA pallet deployed and it provides `best_finalized_header_id_method_name` -/// runtime API to read the best finalized Bridged chain header. -/// -/// If `peer_client` is `None`, the value of `actual_best_finalized_peer_at_best_self` will -/// always match the `best_finalized_peer_at_best_self`. 
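// Illustrative example, added for clarity and not part of the original code: if this
// chain's best header is #120, its best finalized header is #100, and the bridge
// pallet state at #100 records the peer's best finalized header as #90, the returned
// `ClientState` is `best_self = 120`, `best_finalized_self = 100`,
// `best_finalized_peer_at_best_self = 90`. When a `peer_client` is supplied, the hash
// of peer header #90 is re-read from the peer itself, so forks are detectable by
// comparing it with the hash tracked by the bridge pallet.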
-pub async fn read_client_state( - self_client: &Client, - peer_client: Option<&Client>, - best_finalized_header_id_method_name: &str, -) -> Result, HeaderIdOf>, SubstrateError> -where - SelfChain: Chain, - SelfChain::Header: DeserializeOwned, - SelfChain::Index: DeserializeOwned, - PeerChain: Chain, -{ - // let's read our state first: we need best finalized header hash on **this** chain - let self_best_finalized_header_hash = self_client.best_finalized_header_hash().await?; - let self_best_finalized_header = - self_client.header_by_hash(self_best_finalized_header_hash).await?; - let self_best_finalized_id = - HeaderId(*self_best_finalized_header.number(), self_best_finalized_header_hash); - - // now let's read our best header on **this** chain - let self_best_header = self_client.best_header().await?; - let self_best_hash = self_best_header.hash(); - let self_best_id = HeaderId(*self_best_header.number(), self_best_hash); - - // now let's read id of best finalized peer header at our best finalized block - let encoded_best_finalized_peer_on_self = self_client - .state_call( - best_finalized_header_id_method_name.into(), - Bytes(Vec::new()), - Some(self_best_hash), - ) - .await?; - let decoded_best_finalized_peer_on_self: (BlockNumberOf, HashOf) = - Decode::decode(&mut &encoded_best_finalized_peer_on_self.0[..]) - .map_err(SubstrateError::ResponseParseFailed)?; - let peer_on_self_best_finalized_id = - HeaderId(decoded_best_finalized_peer_on_self.0, decoded_best_finalized_peer_on_self.1); - - // read actual header, matching the `peer_on_self_best_finalized_id` from the peer chain - let actual_peer_on_self_best_finalized_id = match peer_client { - Some(peer_client) => { - let actual_peer_on_self_best_finalized = - peer_client.header_by_number(peer_on_self_best_finalized_id.0).await?; - HeaderId(peer_on_self_best_finalized_id.0, actual_peer_on_self_best_finalized.hash()) - }, - None => peer_on_self_best_finalized_id, - }; - - Ok(ClientState { - best_self: self_best_id, - best_finalized_self: self_best_finalized_id, - best_finalized_peer_at_best_self: peer_on_self_best_finalized_id, - actual_best_finalized_peer_at_best_self: actual_peer_on_self_best_finalized_id, - }) -} - -fn make_message_details_map( - weights: Vec>, - nonces: RangeInclusive, -) -> Result, SubstrateError> { - let make_missing_nonce_error = |expected_nonce| { - Err(SubstrateError::Custom(format!( - "Missing nonce {} in message_details call result. Expected all nonces from {:?}", - expected_nonce, nonces, - ))) - }; - - let mut weights_map = MessageDetailsMap::new(); - - // this is actually prevented by external logic - if nonces.is_empty() { - return Ok(weights_map) - } - - // check if last nonce is missing - loop below is not checking this - let last_nonce_is_missing = - weights.last().map(|details| details.nonce != *nonces.end()).unwrap_or(true); - if last_nonce_is_missing { - return make_missing_nonce_error(*nonces.end()) - } - - let mut expected_nonce = *nonces.start(); - let mut is_at_head = true; - - for details in weights { - match (details.nonce == expected_nonce, is_at_head) { - (true, _) => (), - (false, true) => { - // this may happen if some messages were already pruned from the source node - // - // this is not critical error and will be auto-resolved by messages lane (and target - // node) - log::info!( - target: "bridge", - "Some messages are missing from the {} node: {:?}. 
Target node may be out of sync?", - C::NAME, - expected_nonce..details.nonce, - ); - }, - (false, false) => { - // some nonces are missing from the middle/tail of the range - // - // this is critical error, because we can't miss any nonces - return make_missing_nonce_error(expected_nonce) - }, - } - - weights_map.insert( - details.nonce, - MessageDetails { - dispatch_weight: details.dispatch_weight, - size: details.size as _, - reward: details.delivery_and_dispatch_fee, - dispatch_fee_payment: details.dispatch_fee_payment, - }, - ); - expected_nonce = details.nonce + 1; - is_at_head = false; - } - - Ok(weights_map) -} - -#[cfg(test)] -mod tests { - use super::*; - use bp_runtime::messages::DispatchFeePayment; - use relay_rococo_client::Rococo; - use relay_wococo_client::Wococo; - - fn message_details_from_rpc( - nonces: RangeInclusive, - ) -> Vec> { - nonces - .into_iter() - .map(|nonce| bp_messages::MessageDetails { - nonce, - dispatch_weight: 0, - size: 0, - delivery_and_dispatch_fee: 0, - dispatch_fee_payment: DispatchFeePayment::AtSourceChain, - }) - .collect() - } - - #[test] - fn make_message_details_map_succeeds_if_no_messages_are_missing() { - assert_eq!( - make_message_details_map::(message_details_from_rpc(1..=3), 1..=3,).unwrap(), - vec![ - ( - 1, - MessageDetails { - dispatch_weight: 0, - size: 0, - reward: 0, - dispatch_fee_payment: DispatchFeePayment::AtSourceChain, - } - ), - ( - 2, - MessageDetails { - dispatch_weight: 0, - size: 0, - reward: 0, - dispatch_fee_payment: DispatchFeePayment::AtSourceChain, - } - ), - ( - 3, - MessageDetails { - dispatch_weight: 0, - size: 0, - reward: 0, - dispatch_fee_payment: DispatchFeePayment::AtSourceChain, - } - ), - ] - .into_iter() - .collect(), - ); - } - - #[test] - fn make_message_details_map_succeeds_if_head_messages_are_missing() { - assert_eq!( - make_message_details_map::(message_details_from_rpc(2..=3), 1..=3,).unwrap(), - vec![ - ( - 2, - MessageDetails { - dispatch_weight: 0, - size: 0, - reward: 0, - dispatch_fee_payment: DispatchFeePayment::AtSourceChain, - } - ), - ( - 3, - MessageDetails { - dispatch_weight: 0, - size: 0, - reward: 0, - dispatch_fee_payment: DispatchFeePayment::AtSourceChain, - } - ), - ] - .into_iter() - .collect(), - ); - } - - #[test] - fn make_message_details_map_fails_if_mid_messages_are_missing() { - let mut message_details_from_rpc = message_details_from_rpc(1..=3); - message_details_from_rpc.remove(1); - assert!(matches!( - make_message_details_map::(message_details_from_rpc, 1..=3,), - Err(SubstrateError::Custom(_)) - )); - } - - #[test] - fn make_message_details_map_fails_if_tail_messages_are_missing() { - assert!(matches!( - make_message_details_map::(message_details_from_rpc(1..=2), 1..=3,), - Err(SubstrateError::Custom(_)) - )); - } - - #[test] - fn make_message_details_map_fails_if_all_messages_are_missing() { - assert!(matches!( - make_message_details_map::(vec![], 1..=3), - Err(SubstrateError::Custom(_)) - )); - } - - #[test] - fn prepare_dummy_messages_delivery_proof_works() { - let expected_minimal_size = - Wococo::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE + Rococo::STORAGE_PROOF_OVERHEAD; - let dummy_proof = prepare_dummy_messages_delivery_proof::(); - assert!( - dummy_proof.1.encode().len() as u32 > expected_minimal_size, - "Expected proof size at least {}. 
Got: {}", - expected_minimal_size, - dummy_proof.1.encode().len(), - ); - } -} diff --git a/polkadot/bridges/relays/lib-substrate-relay/src/messages_target.rs b/polkadot/bridges/relays/lib-substrate-relay/src/messages_target.rs deleted file mode 100644 index fbf6368d4e7..00000000000 --- a/polkadot/bridges/relays/lib-substrate-relay/src/messages_target.rs +++ /dev/null @@ -1,583 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate client as Substrate messages target. The chain we connect to should have -//! runtime that implements `HeaderApi` to allow bridging with -//! chain. - -use crate::{ - messages_lane::{MessageLaneAdapter, ReceiveMessagesProofCallBuilder, SubstrateMessageLane}, - messages_metrics::StandaloneMessagesMetrics, - messages_source::{ensure_messages_pallet_active, read_client_state, SubstrateMessagesProof}, - on_demand_headers::OnDemandHeadersRelay, - TransactionParams, -}; - -use async_trait::async_trait; -use bp_messages::{ - storage_keys::inbound_lane_data_key, total_unrewarded_messages, InboundLaneData, LaneId, - MessageNonce, UnrewardedRelayersState, -}; -use bridge_runtime_common::messages::{ - source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, -}; -use codec::Encode; -use frame_support::weights::{Weight, WeightToFeePolynomial}; -use messages_relay::{ - message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}, - message_lane_loop::{TargetClient, TargetClientState}, -}; -use num_traits::{Bounded, Zero}; -use relay_substrate_client::{ - AccountIdOf, AccountKeyPairOf, BalanceOf, Chain, ChainWithMessages, Client, - Error as SubstrateError, HashOf, HeaderIdOf, IndexOf, SignParam, TransactionEra, - TransactionSignScheme, UnsignedTransaction, WeightToFeeOf, -}; -use relay_utils::{relay_loop::Client as RelayClient, HeaderId}; -use sp_core::{Bytes, Pair}; -use sp_runtime::{traits::Saturating, FixedPointNumber, FixedU128}; -use std::{collections::VecDeque, ops::RangeInclusive}; - -/// Message receiving proof returned by the target Substrate node. -pub type SubstrateMessagesDeliveryProof = - (UnrewardedRelayersState, FromBridgedChainMessagesDeliveryProof>); - -/// Substrate client as Substrate messages target. -pub struct SubstrateMessagesTarget { - target_client: Client, - source_client: Client, - lane_id: LaneId, - relayer_id_at_source: AccountIdOf, - transaction_params: TransactionParams>, - metric_values: StandaloneMessagesMetrics, - source_to_target_headers_relay: Option>, -} - -impl SubstrateMessagesTarget

{ - /// Create new Substrate headers target. - pub fn new( - target_client: Client, - source_client: Client, - lane_id: LaneId, - relayer_id_at_source: AccountIdOf, - transaction_params: TransactionParams>, - metric_values: StandaloneMessagesMetrics, - source_to_target_headers_relay: Option>, - ) -> Self { - SubstrateMessagesTarget { - target_client, - source_client, - lane_id, - relayer_id_at_source, - transaction_params, - metric_values, - source_to_target_headers_relay, - } - } - - /// Read inbound lane state from the on-chain storage at given block. - async fn inbound_lane_data( - &self, - id: TargetHeaderIdOf>, - ) -> Result>>, SubstrateError> { - self.target_client - .storage_value( - inbound_lane_data_key( - P::SourceChain::WITH_CHAIN_MESSAGES_PALLET_NAME, - &self.lane_id, - ), - Some(id.1), - ) - .await - } - - /// Ensure that the messages pallet at target chain is active. - async fn ensure_pallet_active(&self) -> Result<(), SubstrateError> { - ensure_messages_pallet_active::(&self.target_client).await - } -} - -impl Clone for SubstrateMessagesTarget

{ - fn clone(&self) -> Self { - Self { - target_client: self.target_client.clone(), - source_client: self.source_client.clone(), - lane_id: self.lane_id, - relayer_id_at_source: self.relayer_id_at_source.clone(), - transaction_params: self.transaction_params.clone(), - metric_values: self.metric_values.clone(), - source_to_target_headers_relay: self.source_to_target_headers_relay.clone(), - } - } -} - -#[async_trait] -impl RelayClient for SubstrateMessagesTarget

{ - type Error = SubstrateError; - - async fn reconnect(&mut self) -> Result<(), SubstrateError> { - self.target_client.reconnect().await?; - self.source_client.reconnect().await - } -} - -#[async_trait] -impl TargetClient> for SubstrateMessagesTarget

-where - AccountIdOf: - From< as Pair>::Public>, - P::TargetTransactionSignScheme: TransactionSignScheme, - BalanceOf: TryFrom>, -{ - async fn state(&self) -> Result>, SubstrateError> { - // we can't continue to deliver messages if target node is out of sync, because - // it may have already received (some of) messages that we're going to deliver - self.target_client.ensure_synced().await?; - // we can't relay messages if messages pallet at target chain is halted - self.ensure_pallet_active().await?; - - read_client_state( - &self.target_client, - Some(&self.source_client), - P::SourceChain::BEST_FINALIZED_HEADER_ID_METHOD, - ) - .await - } - - async fn latest_received_nonce( - &self, - id: TargetHeaderIdOf>, - ) -> Result<(TargetHeaderIdOf>, MessageNonce), SubstrateError> { - // lane data missing from the storage is fine until first message is received - let latest_received_nonce = self - .inbound_lane_data(id) - .await? - .map(|data| data.last_delivered_nonce()) - .unwrap_or(0); - Ok((id, latest_received_nonce)) - } - - async fn latest_confirmed_received_nonce( - &self, - id: TargetHeaderIdOf>, - ) -> Result<(TargetHeaderIdOf>, MessageNonce), SubstrateError> { - // lane data missing from the storage is fine until first message is received - let last_confirmed_nonce = self - .inbound_lane_data(id) - .await? - .map(|data| data.last_confirmed_nonce) - .unwrap_or(0); - Ok((id, last_confirmed_nonce)) - } - - async fn unrewarded_relayers_state( - &self, - id: TargetHeaderIdOf>, - ) -> Result<(TargetHeaderIdOf>, UnrewardedRelayersState), SubstrateError> - { - let relayers = self - .inbound_lane_data(id) - .await? - .map(|data| data.relayers) - .unwrap_or_else(|| VecDeque::new()); - let unrewarded_relayers_state = bp_messages::UnrewardedRelayersState { - unrewarded_relayer_entries: relayers.len() as _, - messages_in_oldest_entry: relayers - .front() - .map(|entry| 1 + entry.messages.end - entry.messages.begin) - .unwrap_or(0), - total_messages: total_unrewarded_messages(&relayers).unwrap_or(MessageNonce::MAX), - }; - Ok((id, unrewarded_relayers_state)) - } - - async fn prove_messages_receiving( - &self, - id: TargetHeaderIdOf>, - ) -> Result< - ( - TargetHeaderIdOf>, - as MessageLane>::MessagesReceivingProof, - ), - SubstrateError, - > { - let (id, relayers_state) = self.unrewarded_relayers_state(id).await?; - let inbound_data_key = bp_messages::storage_keys::inbound_lane_data_key( - P::SourceChain::WITH_CHAIN_MESSAGES_PALLET_NAME, - &self.lane_id, - ); - let proof = self - .target_client - .prove_storage(vec![inbound_data_key], id.1) - .await? - .iter_nodes() - .collect(); - let proof = FromBridgedChainMessagesDeliveryProof { - bridged_header_hash: id.1, - storage_proof: proof, - lane: self.lane_id, - }; - Ok((id, (relayers_state, proof))) - } - - async fn submit_messages_proof( - &self, - _generated_at_header: SourceHeaderIdOf>, - nonces: RangeInclusive, - proof: as MessageLane>::MessagesProof, - ) -> Result, SubstrateError> { - let genesis_hash = *self.target_client.genesis_hash(); - let transaction_params = self.transaction_params.clone(); - let relayer_id_at_source = self.relayer_id_at_source.clone(); - let nonces_clone = nonces.clone(); - let (spec_version, transaction_version) = - self.target_client.simple_runtime_version().await?; - self.target_client - .submit_signed_extrinsic( - self.transaction_params.signer.public().into(), - move |best_block_id, transaction_nonce| { - make_messages_delivery_transaction::

( - spec_version, - transaction_version, - &genesis_hash, - &transaction_params, - best_block_id, - transaction_nonce, - relayer_id_at_source, - nonces_clone, - proof, - true, - ) - }, - ) - .await?; - Ok(nonces) - } - - async fn require_source_header_on_target(&self, id: SourceHeaderIdOf>) { - if let Some(ref source_to_target_headers_relay) = self.source_to_target_headers_relay { - source_to_target_headers_relay.require_finalized_header(id).await; - } - } - - async fn estimate_delivery_transaction_in_source_tokens( - &self, - nonces: RangeInclusive, - total_prepaid_nonces: MessageNonce, - total_dispatch_weight: Weight, - total_size: u32, - ) -> Result< as MessageLane>::SourceChainBalance, SubstrateError> { - let conversion_rate = - self.metric_values.target_to_source_conversion_rate().await.ok_or_else(|| { - SubstrateError::Custom(format!( - "Failed to compute conversion rate from {} to {}", - P::TargetChain::NAME, - P::SourceChain::NAME, - )) - })?; - - let (spec_version, transaction_version) = - self.target_client.simple_runtime_version().await?; - // Prepare 'dummy' delivery transaction - we only care about its length and dispatch weight. - let delivery_tx = make_messages_delivery_transaction::

( - spec_version, - transaction_version, - self.target_client.genesis_hash(), - &self.transaction_params, - HeaderId(Default::default(), Default::default()), - Zero::zero(), - self.relayer_id_at_source.clone(), - nonces.clone(), - prepare_dummy_messages_proof::( - nonces.clone(), - total_dispatch_weight, - total_size, - ), - false, - )?; - let delivery_tx_fee = self.target_client.estimate_extrinsic_fee(delivery_tx).await?; - let inclusion_fee_in_target_tokens = delivery_tx_fee.inclusion_fee(); - - // The pre-dispatch cost of delivery transaction includes additional fee to cover dispatch - // fee payment (Currency::transfer in regular deployment). But if message dispatch has - // already been paid at the Source chain, the delivery transaction will refund relayer with - // this additional cost. But `estimate_extrinsic_fee` obviously just returns pre-dispatch - // cost of the transaction. So if transaction delivers prepaid message, then it may happen - // that pre-dispatch cost is larger than reward and `Rational` relayer will refuse to - // deliver this message. - // - // The most obvious solution would be to deduct total weight of dispatch fee payments from - // the `total_dispatch_weight` and use regular `estimate_extrinsic_fee` call. But what if - // `total_dispatch_weight` is less than total dispatch fee payments weight? Weight is - // strictly positive, so we can't use this option. - // - // Instead we'll be directly using `WeightToFee` and `NextFeeMultiplier` of the Target - // chain. This requires more knowledge of the Target chain, but seems there's no better way - // to solve this now. - let expected_refund_in_target_tokens = if total_prepaid_nonces != 0 { - const WEIGHT_DIFFERENCE: Weight = 100; - - let (spec_version, transaction_version) = - self.target_client.simple_runtime_version().await?; - let larger_dispatch_weight = total_dispatch_weight.saturating_add(WEIGHT_DIFFERENCE); - let dummy_tx = make_messages_delivery_transaction::

( - spec_version, - transaction_version, - self.target_client.genesis_hash(), - &self.transaction_params, - HeaderId(Default::default(), Default::default()), - Zero::zero(), - self.relayer_id_at_source.clone(), - nonces.clone(), - prepare_dummy_messages_proof::( - nonces.clone(), - larger_dispatch_weight, - total_size, - ), - false, - )?; - let larger_delivery_tx_fee = - self.target_client.estimate_extrinsic_fee(dummy_tx).await?; - - compute_prepaid_messages_refund::( - total_prepaid_nonces, - compute_fee_multiplier::( - delivery_tx_fee.adjusted_weight_fee, - total_dispatch_weight, - larger_delivery_tx_fee.adjusted_weight_fee, - larger_dispatch_weight, - ), - ) - } else { - Zero::zero() - }; - - let delivery_fee_in_source_tokens = - convert_target_tokens_to_source_tokens::( - FixedU128::from_float(conversion_rate), - inclusion_fee_in_target_tokens.saturating_sub(expected_refund_in_target_tokens), - ); - - log::trace!( - target: "bridge", - "Estimated {} -> {} messages delivery transaction.\n\t\ - Total nonces: {:?}\n\t\ - Prepaid messages: {}\n\t\ - Total messages size: {}\n\t\ - Total messages dispatch weight: {}\n\t\ - Inclusion fee (in {1} tokens): {:?}\n\t\ - Expected refund (in {1} tokens): {:?}\n\t\ - {1} -> {0} conversion rate: {:?}\n\t\ - Expected delivery tx fee (in {0} tokens): {:?}", - P::SourceChain::NAME, - P::TargetChain::NAME, - nonces, - total_prepaid_nonces, - total_size, - total_dispatch_weight, - inclusion_fee_in_target_tokens, - expected_refund_in_target_tokens, - conversion_rate, - delivery_fee_in_source_tokens, - ); - - Ok(delivery_fee_in_source_tokens) - } -} - -/// Make messages delivery transaction from given proof. -#[allow(clippy::too_many_arguments)] -fn make_messages_delivery_transaction( - spec_version: u32, - transaction_version: u32, - target_genesis_hash: &HashOf, - target_transaction_params: &TransactionParams>, - target_best_block_id: HeaderIdOf, - transaction_nonce: IndexOf, - relayer_id_at_source: AccountIdOf, - nonces: RangeInclusive, - proof: SubstrateMessagesProof, - trace_call: bool, -) -> Result -where - P::TargetTransactionSignScheme: TransactionSignScheme, -{ - let messages_count = nonces.end() - nonces.start() + 1; - let dispatch_weight = proof.0; - let call = P::ReceiveMessagesProofCallBuilder::build_receive_messages_proof_call( - relayer_id_at_source, - proof, - messages_count as _, - dispatch_weight, - trace_call, - ); - Ok(Bytes( - P::TargetTransactionSignScheme::sign_transaction(SignParam { - spec_version, - transaction_version, - genesis_hash: *target_genesis_hash, - signer: target_transaction_params.signer.clone(), - era: TransactionEra::new(target_best_block_id, target_transaction_params.mortality), - unsigned: UnsignedTransaction::new(call.into(), transaction_nonce), - })? - .encode(), - )) -} - -/// Prepare 'dummy' messages proof that will compose the delivery transaction. -/// -/// We don't care about proof actually being the valid proof, because its validity doesn't -/// affect the call weight - we only care about its size. 
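The refund estimation in `estimate_delivery_transaction_in_source_tokens` above recovers the chain's current fee multiplier from two fee estimates that differ only in dispatch weight, which is a plain linear interpolation (and, as the `compute_fee_multiplier` warning notes, only accurate for a linear weight-to-fee function). A minimal sketch with made-up numbers:

// Made-up numbers; assumes a linear weight-to-fee function fee(w) = w * WEIGHT_TO_FEE.
const WEIGHT_TO_FEE: u128 = 10; // hypothetical unadjusted fee per unit of weight

fn unadjusted_weight_fee(weight: u128) -> u128 {
    weight * WEIGHT_TO_FEE
}

fn main() {
    // Pretend the chain currently applies a (to us unknown) multiplier of 1/1000.
    let (mul_num, mul_den) = (1u128, 1_000u128);

    // Estimate two otherwise identical transactions whose dispatch weight differs by 100.
    let (smaller_w, larger_w) = (1_000_000u128, 1_000_100u128);
    let smaller_adjusted = unadjusted_weight_fee(smaller_w) * mul_num / mul_den;
    let larger_adjusted = unadjusted_weight_fee(larger_w) * mul_num / mul_den;

    // Recover the multiplier as (difference of adjusted fees) / (difference of unadjusted fees).
    let recovered_num = larger_adjusted - smaller_adjusted;
    let recovered_den = unadjusted_weight_fee(larger_w) - unadjusted_weight_fee(smaller_w);

    // The recovered ratio equals the multiplier we started from.
    assert_eq!(recovered_num * mul_den, mul_num * recovered_den);
}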
-fn prepare_dummy_messages_proof( - nonces: RangeInclusive, - total_dispatch_weight: Weight, - total_size: u32, -) -> SubstrateMessagesProof { - ( - total_dispatch_weight, - FromBridgedChainMessagesProof { - bridged_header_hash: Default::default(), - storage_proof: vec![vec![ - 0; - SC::STORAGE_PROOF_OVERHEAD.saturating_add(total_size) as usize - ]], - lane: Default::default(), - nonces_start: *nonces.start(), - nonces_end: *nonces.end(), - }, - ) -} - -/// Given delivery transaction fee in target chain tokens and conversion rate to the source -/// chain tokens, compute transaction cost in source chain tokens. -fn convert_target_tokens_to_source_tokens( - target_to_source_conversion_rate: FixedU128, - target_transaction_fee: TC::Balance, -) -> SC::Balance -where - SC::Balance: TryFrom, -{ - SC::Balance::try_from( - target_to_source_conversion_rate.saturating_mul_int(target_transaction_fee), - ) - .unwrap_or_else(|_| SC::Balance::max_value()) -} - -/// Compute fee multiplier that is used by the chain, given a couple of fees for transactions -/// that are only differ in dispatch weights. -/// -/// This function assumes that standard transaction payment pallet is used by the chain. -/// The only fee component that depends on dispatch weight is the `adjusted_weight_fee`. -/// -/// **WARNING**: this functions will only be accurate if weight-to-fee conversion function -/// is linear. For non-linear polynomials the error will grow with `weight_difference` growth. -/// So better to use smaller differences. -fn compute_fee_multiplier( - smaller_adjusted_weight_fee: BalanceOf, - smaller_tx_weight: Weight, - larger_adjusted_weight_fee: BalanceOf, - larger_tx_weight: Weight, -) -> FixedU128 { - let adjusted_weight_fee_difference = - larger_adjusted_weight_fee.saturating_sub(smaller_adjusted_weight_fee); - let smaller_tx_unadjusted_weight_fee = WeightToFeeOf::::weight_to_fee(&smaller_tx_weight); - let larger_tx_unadjusted_weight_fee = WeightToFeeOf::::weight_to_fee(&larger_tx_weight); - FixedU128::saturating_from_rational( - adjusted_weight_fee_difference, - larger_tx_unadjusted_weight_fee.saturating_sub(smaller_tx_unadjusted_weight_fee), - ) -} - -/// Compute fee that will be refunded to the relayer because dispatch of `total_prepaid_nonces` -/// messages has been paid at the source chain. -fn compute_prepaid_messages_refund( - total_prepaid_nonces: MessageNonce, - fee_multiplier: FixedU128, -) -> BalanceOf { - fee_multiplier.saturating_mul_int(WeightToFeeOf::::weight_to_fee( - &C::PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_CHAIN.saturating_mul(total_prepaid_nonces), - )) -} - -#[cfg(test)] -mod tests { - use super::*; - use relay_rococo_client::Rococo; - use relay_wococo_client::Wococo; - - #[test] - fn prepare_dummy_messages_proof_works() { - const DISPATCH_WEIGHT: Weight = 1_000_000; - const SIZE: u32 = 1_000; - let dummy_proof = prepare_dummy_messages_proof::(1..=10, DISPATCH_WEIGHT, SIZE); - assert_eq!(dummy_proof.0, DISPATCH_WEIGHT); - assert!( - dummy_proof.1.encode().len() as u32 > SIZE, - "Expected proof size at least {}. 
Got: {}", - SIZE, - dummy_proof.1.encode().len(), - ); - } - - #[test] - fn convert_target_tokens_to_source_tokens_works() { - assert_eq!( - convert_target_tokens_to_source_tokens::((150, 100).into(), 1_000), - 1_500 - ); - assert_eq!( - convert_target_tokens_to_source_tokens::((50, 100).into(), 1_000), - 500 - ); - assert_eq!( - convert_target_tokens_to_source_tokens::((100, 100).into(), 1_000), - 1_000 - ); - } - - #[test] - fn compute_fee_multiplier_returns_sane_results() { - let multiplier = FixedU128::saturating_from_rational(1, 1000); - - let smaller_weight = 1_000_000; - let smaller_adjusted_weight_fee = - multiplier.saturating_mul_int(WeightToFeeOf::::weight_to_fee(&smaller_weight)); - - let larger_weight = smaller_weight + 200_000; - let larger_adjusted_weight_fee = - multiplier.saturating_mul_int(WeightToFeeOf::::weight_to_fee(&larger_weight)); - - assert_eq!( - compute_fee_multiplier::( - smaller_adjusted_weight_fee, - smaller_weight, - larger_adjusted_weight_fee, - larger_weight, - ), - multiplier, - ); - } - - #[test] - fn compute_prepaid_messages_refund_returns_sane_results() { - assert!( - compute_prepaid_messages_refund::( - 10, - FixedU128::saturating_from_rational(110, 100), - ) > (10 * Wococo::PAY_INBOUND_DISPATCH_FEE_WEIGHT_AT_CHAIN).into() - ); - } -} diff --git a/polkadot/bridges/relays/lib-substrate-relay/src/on_demand_headers.rs b/polkadot/bridges/relays/lib-substrate-relay/src/on_demand_headers.rs deleted file mode 100644 index c1401a28a6d..00000000000 --- a/polkadot/bridges/relays/lib-substrate-relay/src/on_demand_headers.rs +++ /dev/null @@ -1,452 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! On-demand Substrate -> Substrate headers relay. - -use async_std::sync::{Arc, Mutex}; -use futures::{select, FutureExt}; -use num_traits::{One, Zero}; - -use finality_relay::{FinalitySyncParams, SourceHeader, TargetClient as FinalityTargetClient}; -use relay_substrate_client::{ - AccountIdOf, AccountKeyPairOf, BlockNumberOf, Chain, Client, HeaderIdOf, HeaderOf, SyncHeader, - TransactionSignScheme, -}; -use relay_utils::{ - metrics::MetricsParams, relay_loop::Client as RelayClient, FailedClient, MaybeConnectionError, -}; - -use crate::{ - finality_pipeline::{SubstrateFinalitySyncPipeline, RECENT_FINALITY_PROOFS_LIMIT}, - finality_source::{RequiredHeaderNumberRef, SubstrateFinalitySource}, - finality_target::SubstrateFinalityTarget, - TransactionParams, STALL_TIMEOUT, -}; - -/// On-demand Substrate <-> Substrate headers relay. -/// -/// This relay may be requested to sync more headers, whenever some other relay (e.g. messages -/// relay) needs it to continue its regular work. When enough headers are relayed, on-demand stops -/// syncing headers. -#[derive(Clone)] -pub struct OnDemandHeadersRelay { - /// Relay task name. 
- relay_task_name: String, - /// Shared reference to maximal required finalized header number. - required_header_number: RequiredHeaderNumberRef, -} - -impl OnDemandHeadersRelay { - /// Create new on-demand headers relay. - pub fn new>( - source_client: Client, - target_client: Client, - target_transaction_params: TransactionParams>, - only_mandatory_headers: bool, - ) -> Self - where - AccountIdOf: - From< as sp_core::Pair>::Public>, - P::TransactionSignScheme: TransactionSignScheme, - { - let required_header_number = Arc::new(Mutex::new(Zero::zero())); - let this = OnDemandHeadersRelay { - relay_task_name: on_demand_headers_relay_name::(), - required_header_number: required_header_number.clone(), - }; - async_std::task::spawn(async move { - background_task::

( - source_client, - target_client, - target_transaction_params, - only_mandatory_headers, - required_header_number, - ) - .await; - }); - - this - } - - /// Someone is asking us to relay given finalized header. - pub async fn require_finalized_header(&self, header_id: HeaderIdOf) { - let mut required_header_number = self.required_header_number.lock().await; - if header_id.0 > *required_header_number { - log::trace!( - target: "bridge", - "More {} headers required in {} relay. Going to sync up to the {}", - SourceChain::NAME, - self.relay_task_name, - header_id.0, - ); - - *required_header_number = header_id.0; - } - } -} - -/// Background task that is responsible for starting headers relay. -async fn background_task( - source_client: Client, - target_client: Client, - target_transaction_params: TransactionParams>, - only_mandatory_headers: bool, - required_header_number: RequiredHeaderNumberRef, -) where - AccountIdOf: - From< as sp_core::Pair>::Public>, - P::TransactionSignScheme: TransactionSignScheme, -{ - let relay_task_name = on_demand_headers_relay_name::(); - let target_transactions_mortality = target_transaction_params.mortality; - let mut finality_source = SubstrateFinalitySource::

::new( - source_client.clone(), - Some(required_header_number.clone()), - ); - let mut finality_target = - SubstrateFinalityTarget::new(target_client.clone(), target_transaction_params); - let mut latest_non_mandatory_at_source = Zero::zero(); - - let mut restart_relay = true; - let finality_relay_task = futures::future::Fuse::terminated(); - futures::pin_mut!(finality_relay_task); - - loop { - select! { - _ = async_std::task::sleep(P::TargetChain::AVERAGE_BLOCK_INTERVAL).fuse() => {}, - _ = finality_relay_task => { - // this should never happen in practice given the current code - restart_relay = true; - }, - } - - // read best finalized source header number from source - let best_finalized_source_header_at_source = - best_finalized_source_header_at_source(&finality_source, &relay_task_name).await; - if matches!(best_finalized_source_header_at_source, Err(ref e) if e.is_connection_error()) { - relay_utils::relay_loop::reconnect_failed_client( - FailedClient::Source, - relay_utils::relay_loop::RECONNECT_DELAY, - &mut finality_source, - &mut finality_target, - ) - .await; - continue - } - - // read best finalized source header number from target - let best_finalized_source_header_at_target = - best_finalized_source_header_at_target::

(&finality_target, &relay_task_name).await; - if matches!(best_finalized_source_header_at_target, Err(ref e) if e.is_connection_error()) { - relay_utils::relay_loop::reconnect_failed_client( - FailedClient::Target, - relay_utils::relay_loop::RECONNECT_DELAY, - &mut finality_source, - &mut finality_target, - ) - .await; - continue - } - - // submit mandatory header if some headers are missing - let best_finalized_source_header_at_source_fmt = - format!("{:?}", best_finalized_source_header_at_source); - let best_finalized_source_header_at_target_fmt = - format!("{:?}", best_finalized_source_header_at_target); - let required_header_number_value = *required_header_number.lock().await; - let mandatory_scan_range = mandatory_headers_scan_range::( - best_finalized_source_header_at_source.ok(), - best_finalized_source_header_at_target.ok(), - required_header_number_value, - ) - .await; - - log::trace!( - target: "bridge", - "Mandatory headers scan range in {}: ({:?}, {:?}, {:?}) -> {:?}", - relay_task_name, - required_header_number_value, - best_finalized_source_header_at_source_fmt, - best_finalized_source_header_at_target_fmt, - mandatory_scan_range, - ); - - if let Some(mandatory_scan_range) = mandatory_scan_range { - let relay_mandatory_header_result = relay_mandatory_header_from_range( - &finality_source, - &required_header_number, - best_finalized_source_header_at_target_fmt, - ( - std::cmp::max(mandatory_scan_range.0, latest_non_mandatory_at_source), - mandatory_scan_range.1, - ), - &relay_task_name, - ) - .await; - match relay_mandatory_header_result { - Ok(true) => (), - Ok(false) => { - // there are no (or we don't need to relay them) mandatory headers in the range - // => to avoid scanning the same headers over and over again, remember that - latest_non_mandatory_at_source = mandatory_scan_range.1; - - log::trace!( - target: "bridge", - "No mandatory {} headers in the range {:?} of {} relay", - P::SourceChain::NAME, - mandatory_scan_range, - relay_task_name, - ); - }, - Err(e) => { - log::warn!( - target: "bridge", - "Failed to scan mandatory {} headers range in {} relay (range: {:?}): {:?}", - P::SourceChain::NAME, - relay_task_name, - mandatory_scan_range, - e, - ); - - if e.is_connection_error() { - relay_utils::relay_loop::reconnect_failed_client( - FailedClient::Source, - relay_utils::relay_loop::RECONNECT_DELAY, - &mut finality_source, - &mut finality_target, - ) - .await; - continue - } - }, - } - } - - // start/restart relay - if restart_relay { - let stall_timeout = relay_substrate_client::transaction_stall_timeout( - target_transactions_mortality, - P::TargetChain::AVERAGE_BLOCK_INTERVAL, - STALL_TIMEOUT, - ); - - log::info!( - target: "bridge", - "Starting {} relay\n\t\ - Only mandatory headers: {}\n\t\ - Tx mortality: {:?} (~{}m)\n\t\ - Stall timeout: {:?}", - relay_task_name, - only_mandatory_headers, - target_transactions_mortality, - stall_timeout.as_secs_f64() / 60.0f64, - stall_timeout, - ); - - finality_relay_task.set( - finality_relay::run( - finality_source.clone(), - finality_target.clone(), - FinalitySyncParams { - tick: std::cmp::max( - P::SourceChain::AVERAGE_BLOCK_INTERVAL, - P::TargetChain::AVERAGE_BLOCK_INTERVAL, - ), - recent_finality_proofs_limit: RECENT_FINALITY_PROOFS_LIMIT, - stall_timeout, - only_mandatory_headers, - }, - MetricsParams::disabled(), - futures::future::pending(), - ) - .fuse(), - ); - - restart_relay = false; - } - } -} - -/// Returns `Some()` with inclusive range of headers which must be scanned for mandatory headers -/// and the first of 
such headers must be submitted to the target node. -async fn mandatory_headers_scan_range( - best_finalized_source_header_at_source: Option, - best_finalized_source_header_at_target: Option, - required_header_number: BlockNumberOf, -) -> Option<(C::BlockNumber, C::BlockNumber)> { - // if we have been unable to read header number from the target, then let's assume - // that it is the same as required header number. Otherwise we risk submitting - // unneeded transactions - let best_finalized_source_header_at_target = - best_finalized_source_header_at_target.unwrap_or(required_header_number); - - // if we have been unable to read header number from the source, then let's assume - // that it is the same as at the target - let best_finalized_source_header_at_source = - best_finalized_source_header_at_source.unwrap_or(best_finalized_source_header_at_target); - - // if relay is already asked to sync more headers than we have at source, don't do anything yet - if required_header_number >= best_finalized_source_header_at_source { - return None - } - - Some(( - best_finalized_source_header_at_target + One::one(), - best_finalized_source_header_at_source, - )) -} - -/// Try to find mandatory header in the inclusive headers range and, if one is found, ask to relay -/// it. -/// -/// Returns `true` if header was found and (asked to be) relayed and `false` otherwise. -async fn relay_mandatory_header_from_range( - finality_source: &SubstrateFinalitySource

, - required_header_number: &RequiredHeaderNumberRef, - best_finalized_source_header_at_target: String, - range: (BlockNumberOf, BlockNumberOf), - relay_task_name: &str, -) -> Result { - // search for mandatory header first - let mandatory_source_header_number = - find_mandatory_header_in_range(finality_source, range).await?; - - // if there are no mandatory headers - we have nothing to do - let mandatory_source_header_number = match mandatory_source_header_number { - Some(mandatory_source_header_number) => mandatory_source_header_number, - None => return Ok(false), - }; - - // `find_mandatory_header` call may take a while => check if `required_header_number` is still - // less than our `mandatory_source_header_number` before logging anything - let mut required_header_number = required_header_number.lock().await; - if *required_header_number >= mandatory_source_header_number { - return Ok(false) - } - - log::trace!( - target: "bridge", - "Too many {} headers missing at target in {} relay ({} vs {}). Going to sync up to the mandatory {}", - P::SourceChain::NAME, - relay_task_name, - best_finalized_source_header_at_target, - range.1, - mandatory_source_header_number, - ); - - *required_header_number = mandatory_source_header_number; - Ok(true) -} - -/// Read best finalized source block number from source client. -/// -/// Returns `None` if we have failed to read the number. -async fn best_finalized_source_header_at_source( - finality_source: &SubstrateFinalitySource

, - relay_task_name: &str, -) -> Result, relay_substrate_client::Error> { - finality_source.on_chain_best_finalized_block_number().await.map_err(|error| { - log::error!( - target: "bridge", - "Failed to read best finalized source header from source in {} relay: {:?}", - relay_task_name, - error, - ); - - error - }) -} - -/// Read best finalized source block number from target client. -/// -/// Returns `None` if we have failed to read the number. -async fn best_finalized_source_header_at_target( - finality_target: &SubstrateFinalityTarget

, - relay_task_name: &str, -) -> Result, as RelayClient>::Error> -where - AccountIdOf: - From< as sp_core::Pair>::Public>, - P::TransactionSignScheme: TransactionSignScheme, -{ - finality_target - .best_finalized_source_block_id() - .await - .map_err(|error| { - log::error!( - target: "bridge", - "Failed to read best finalized source header from target in {} relay: {:?}", - relay_task_name, - error, - ); - - error - }) - .map(|id| id.0) -} - -/// Read first mandatory header in given inclusive range. -/// -/// Returns `Ok(None)` if there were no mandatory headers in the range. -async fn find_mandatory_header_in_range( - finality_source: &SubstrateFinalitySource

, - range: (BlockNumberOf, BlockNumberOf), -) -> Result>, relay_substrate_client::Error> { - let mut current = range.0; - while current <= range.1 { - let header: SyncHeader> = - finality_source.client().header_by_number(current).await?.into(); - if header.is_mandatory() { - return Ok(Some(current)) - } - - current += One::one(); - } - - Ok(None) -} - -/// On-demand headers relay task name. -fn on_demand_headers_relay_name() -> String { - format!("on-demand-{}-to-{}", SourceChain::NAME, TargetChain::NAME) -} - -#[cfg(test)] -mod tests { - use super::*; - - type TestChain = relay_rococo_client::Rococo; - - const AT_SOURCE: Option = Some(10); - const AT_TARGET: Option = Some(1); - - #[async_std::test] - async fn mandatory_headers_scan_range_selects_range_if_some_headers_are_missing() { - assert_eq!( - mandatory_headers_scan_range::(AT_SOURCE, AT_TARGET, 0,).await, - Some((AT_TARGET.unwrap() + 1, AT_SOURCE.unwrap())), - ); - } - - #[async_std::test] - async fn mandatory_headers_scan_range_selects_nothing_if_already_queued() { - assert_eq!( - mandatory_headers_scan_range::(AT_SOURCE, AT_TARGET, AT_SOURCE.unwrap(),) - .await, - None, - ); - } -} diff --git a/polkadot/bridges/relays/messages/Cargo.toml b/polkadot/bridges/relays/messages/Cargo.toml deleted file mode 100644 index b3357994b12..00000000000 --- a/polkadot/bridges/relays/messages/Cargo.toml +++ /dev/null @@ -1,24 +0,0 @@ -[package] -name = "messages-relay" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -async-std = { version = "1.6.5", features = ["attributes"] } -async-trait = "0.1.40" -futures = "0.3.5" -hex = "0.4" -log = "0.4.11" -num-traits = "0.2" -parking_lot = "0.11.0" - -# Bridge Dependencies - -bp-messages = { path = "../../primitives/messages" } -bp-runtime = { path = "../../primitives/runtime" } -finality-relay = { path = "../finality" } -relay-utils = { path = "../utils" } - -sp-arithmetic = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/relays/messages/src/lib.rs b/polkadot/bridges/relays/messages/src/lib.rs deleted file mode 100644 index c9e46030034..00000000000 --- a/polkadot/bridges/relays/messages/src/lib.rs +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Relaying [`pallet-bridge-messages`](../pallet_bridge_messages/index.html) application specific -//! data. Message lane allows sending arbitrary messages between bridged chains. This -//! module provides entrypoint that starts reading messages from given message lane -//! of source chain and submits proof-of-message-at-source-chain transactions to the -//! target chain. Additionally, proofs-of-messages-delivery are sent back from the -//! target chain to the source chain. 
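The loop that this crate doc describes runs two independent "races" per lane and direction: message delivery from source to target, and delivery confirmation from target back to source. A conceptual sketch of one round, using illustrative names rather than the crate's actual API:

// Conceptual sketch (not the crate's API): one round of the two per-lane races.
// All names and the nonce bookkeeping are illustrative only.
struct MessagesProof;         // proof that messages exist at the source chain
struct MessagesDeliveryProof; // proof that the target chain has received them

fn relay_round(
    latest_generated_at_source: u64, // highest nonce generated at the source chain
    latest_received_at_target: u64,  // highest nonce delivered to the target chain
    latest_confirmed_at_source: u64, // highest delivery confirmed back at the source chain
) -> (Option<MessagesProof>, Option<MessagesDeliveryProof>) {
    // Delivery race: the source has generated nonces the target has not seen yet,
    // so build a messages proof and submit it to the target chain.
    let deliver =
        (latest_generated_at_source > latest_received_at_target).then_some(MessagesProof);
    // Receiving race: the target has received nonces the source has not confirmed yet,
    // so build a delivery proof and submit it back to the source chain.
    let confirm =
        (latest_received_at_target > latest_confirmed_at_source).then_some(MessagesDeliveryProof);
    (deliver, confirm)
}

fn main() {
    let (deliver, confirm) = relay_round(10, 7, 5);
    assert!(deliver.is_some() && confirm.is_some());
}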
- -// required for futures::select! -#![recursion_limit = "1024"] -#![warn(missing_docs)] - -mod metrics; - -pub mod message_lane; -pub mod message_lane_loop; -pub mod relay_strategy; - -mod message_race_delivery; -mod message_race_loop; -mod message_race_receiving; -mod message_race_strategy; diff --git a/polkadot/bridges/relays/messages/src/message_lane.rs b/polkadot/bridges/relays/messages/src/message_lane.rs deleted file mode 100644 index 5c9728ad93a..00000000000 --- a/polkadot/bridges/relays/messages/src/message_lane.rs +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! One-way message lane types. Within single one-way lane we have three 'races' where we try to: -//! -//! 1) relay new messages from source to target node; -//! 2) relay proof-of-delivery from target to source node. - -use num_traits::{SaturatingAdd, Zero}; -use relay_utils::{BlockNumberBase, HeaderId}; -use sp_arithmetic::traits::AtLeast32BitUnsigned; -use std::{fmt::Debug, ops::Sub}; - -/// One-way message lane. -pub trait MessageLane: 'static + Clone + Send + Sync { - /// Name of the messages source. - const SOURCE_NAME: &'static str; - /// Name of the messages target. - const TARGET_NAME: &'static str; - - /// Messages proof. - type MessagesProof: Clone + Debug + Send + Sync; - /// Messages receiving proof. - type MessagesReceivingProof: Clone + Debug + Send + Sync; - - /// The type of the source chain token balance, that is used to: - /// - /// 1) pay transaction fees; - /// 2) pay message delivery and dispatch fee; - /// 3) pay relayer rewards. - type SourceChainBalance: AtLeast32BitUnsigned - + Clone - + Copy - + Debug - + PartialOrd - + Sub - + SaturatingAdd - + Zero - + Send - + Sync; - /// Number of the source header. - type SourceHeaderNumber: BlockNumberBase; - /// Hash of the source header. - type SourceHeaderHash: Clone + Debug + Default + PartialEq + Send + Sync; - - /// Number of the target header. - type TargetHeaderNumber: BlockNumberBase; - /// Hash of the target header. - type TargetHeaderHash: Clone + Debug + Default + PartialEq + Send + Sync; -} - -/// Source header id within given one-way message lane. -pub type SourceHeaderIdOf

<P> = - HeaderId<<P as MessageLane>::SourceHeaderHash, <P as MessageLane>::SourceHeaderNumber>; - -/// Target header id within given one-way message lane. -pub type TargetHeaderIdOf<P> = - HeaderId<<P as MessageLane>::TargetHeaderHash, <P as MessageLane>
::TargetHeaderNumber>; diff --git a/polkadot/bridges/relays/messages/src/message_lane_loop.rs b/polkadot/bridges/relays/messages/src/message_lane_loop.rs deleted file mode 100644 index 1f293990e44..00000000000 --- a/polkadot/bridges/relays/messages/src/message_lane_loop.rs +++ /dev/null @@ -1,968 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Message delivery loop. Designed to work with messages pallet. -//! -//! Single relay instance delivers messages of single lane in single direction. -//! To serve two-way lane, you would need two instances of relay. -//! To serve N two-way lanes, you would need N*2 instances of relay. -//! -//! Please keep in mind that the best header in this file is actually best -//! finalized header. I.e. when talking about headers in lane context, we -//! only care about finalized headers. - -use std::{collections::BTreeMap, fmt::Debug, future::Future, ops::RangeInclusive, time::Duration}; - -use async_trait::async_trait; -use futures::{channel::mpsc::unbounded, future::FutureExt, stream::StreamExt}; - -use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState, Weight}; -use bp_runtime::messages::DispatchFeePayment; -use relay_utils::{ - interval, metrics::MetricsParams, process_future_result, relay_loop::Client as RelayClient, - retry_backoff, FailedClient, -}; - -use crate::{ - message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}, - message_race_delivery::run as run_message_delivery_race, - message_race_receiving::run as run_message_receiving_race, - metrics::MessageLaneLoopMetrics, - relay_strategy::RelayStrategy, -}; - -/// Message lane loop configuration params. -#[derive(Debug, Clone)] -pub struct Params { - /// Id of lane this loop is servicing. - pub lane: LaneId, - /// Interval at which we ask target node about its updates. - pub source_tick: Duration, - /// Interval at which we ask target node about its updates. - pub target_tick: Duration, - /// Delay between moments when connection error happens and our reconnect attempt. - pub reconnect_delay: Duration, - /// The loop will auto-restart if there has been no updates during this period. - pub stall_timeout: Duration, - /// Message delivery race parameters. - pub delivery_params: MessageDeliveryParams, -} - -/// Relayer operating mode. -#[derive(Debug, Clone, Copy, PartialEq)] -pub enum RelayerMode { - /// The relayer doesn't care about rewards. - Altruistic, - /// The relayer will deliver all messages and confirmations as long as he's not losing any - /// funds. - Rational, -} - -/// Message delivery race parameters. -#[derive(Debug, Clone)] -pub struct MessageDeliveryParams { - /// Maximal number of unconfirmed relayer entries at the inbound lane. 
If there's that number - /// of entries in the `InboundLaneData::relayers` set, all new messages will be rejected until - /// reward payment will be proved (by including outbound lane state to the message delivery - /// transaction). - pub max_unrewarded_relayer_entries_at_target: MessageNonce, - /// Message delivery race will stop delivering messages if there are - /// `max_unconfirmed_nonces_at_target` unconfirmed nonces on the target node. The race would - /// continue once they're confirmed by the receiving race. - pub max_unconfirmed_nonces_at_target: MessageNonce, - /// Maximal number of relayed messages in single delivery transaction. - pub max_messages_in_single_batch: MessageNonce, - /// Maximal cumulative dispatch weight of relayed messages in single delivery transaction. - pub max_messages_weight_in_single_batch: Weight, - /// Maximal cumulative size of relayed messages in single delivery transaction. - pub max_messages_size_in_single_batch: u32, - /// Relay strategy - pub relay_strategy: Strategy, -} - -/// Message details. -#[derive(Debug, Clone, Copy, PartialEq)] -pub struct MessageDetails { - /// Message dispatch weight. - pub dispatch_weight: Weight, - /// Message size (number of bytes in encoded payload). - pub size: u32, - /// The relayer reward paid in the source chain tokens. - pub reward: SourceChainBalance, - /// Where the fee for dispatching message is paid? - pub dispatch_fee_payment: DispatchFeePayment, -} - -/// Messages details map. -pub type MessageDetailsMap = - BTreeMap>; - -/// Message delivery race proof parameters. -#[derive(Debug, PartialEq)] -pub struct MessageProofParameters { - /// Include outbound lane state proof? - pub outbound_state_proof_required: bool, - /// Cumulative dispatch weight of messages that we're building proof for. - pub dispatch_weight: Weight, -} - -/// Source client trait. -#[async_trait] -pub trait SourceClient: RelayClient { - /// Returns state of the client. - async fn state(&self) -> Result, Self::Error>; - - /// Get nonce of instance of latest generated message. - async fn latest_generated_nonce( - &self, - id: SourceHeaderIdOf

, - ) -> Result<(SourceHeaderIdOf

, MessageNonce), Self::Error>; - - /// Get nonce of the latest message, which receiving has been confirmed by the target chain. - async fn latest_confirmed_received_nonce( - &self, - id: SourceHeaderIdOf

, - ) -> Result<(SourceHeaderIdOf

, MessageNonce), Self::Error>; - - /// Returns mapping of message nonces, generated on this client, to their weights. - /// - /// Some messages may be missing from returned map, if corresponding messages were pruned at - /// the source chain. - async fn generated_message_details( - &self, - id: SourceHeaderIdOf

, - nonces: RangeInclusive, - ) -> Result, Self::Error>; - - /// Prove messages in inclusive range [begin; end]. - async fn prove_messages( - &self, - id: SourceHeaderIdOf

, - nonces: RangeInclusive, - proof_parameters: MessageProofParameters, - ) -> Result<(SourceHeaderIdOf

, RangeInclusive, P::MessagesProof), Self::Error>; - - /// Submit messages receiving proof. - async fn submit_messages_receiving_proof( - &self, - generated_at_block: TargetHeaderIdOf

, - proof: P::MessagesReceivingProof, - ) -> Result<(), Self::Error>; - - /// We need given finalized target header on source to continue synchronization. - async fn require_target_header_on_source(&self, id: TargetHeaderIdOf

); - - /// Estimate cost of single message confirmation transaction in source chain tokens. - async fn estimate_confirmation_transaction(&self) -> P::SourceChainBalance; -} - -/// Target client trait. -#[async_trait] -pub trait TargetClient: RelayClient { - /// Returns state of the client. - async fn state(&self) -> Result, Self::Error>; - - /// Get nonce of latest received message. - async fn latest_received_nonce( - &self, - id: TargetHeaderIdOf

, - ) -> Result<(TargetHeaderIdOf

, MessageNonce), Self::Error>; - - /// Get nonce of the latest confirmed message. - async fn latest_confirmed_received_nonce( - &self, - id: TargetHeaderIdOf

, - ) -> Result<(TargetHeaderIdOf

, MessageNonce), Self::Error>; - - /// Get state of unrewarded relayers set at the inbound lane. - async fn unrewarded_relayers_state( - &self, - id: TargetHeaderIdOf

, - ) -> Result<(TargetHeaderIdOf

, UnrewardedRelayersState), Self::Error>; - - /// Prove messages receiving at given block. - async fn prove_messages_receiving( - &self, - id: TargetHeaderIdOf

, - ) -> Result<(TargetHeaderIdOf

, P::MessagesReceivingProof), Self::Error>; - - /// Submit messages proof. - async fn submit_messages_proof( - &self, - generated_at_header: SourceHeaderIdOf

, - nonces: RangeInclusive, - proof: P::MessagesProof, - ) -> Result, Self::Error>; - - /// We need given finalized source header on target to continue synchronization. - async fn require_source_header_on_target(&self, id: SourceHeaderIdOf

); - - /// Estimate cost of messages delivery transaction in source chain tokens. - /// - /// Please keep in mind that the returned cost must be converted to the source chain - /// tokens, even though the transaction fee will be paid in the target chain tokens. - async fn estimate_delivery_transaction_in_source_tokens( - &self, - nonces: RangeInclusive, - total_prepaid_nonces: MessageNonce, - total_dispatch_weight: Weight, - total_size: u32, - ) -> Result; -} - -/// State of the client. -#[derive(Clone, Debug, Default, PartialEq)] -pub struct ClientState { - /// The best header id of this chain. - pub best_self: SelfHeaderId, - /// Best finalized header id of this chain. - pub best_finalized_self: SelfHeaderId, - /// Best finalized header id of the peer chain read at the best block of this chain (at - /// `best_finalized_self`). - pub best_finalized_peer_at_best_self: PeerHeaderId, - /// Header id of the peer chain with the number, matching the - /// `best_finalized_peer_at_best_self`. - pub actual_best_finalized_peer_at_best_self: PeerHeaderId, -} - -/// State of source client in one-way message lane. -pub type SourceClientState

<P> = ClientState<SourceHeaderIdOf<P>, TargetHeaderIdOf<P>>; - -/// State of target client in one-way message lane. -pub type TargetClientState<P> = ClientState<TargetHeaderIdOf<P>, SourceHeaderIdOf<P>
>; - -/// Both clients state. -#[derive(Debug, Default)] -pub struct ClientsState { - /// Source client state. - pub source: Option>, - /// Target client state. - pub target: Option>, -} - -/// Return prefix that will be used by default to expose Prometheus metrics of the finality proofs -/// sync loop. -pub fn metrics_prefix(lane: &LaneId) -> String { - format!("{}_to_{}_MessageLane_{}", P::SOURCE_NAME, P::TARGET_NAME, hex::encode(lane)) -} - -/// Run message lane service loop. -pub async fn run( - params: Params, - source_client: impl SourceClient

, - target_client: impl TargetClient

, - metrics_params: MetricsParams, - exit_signal: impl Future + Send + 'static, -) -> Result<(), relay_utils::Error> { - let exit_signal = exit_signal.shared(); - relay_utils::relay_loop(source_client, target_client) - .reconnect_delay(params.reconnect_delay) - .with_metrics(metrics_params) - .loop_metric(MessageLaneLoopMetrics::new(Some(&metrics_prefix::

(¶ms.lane)))?)? - .expose() - .await? - .run(metrics_prefix::

(¶ms.lane), move |source_client, target_client, metrics| { - run_until_connection_lost( - params.clone(), - source_client, - target_client, - metrics, - exit_signal.clone(), - ) - }) - .await -} - -/// Run one-way message delivery loop until connection with target or source node is lost, or exit -/// signal is received. -async fn run_until_connection_lost< - P: MessageLane, - Strategy: RelayStrategy, - SC: SourceClient

, - TC: TargetClient

, ->( - params: Params, - source_client: SC, - target_client: TC, - metrics_msg: Option, - exit_signal: impl Future, -) -> Result<(), FailedClient> { - let mut source_retry_backoff = retry_backoff(); - let mut source_client_is_online = false; - let mut source_state_required = true; - let source_state = source_client.state().fuse(); - let source_go_offline_future = futures::future::Fuse::terminated(); - let source_tick_stream = interval(params.source_tick).fuse(); - - let mut target_retry_backoff = retry_backoff(); - let mut target_client_is_online = false; - let mut target_state_required = true; - let target_state = target_client.state().fuse(); - let target_go_offline_future = futures::future::Fuse::terminated(); - let target_tick_stream = interval(params.target_tick).fuse(); - - let ( - (delivery_source_state_sender, delivery_source_state_receiver), - (delivery_target_state_sender, delivery_target_state_receiver), - ) = (unbounded(), unbounded()); - let delivery_race_loop = run_message_delivery_race( - source_client.clone(), - delivery_source_state_receiver, - target_client.clone(), - delivery_target_state_receiver, - params.stall_timeout, - metrics_msg.clone(), - params.delivery_params, - ) - .fuse(); - - let ( - (receiving_source_state_sender, receiving_source_state_receiver), - (receiving_target_state_sender, receiving_target_state_receiver), - ) = (unbounded(), unbounded()); - let receiving_race_loop = run_message_receiving_race( - source_client.clone(), - receiving_source_state_receiver, - target_client.clone(), - receiving_target_state_receiver, - params.stall_timeout, - metrics_msg.clone(), - ) - .fuse(); - - let exit_signal = exit_signal.fuse(); - - futures::pin_mut!( - source_state, - source_go_offline_future, - source_tick_stream, - target_state, - target_go_offline_future, - target_tick_stream, - delivery_race_loop, - receiving_race_loop, - exit_signal - ); - - loop { - futures::select! { - new_source_state = source_state => { - source_state_required = false; - - source_client_is_online = process_future_result( - new_source_state, - &mut source_retry_backoff, - |new_source_state| { - log::debug!( - target: "bridge", - "Received state from {} node: {:?}", - P::SOURCE_NAME, - new_source_state, - ); - let _ = delivery_source_state_sender.unbounded_send(new_source_state.clone()); - let _ = receiving_source_state_sender.unbounded_send(new_source_state.clone()); - - if let Some(metrics_msg) = metrics_msg.as_ref() { - metrics_msg.update_source_state::

(new_source_state); - } - }, - &mut source_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving state from {} node", P::SOURCE_NAME), - ).fail_if_connection_error(FailedClient::Source)?; - }, - _ = source_go_offline_future => { - source_client_is_online = true; - }, - _ = source_tick_stream.next() => { - source_state_required = true; - }, - new_target_state = target_state => { - target_state_required = false; - - target_client_is_online = process_future_result( - new_target_state, - &mut target_retry_backoff, - |new_target_state| { - log::debug!( - target: "bridge", - "Received state from {} node: {:?}", - P::TARGET_NAME, - new_target_state, - ); - let _ = delivery_target_state_sender.unbounded_send(new_target_state.clone()); - let _ = receiving_target_state_sender.unbounded_send(new_target_state.clone()); - - if let Some(metrics_msg) = metrics_msg.as_ref() { - metrics_msg.update_target_state::

(new_target_state); - } - }, - &mut target_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving state from {} node", P::TARGET_NAME), - ).fail_if_connection_error(FailedClient::Target)?; - }, - _ = target_go_offline_future => { - target_client_is_online = true; - }, - _ = target_tick_stream.next() => { - target_state_required = true; - }, - - delivery_error = delivery_race_loop => { - match delivery_error { - Ok(_) => unreachable!("only ends with error; qed"), - Err(err) => return Err(err), - } - }, - receiving_error = receiving_race_loop => { - match receiving_error { - Ok(_) => unreachable!("only ends with error; qed"), - Err(err) => return Err(err), - } - }, - - () = exit_signal => { - return Ok(()); - } - } - - if source_client_is_online && source_state_required { - log::debug!(target: "bridge", "Asking {} node about its state", P::SOURCE_NAME); - source_state.set(source_client.state().fuse()); - source_client_is_online = false; - } - - if target_client_is_online && target_state_required { - log::debug!(target: "bridge", "Asking {} node about its state", P::TARGET_NAME); - target_state.set(target_client.state().fuse()); - target_client_is_online = false; - } - } -} - -#[cfg(test)] -pub(crate) mod tests { - use std::sync::Arc; - - use futures::stream::StreamExt; - use parking_lot::Mutex; - - use relay_utils::{HeaderId, MaybeConnectionError}; - - use crate::relay_strategy::AltruisticStrategy; - - use super::*; - - pub fn header_id(number: TestSourceHeaderNumber) -> TestSourceHeaderId { - HeaderId(number, number) - } - - pub const CONFIRMATION_TRANSACTION_COST: TestSourceChainBalance = 1; - pub const BASE_MESSAGE_DELIVERY_TRANSACTION_COST: TestSourceChainBalance = 1; - - pub type TestSourceChainBalance = u64; - pub type TestSourceHeaderId = HeaderId; - pub type TestTargetHeaderId = HeaderId; - - pub type TestMessagesProof = (RangeInclusive, Option); - pub type TestMessagesReceivingProof = MessageNonce; - - pub type TestSourceHeaderNumber = u64; - pub type TestSourceHeaderHash = u64; - - pub type TestTargetHeaderNumber = u64; - pub type TestTargetHeaderHash = u64; - - #[derive(Debug)] - pub struct TestError; - - impl MaybeConnectionError for TestError { - fn is_connection_error(&self) -> bool { - true - } - } - - #[derive(Clone)] - pub struct TestMessageLane; - - impl MessageLane for TestMessageLane { - const SOURCE_NAME: &'static str = "TestSource"; - const TARGET_NAME: &'static str = "TestTarget"; - - type MessagesProof = TestMessagesProof; - type MessagesReceivingProof = TestMessagesReceivingProof; - - type SourceChainBalance = TestSourceChainBalance; - type SourceHeaderNumber = TestSourceHeaderNumber; - type SourceHeaderHash = TestSourceHeaderHash; - - type TargetHeaderNumber = TestTargetHeaderNumber; - type TargetHeaderHash = TestTargetHeaderHash; - } - - #[derive(Debug, Default, Clone)] - pub struct TestClientData { - is_source_fails: bool, - is_source_reconnected: bool, - source_state: SourceClientState, - source_latest_generated_nonce: MessageNonce, - source_latest_confirmed_received_nonce: MessageNonce, - submitted_messages_receiving_proofs: Vec, - is_target_fails: bool, - is_target_reconnected: bool, - target_state: SourceClientState, - target_latest_received_nonce: MessageNonce, - target_latest_confirmed_received_nonce: MessageNonce, - submitted_messages_proofs: Vec, - target_to_source_header_required: Option, - target_to_source_header_requirements: Vec, - source_to_target_header_required: Option, - source_to_target_header_requirements: Vec, - } - - 
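The test clients below share a single `TestClientData` behind a mutex and run a `tick` callback on every client call, letting each test mutate the shared state as the loop progresses. A stripped-down sketch of that pattern, with hypothetical types in place of the real ones:

use std::sync::{Arc, Mutex};

// Hypothetical stand-ins for the real test types.
#[derive(Default)]
struct TestData {
    calls: u32,
}

#[derive(Clone)]
struct TestClient {
    data: Arc<Mutex<TestData>>,
    tick: Arc<dyn Fn(&mut TestData) + Send + Sync>,
}

impl TestClient {
    fn state(&self) -> u32 {
        let mut data = self.data.lock().unwrap();
        (self.tick)(&mut *data); // let the test mutate shared state on every client call
        data.calls
    }
}

fn main() {
    let client = TestClient {
        data: Arc::new(Mutex::new(TestData::default())),
        tick: Arc::new(|data: &mut TestData| data.calls += 1),
    };
    assert_eq!(client.state(), 1);
    assert_eq!(client.state(), 2);
}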
#[derive(Clone)] - pub struct TestSourceClient { - data: Arc>, - tick: Arc, - } - - impl Default for TestSourceClient { - fn default() -> Self { - TestSourceClient { - data: Arc::new(Mutex::new(TestClientData::default())), - tick: Arc::new(|_| {}), - } - } - } - - #[async_trait] - impl RelayClient for TestSourceClient { - type Error = TestError; - - async fn reconnect(&mut self) -> Result<(), TestError> { - { - let mut data = self.data.lock(); - (self.tick)(&mut *data); - data.is_source_reconnected = true; - } - Ok(()) - } - } - - #[async_trait] - impl SourceClient for TestSourceClient { - async fn state(&self) -> Result, TestError> { - let mut data = self.data.lock(); - (self.tick)(&mut *data); - if data.is_source_fails { - return Err(TestError) - } - Ok(data.source_state.clone()) - } - - async fn latest_generated_nonce( - &self, - id: SourceHeaderIdOf, - ) -> Result<(SourceHeaderIdOf, MessageNonce), TestError> { - let mut data = self.data.lock(); - (self.tick)(&mut *data); - if data.is_source_fails { - return Err(TestError) - } - Ok((id, data.source_latest_generated_nonce)) - } - - async fn latest_confirmed_received_nonce( - &self, - id: SourceHeaderIdOf, - ) -> Result<(SourceHeaderIdOf, MessageNonce), TestError> { - let mut data = self.data.lock(); - (self.tick)(&mut *data); - Ok((id, data.source_latest_confirmed_received_nonce)) - } - - async fn generated_message_details( - &self, - _id: SourceHeaderIdOf, - nonces: RangeInclusive, - ) -> Result, TestError> { - Ok(nonces - .map(|nonce| { - ( - nonce, - MessageDetails { - dispatch_weight: 1, - size: 1, - reward: 1, - dispatch_fee_payment: DispatchFeePayment::AtSourceChain, - }, - ) - }) - .collect()) - } - - async fn prove_messages( - &self, - id: SourceHeaderIdOf, - nonces: RangeInclusive, - proof_parameters: MessageProofParameters, - ) -> Result< - (SourceHeaderIdOf, RangeInclusive, TestMessagesProof), - TestError, - > { - let mut data = self.data.lock(); - (self.tick)(&mut *data); - Ok(( - id, - nonces.clone(), - ( - nonces, - if proof_parameters.outbound_state_proof_required { - Some(data.source_latest_confirmed_received_nonce) - } else { - None - }, - ), - )) - } - - async fn submit_messages_receiving_proof( - &self, - _generated_at_block: TargetHeaderIdOf, - proof: TestMessagesReceivingProof, - ) -> Result<(), TestError> { - let mut data = self.data.lock(); - (self.tick)(&mut *data); - data.source_state.best_self = - HeaderId(data.source_state.best_self.0 + 1, data.source_state.best_self.1 + 1); - data.source_state.best_finalized_self = data.source_state.best_self; - data.submitted_messages_receiving_proofs.push(proof); - data.source_latest_confirmed_received_nonce = proof; - Ok(()) - } - - async fn require_target_header_on_source(&self, id: TargetHeaderIdOf) { - let mut data = self.data.lock(); - data.target_to_source_header_required = Some(id); - data.target_to_source_header_requirements.push(id); - (self.tick)(&mut *data); - } - - async fn estimate_confirmation_transaction(&self) -> TestSourceChainBalance { - CONFIRMATION_TRANSACTION_COST - } - } - - #[derive(Clone)] - pub struct TestTargetClient { - data: Arc>, - tick: Arc, - } - - impl Default for TestTargetClient { - fn default() -> Self { - TestTargetClient { - data: Arc::new(Mutex::new(TestClientData::default())), - tick: Arc::new(|_| {}), - } - } - } - - #[async_trait] - impl RelayClient for TestTargetClient { - type Error = TestError; - - async fn reconnect(&mut self) -> Result<(), TestError> { - { - let mut data = self.data.lock(); - (self.tick)(&mut *data); - 
data.is_target_reconnected = true; - } - Ok(()) - } - } - - #[async_trait] - impl TargetClient for TestTargetClient { - async fn state(&self) -> Result, TestError> { - let mut data = self.data.lock(); - (self.tick)(&mut *data); - if data.is_target_fails { - return Err(TestError) - } - Ok(data.target_state.clone()) - } - - async fn latest_received_nonce( - &self, - id: TargetHeaderIdOf, - ) -> Result<(TargetHeaderIdOf, MessageNonce), TestError> { - let mut data = self.data.lock(); - (self.tick)(&mut *data); - if data.is_target_fails { - return Err(TestError) - } - Ok((id, data.target_latest_received_nonce)) - } - - async fn unrewarded_relayers_state( - &self, - id: TargetHeaderIdOf, - ) -> Result<(TargetHeaderIdOf, UnrewardedRelayersState), TestError> { - Ok(( - id, - UnrewardedRelayersState { - unrewarded_relayer_entries: 0, - messages_in_oldest_entry: 0, - total_messages: 0, - }, - )) - } - - async fn latest_confirmed_received_nonce( - &self, - id: TargetHeaderIdOf, - ) -> Result<(TargetHeaderIdOf, MessageNonce), TestError> { - let mut data = self.data.lock(); - (self.tick)(&mut *data); - if data.is_target_fails { - return Err(TestError) - } - Ok((id, data.target_latest_confirmed_received_nonce)) - } - - async fn prove_messages_receiving( - &self, - id: TargetHeaderIdOf, - ) -> Result<(TargetHeaderIdOf, TestMessagesReceivingProof), TestError> { - Ok((id, self.data.lock().target_latest_received_nonce)) - } - - async fn submit_messages_proof( - &self, - _generated_at_header: SourceHeaderIdOf, - nonces: RangeInclusive, - proof: TestMessagesProof, - ) -> Result, TestError> { - let mut data = self.data.lock(); - (self.tick)(&mut *data); - if data.is_target_fails { - return Err(TestError) - } - data.target_state.best_self = - HeaderId(data.target_state.best_self.0 + 1, data.target_state.best_self.1 + 1); - data.target_state.best_finalized_self = data.target_state.best_self; - data.target_latest_received_nonce = *proof.0.end(); - if let Some(target_latest_confirmed_received_nonce) = proof.1 { - data.target_latest_confirmed_received_nonce = - target_latest_confirmed_received_nonce; - } - data.submitted_messages_proofs.push(proof); - Ok(nonces) - } - - async fn require_source_header_on_target(&self, id: SourceHeaderIdOf) { - let mut data = self.data.lock(); - data.source_to_target_header_required = Some(id); - data.source_to_target_header_requirements.push(id); - (self.tick)(&mut *data); - } - - async fn estimate_delivery_transaction_in_source_tokens( - &self, - nonces: RangeInclusive, - _total_prepaid_nonces: MessageNonce, - total_dispatch_weight: Weight, - total_size: u32, - ) -> Result { - Ok(BASE_MESSAGE_DELIVERY_TRANSACTION_COST * (nonces.end() - nonces.start() + 1) + - total_dispatch_weight + - total_size as TestSourceChainBalance) - } - } - - fn run_loop_test( - data: TestClientData, - source_tick: Arc, - target_tick: Arc, - exit_signal: impl Future + 'static + Send, - ) -> TestClientData { - async_std::task::block_on(async { - let data = Arc::new(Mutex::new(data)); - - let source_client = TestSourceClient { data: data.clone(), tick: source_tick }; - let target_client = TestTargetClient { data: data.clone(), tick: target_tick }; - let _ = run( - Params { - lane: [0, 0, 0, 0], - source_tick: Duration::from_millis(100), - target_tick: Duration::from_millis(100), - reconnect_delay: Duration::from_millis(0), - stall_timeout: Duration::from_millis(60 * 1000), - delivery_params: MessageDeliveryParams { - max_unrewarded_relayer_entries_at_target: 4, - max_unconfirmed_nonces_at_target: 4, - 
max_messages_in_single_batch: 4, - max_messages_weight_in_single_batch: 4, - max_messages_size_in_single_batch: 4, - relay_strategy: AltruisticStrategy, - }, - }, - source_client, - target_client, - MetricsParams::disabled(), - exit_signal, - ) - .await; - let result = data.lock().clone(); - result - }) - } - - #[test] - fn message_lane_loop_is_able_to_recover_from_connection_errors() { - // with this configuration, source client will return Err, making source client - // reconnect. Then the target client will fail with Err + reconnect. Then we finally - // able to deliver messages. - let (exit_sender, exit_receiver) = unbounded(); - let result = run_loop_test( - TestClientData { - is_source_fails: true, - source_state: ClientState { - best_self: HeaderId(0, 0), - best_finalized_self: HeaderId(0, 0), - best_finalized_peer_at_best_self: HeaderId(0, 0), - actual_best_finalized_peer_at_best_self: HeaderId(0, 0), - }, - source_latest_generated_nonce: 1, - target_state: ClientState { - best_self: HeaderId(0, 0), - best_finalized_self: HeaderId(0, 0), - best_finalized_peer_at_best_self: HeaderId(0, 0), - actual_best_finalized_peer_at_best_self: HeaderId(0, 0), - }, - target_latest_received_nonce: 0, - ..Default::default() - }, - Arc::new(|data: &mut TestClientData| { - if data.is_source_reconnected { - data.is_source_fails = false; - data.is_target_fails = true; - } - }), - Arc::new(move |data: &mut TestClientData| { - if data.is_target_reconnected { - data.is_target_fails = false; - } - if data.target_state.best_finalized_peer_at_best_self.0 < 10 { - data.target_state.best_finalized_peer_at_best_self = HeaderId( - data.target_state.best_finalized_peer_at_best_self.0 + 1, - data.target_state.best_finalized_peer_at_best_self.0 + 1, - ); - } - if !data.submitted_messages_proofs.is_empty() { - exit_sender.unbounded_send(()).unwrap(); - } - }), - exit_receiver.into_future().map(|(_, _)| ()), - ); - - assert_eq!(result.submitted_messages_proofs, vec![(1..=1, None)]); - } - - #[test] - fn message_lane_loop_works() { - let (exit_sender, exit_receiver) = unbounded(); - let result = run_loop_test( - TestClientData { - source_state: ClientState { - best_self: HeaderId(10, 10), - best_finalized_self: HeaderId(10, 10), - best_finalized_peer_at_best_self: HeaderId(0, 0), - actual_best_finalized_peer_at_best_self: HeaderId(0, 0), - }, - source_latest_generated_nonce: 10, - target_state: ClientState { - best_self: HeaderId(0, 0), - best_finalized_self: HeaderId(0, 0), - best_finalized_peer_at_best_self: HeaderId(0, 0), - actual_best_finalized_peer_at_best_self: HeaderId(0, 0), - }, - target_latest_received_nonce: 0, - ..Default::default() - }, - Arc::new(|data: &mut TestClientData| { - // blocks are produced on every tick - data.source_state.best_self = - HeaderId(data.source_state.best_self.0 + 1, data.source_state.best_self.1 + 1); - data.source_state.best_finalized_self = data.source_state.best_self; - // headers relay must only be started when we need new target headers at source node - if data.target_to_source_header_required.is_some() { - assert!( - data.source_state.best_finalized_peer_at_best_self.0 < - data.target_state.best_self.0 - ); - data.target_to_source_header_required = None; - } - // syncing target headers -> source chain - if let Some(last_requirement) = data.target_to_source_header_requirements.last() { - if *last_requirement != data.source_state.best_finalized_peer_at_best_self { - data.source_state.best_finalized_peer_at_best_self = *last_requirement; - } - } - }), - Arc::new(move |data: 
&mut TestClientData| { - // blocks are produced on every tick - data.target_state.best_self = - HeaderId(data.target_state.best_self.0 + 1, data.target_state.best_self.1 + 1); - data.target_state.best_finalized_self = data.target_state.best_self; - // headers relay must only be started when we need new source headers at target node - if data.source_to_target_header_required.is_some() { - assert!( - data.target_state.best_finalized_peer_at_best_self.0 < - data.source_state.best_self.0 - ); - data.source_to_target_header_required = None; - } - // syncing source headers -> target chain - if let Some(last_requirement) = data.source_to_target_header_requirements.last() { - if *last_requirement != data.target_state.best_finalized_peer_at_best_self { - data.target_state.best_finalized_peer_at_best_self = *last_requirement; - } - } - // if source has received all messages receiving confirmations => stop - if data.source_latest_confirmed_received_nonce == 10 { - exit_sender.unbounded_send(()).unwrap(); - } - }), - exit_receiver.into_future().map(|(_, _)| ()), - ); - - // there are no strict restrictions on when reward confirmation should come - // (because `max_unconfirmed_nonces_at_target` is `100` in tests and this confirmation - // depends on the state of both clients) - // => we do not check it here - assert_eq!(result.submitted_messages_proofs[0].0, 1..=4); - assert_eq!(result.submitted_messages_proofs[1].0, 5..=8); - assert_eq!(result.submitted_messages_proofs[2].0, 9..=10); - assert!(!result.submitted_messages_receiving_proofs.is_empty()); - - // check that we have at least once required new source->target or target->source headers - assert!(!result.target_to_source_header_requirements.is_empty()); - assert!(!result.source_to_target_header_requirements.is_empty()); - } -} diff --git a/polkadot/bridges/relays/messages/src/message_race_delivery.rs b/polkadot/bridges/relays/messages/src/message_race_delivery.rs deleted file mode 100644 index dc994364f17..00000000000 --- a/polkadot/bridges/relays/messages/src/message_race_delivery.rs +++ /dev/null @@ -1,1076 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -//! Message delivery race delivers proof-of-messages from "lane.source" to "lane.target". 
- -use std::{collections::VecDeque, marker::PhantomData, ops::RangeInclusive, time::Duration}; - -use async_trait::async_trait; -use futures::stream::FusedStream; - -use bp_messages::{MessageNonce, UnrewardedRelayersState, Weight}; -use relay_utils::FailedClient; - -use crate::{ - message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}, - message_lane_loop::{ - MessageDeliveryParams, MessageDetailsMap, MessageProofParameters, - SourceClient as MessageLaneSourceClient, SourceClientState, - TargetClient as MessageLaneTargetClient, TargetClientState, - }, - message_race_loop::{ - MessageRace, NoncesRange, RaceState, RaceStrategy, SourceClient, SourceClientNonces, - TargetClient, TargetClientNonces, - }, - message_race_strategy::BasicStrategy, - metrics::MessageLaneLoopMetrics, - relay_strategy::{EnforcementStrategy, RelayMessagesBatchReference, RelayStrategy}, -}; - -/// Run message delivery race. -pub async fn run( - source_client: impl MessageLaneSourceClient

<P>, -	source_state_updates: impl FusedStream<Item = SourceClientState<P>>, -	target_client: impl MessageLaneTargetClient<P>
, - target_state_updates: impl FusedStream>, - stall_timeout: Duration, - metrics_msg: Option, - params: MessageDeliveryParams, -) -> Result<(), FailedClient> { - crate::message_race_loop::run( - MessageDeliveryRaceSource { - client: source_client.clone(), - metrics_msg: metrics_msg.clone(), - _phantom: Default::default(), - }, - source_state_updates, - MessageDeliveryRaceTarget { - client: target_client.clone(), - metrics_msg, - _phantom: Default::default(), - }, - target_state_updates, - stall_timeout, - MessageDeliveryStrategy:: { - lane_source_client: source_client, - lane_target_client: target_client, - max_unrewarded_relayer_entries_at_target: params - .max_unrewarded_relayer_entries_at_target, - max_unconfirmed_nonces_at_target: params.max_unconfirmed_nonces_at_target, - max_messages_in_single_batch: params.max_messages_in_single_batch, - max_messages_weight_in_single_batch: params.max_messages_weight_in_single_batch, - max_messages_size_in_single_batch: params.max_messages_size_in_single_batch, - relay_strategy: params.relay_strategy, - latest_confirmed_nonces_at_source: VecDeque::new(), - target_nonces: None, - strategy: BasicStrategy::new(), - }, - ) - .await -} - -/// Message delivery race. -struct MessageDeliveryRace

<P>(std::marker::PhantomData<P>); - -impl<P: MessageLane> MessageRace for MessageDeliveryRace<P> { -	type SourceHeaderId = SourceHeaderIdOf<P>; -	type TargetHeaderId = TargetHeaderIdOf<P>
; - - type MessageNonce = MessageNonce; - type Proof = P::MessagesProof; - - fn source_name() -> String { - format!("{}::MessagesDelivery", P::SOURCE_NAME) - } - - fn target_name() -> String { - format!("{}::MessagesDelivery", P::TARGET_NAME) - } -} - -/// Message delivery race source, which is a source of the lane. -struct MessageDeliveryRaceSource { - client: C, - metrics_msg: Option, - _phantom: PhantomData

<P>, -} - -#[async_trait] -impl<P, C> SourceClient<MessageDeliveryRace<P>> for MessageDeliveryRaceSource<P, C> -where -	P: MessageLane, -	C: MessageLaneSourceClient<P>, -{ -	type Error = C::Error; -	type NoncesRange = MessageDetailsMap<P::SourceChainBalance>; -	type ProofParameters = MessageProofParameters; - -	async fn nonces( -		&self, -		at_block: SourceHeaderIdOf<P>, -		prev_latest_nonce: MessageNonce, -	) -> Result<(SourceHeaderIdOf<P>
, SourceClientNonces), Self::Error> { - let (at_block, latest_generated_nonce) = - self.client.latest_generated_nonce(at_block).await?; - let (at_block, latest_confirmed_nonce) = - self.client.latest_confirmed_received_nonce(at_block).await?; - - if let Some(metrics_msg) = self.metrics_msg.as_ref() { - metrics_msg.update_source_latest_generated_nonce::

<P>(latest_generated_nonce); -			metrics_msg.update_source_latest_confirmed_nonce::<P>
(latest_confirmed_nonce); - } - - let new_nonces = if latest_generated_nonce > prev_latest_nonce { - self.client - .generated_message_details( - at_block.clone(), - prev_latest_nonce + 1..=latest_generated_nonce, - ) - .await? - } else { - MessageDetailsMap::new() - }; - - Ok(( - at_block, - SourceClientNonces { new_nonces, confirmed_nonce: Some(latest_confirmed_nonce) }, - )) - } - - async fn generate_proof( - &self, - at_block: SourceHeaderIdOf

<P>, -		nonces: RangeInclusive<MessageNonce>, -		proof_parameters: Self::ProofParameters, -	) -> Result<(SourceHeaderIdOf<P>, RangeInclusive<MessageNonce>, P::MessagesProof), Self::Error> -	{ -		self.client.prove_messages(at_block, nonces, proof_parameters).await -	} -} - -/// Message delivery race target, which is a target of the lane. -struct MessageDeliveryRaceTarget<P: MessageLane, C> { -	client: C, -	metrics_msg: Option<MessageLaneLoopMetrics>, -	_phantom: PhantomData<P>, -} - -#[async_trait] -impl<P, C> TargetClient<MessageDeliveryRace<P>> for MessageDeliveryRaceTarget<P, C> -where -	P: MessageLane, -	C: MessageLaneTargetClient<P>, -{ -	type Error = C::Error; -	type TargetNoncesData = DeliveryRaceTargetNoncesData; - -	async fn require_source_header(&self, id: SourceHeaderIdOf<P>) { -		self.client.require_source_header_on_target(id).await -	} - -	async fn nonces( -		&self, -		at_block: TargetHeaderIdOf<P>, -		update_metrics: bool, -	) -> Result<(TargetHeaderIdOf<P>
, TargetClientNonces), Self::Error> - { - let (at_block, latest_received_nonce) = self.client.latest_received_nonce(at_block).await?; - let (at_block, latest_confirmed_nonce) = - self.client.latest_confirmed_received_nonce(at_block).await?; - let (at_block, unrewarded_relayers) = - self.client.unrewarded_relayers_state(at_block).await?; - - if update_metrics { - if let Some(metrics_msg) = self.metrics_msg.as_ref() { - metrics_msg.update_target_latest_received_nonce::

<P>(latest_received_nonce); -				metrics_msg.update_target_latest_confirmed_nonce::<P>
(latest_confirmed_nonce); - } - } - - Ok(( - at_block, - TargetClientNonces { - latest_nonce: latest_received_nonce, - nonces_data: DeliveryRaceTargetNoncesData { - confirmed_nonce: latest_confirmed_nonce, - unrewarded_relayers, - }, - }, - )) - } - - async fn submit_proof( - &self, - generated_at_block: SourceHeaderIdOf

, - nonces: RangeInclusive, - proof: P::MessagesProof, - ) -> Result, Self::Error> { - self.client.submit_messages_proof(generated_at_block, nonces, proof).await - } -} - -/// Additional nonces data from the target client used by message delivery race. -#[derive(Debug, Clone)] -struct DeliveryRaceTargetNoncesData { - /// The latest nonce that we know: (1) has been delivered to us (2) has been confirmed - /// back to the source node (by confirmations race) and (3) relayer has received - /// reward for (and this has been confirmed by the message delivery race). - confirmed_nonce: MessageNonce, - /// State of the unrewarded relayers set at the target node. - unrewarded_relayers: UnrewardedRelayersState, -} - -/// Messages delivery strategy. -struct MessageDeliveryStrategy { - /// The client that is connected to the message lane source node. - lane_source_client: SC, - /// The client that is connected to the message lane target node. - lane_target_client: TC, - /// Maximal unrewarded relayer entries at target client. - max_unrewarded_relayer_entries_at_target: MessageNonce, - /// Maximal unconfirmed nonces at target client. - max_unconfirmed_nonces_at_target: MessageNonce, - /// Maximal number of messages in the single delivery transaction. - max_messages_in_single_batch: MessageNonce, - /// Maximal cumulative messages weight in the single delivery transaction. - max_messages_weight_in_single_batch: Weight, - /// Maximal messages size in the single delivery transaction. - max_messages_size_in_single_batch: u32, - /// Relayer operating mode. - relay_strategy: Strategy, - /// Latest confirmed nonces at the source client + the header id where we have first met this - /// nonce. - latest_confirmed_nonces_at_source: VecDeque<(SourceHeaderIdOf

<P>, MessageNonce)>, -	/// Target nonces from the source client. -	target_nonces: Option<TargetClientNonces<DeliveryRaceTargetNoncesData>>, -	/// Basic delivery strategy. -	strategy: MessageDeliveryStrategyBase<P>, -} - -type MessageDeliveryStrategyBase<P> = BasicStrategy< -	<P as MessageLane>::SourceHeaderNumber, -	<P as MessageLane>::SourceHeaderHash, -	<P as MessageLane>::TargetHeaderNumber, -	<P as MessageLane>::TargetHeaderHash, -	MessageDetailsMap<<P as MessageLane>::SourceChainBalance>, -	<P as MessageLane>
::MessagesProof, ->; - -impl std::fmt::Debug - for MessageDeliveryStrategy -{ - fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - fmt.debug_struct("MessageDeliveryStrategy") - .field( - "max_unrewarded_relayer_entries_at_target", - &self.max_unrewarded_relayer_entries_at_target, - ) - .field("max_unconfirmed_nonces_at_target", &self.max_unconfirmed_nonces_at_target) - .field("max_messages_in_single_batch", &self.max_messages_in_single_batch) - .field("max_messages_weight_in_single_batch", &self.max_messages_weight_in_single_batch) - .field("max_messages_size_in_single_batch", &self.max_messages_size_in_single_batch) - .field("latest_confirmed_nonces_at_source", &self.latest_confirmed_nonces_at_source) - .field("target_nonces", &self.target_nonces) - .field("strategy", &self.strategy) - .finish() - } -} - -impl MessageDeliveryStrategy { - /// Returns total weight of all undelivered messages. - fn total_queued_dispatch_weight(&self) -> Weight { - self.strategy - .source_queue() - .iter() - .flat_map(|(_, range)| range.values().map(|details| details.dispatch_weight)) - .fold(0, |total, weight| total.saturating_add(weight)) - } -} - -#[async_trait] -impl - RaceStrategy, TargetHeaderIdOf

<P>, P::MessagesProof> -	for MessageDeliveryStrategy<P, SC, TC, Strategy> -where -	P: MessageLane, -	SC: MessageLaneSourceClient<P>, -	TC: MessageLaneTargetClient<P>, -{ -	type SourceNoncesRange = MessageDetailsMap<P::SourceChainBalance>; -	type ProofParameters = MessageProofParameters; -	type TargetNoncesData = DeliveryRaceTargetNoncesData; - -	fn is_empty(&self) -> bool { -		self.strategy.is_empty() -	} - -	fn required_source_header_at_target( -		&self, -		current_best: &SourceHeaderIdOf<P>, -	) -> Option<SourceHeaderIdOf<P>> { -		let header_required_for_messages_delivery = -			self.strategy.required_source_header_at_target(current_best); -		let header_required_for_reward_confirmations_delivery = -			self.latest_confirmed_nonces_at_source.back().map(|(id, _)| id.clone()); -		match ( -			header_required_for_messages_delivery, -			header_required_for_reward_confirmations_delivery, -		) { -			(Some(id1), Some(id2)) => Some(if id1.0 > id2.0 { id1 } else { id2 }), -			(a, b) => a.or(b), -		} -	} - -	fn best_at_source(&self) -> Option<MessageNonce> { -		self.strategy.best_at_source() -	} - -	fn best_at_target(&self) -> Option<MessageNonce> { -		self.strategy.best_at_target() -	} - -	fn source_nonces_updated( -		&mut self, -		at_block: SourceHeaderIdOf<P>
, - nonces: SourceClientNonces, - ) { - if let Some(confirmed_nonce) = nonces.confirmed_nonce { - let is_confirmed_nonce_updated = self - .latest_confirmed_nonces_at_source - .back() - .map(|(_, prev_nonce)| *prev_nonce != confirmed_nonce) - .unwrap_or(true); - if is_confirmed_nonce_updated { - self.latest_confirmed_nonces_at_source - .push_back((at_block.clone(), confirmed_nonce)); - } - } - self.strategy.source_nonces_updated(at_block, nonces) - } - - fn best_target_nonces_updated( - &mut self, - nonces: TargetClientNonces, - race_state: &mut RaceState, TargetHeaderIdOf

, P::MessagesProof>, - ) { - // best target nonces must always be ge than finalized target nonces - let mut target_nonces = self.target_nonces.take().unwrap_or_else(|| nonces.clone()); - target_nonces.nonces_data = nonces.nonces_data.clone(); - target_nonces.latest_nonce = std::cmp::max(target_nonces.latest_nonce, nonces.latest_nonce); - self.target_nonces = Some(target_nonces); - - self.strategy.best_target_nonces_updated( - TargetClientNonces { latest_nonce: nonces.latest_nonce, nonces_data: () }, - race_state, - ) - } - - fn finalized_target_nonces_updated( - &mut self, - nonces: TargetClientNonces, - race_state: &mut RaceState, TargetHeaderIdOf

, P::MessagesProof>, - ) { - if let Some(ref best_finalized_source_header_id_at_best_target) = - race_state.best_finalized_source_header_id_at_best_target - { - let oldest_header_number_to_keep = best_finalized_source_header_id_at_best_target.0; - while self - .latest_confirmed_nonces_at_source - .front() - .map(|(id, _)| id.0 < oldest_header_number_to_keep) - .unwrap_or(false) - { - self.latest_confirmed_nonces_at_source.pop_front(); - } - } - - if let Some(ref mut target_nonces) = self.target_nonces { - target_nonces.latest_nonce = - std::cmp::max(target_nonces.latest_nonce, nonces.latest_nonce); - } - - self.strategy.finalized_target_nonces_updated( - TargetClientNonces { latest_nonce: nonces.latest_nonce, nonces_data: () }, - race_state, - ) - } - - async fn select_nonces_to_deliver( - &mut self, - race_state: RaceState, TargetHeaderIdOf

, P::MessagesProof>, - ) -> Option<(RangeInclusive, Self::ProofParameters)> { - let best_finalized_source_header_id_at_best_target = - race_state.best_finalized_source_header_id_at_best_target.clone()?; - let latest_confirmed_nonce_at_source = self - .latest_confirmed_nonces_at_source - .iter() - .take_while(|(id, _)| id.0 <= best_finalized_source_header_id_at_best_target.0) - .last() - .map(|(_, nonce)| *nonce)?; - let target_nonces = self.target_nonces.as_ref()?; - - // There's additional condition in the message delivery race: target would reject messages - // if there are too much unconfirmed messages at the inbound lane. - - // The receiving race is responsible to deliver confirmations back to the source chain. So - // if there's a lot of unconfirmed messages, let's wait until it'll be able to do its job. - let latest_received_nonce_at_target = target_nonces.latest_nonce; - let confirmations_missing = - latest_received_nonce_at_target.checked_sub(latest_confirmed_nonce_at_source); - match confirmations_missing { - Some(confirmations_missing) - if confirmations_missing >= self.max_unconfirmed_nonces_at_target => - { - log::debug!( - target: "bridge", - "Cannot deliver any more messages from {} to {}. Too many unconfirmed nonces \ - at target: target.latest_received={:?}, source.latest_confirmed={:?}, max={:?}", - MessageDeliveryRace::

<P>::source_name(), -						MessageDeliveryRace::<P>
::target_name(), - latest_received_nonce_at_target, - latest_confirmed_nonce_at_source, - self.max_unconfirmed_nonces_at_target, - ); - - return None - }, - _ => (), - } - - // Ok - we may have new nonces to deliver. But target may still reject new messages, because - // we haven't notified it that (some) messages have been confirmed. So we may want to - // include updated `source.latest_confirmed` in the proof. - // - // Important note: we're including outbound state lane proof whenever there are unconfirmed - // nonces on the target chain. Other strategy is to include it only if it's absolutely - // necessary. - let latest_confirmed_nonce_at_target = target_nonces.nonces_data.confirmed_nonce; - let outbound_state_proof_required = - latest_confirmed_nonce_at_target < latest_confirmed_nonce_at_source; - - // The target node would also reject messages if there are too many entries in the - // "unrewarded relayers" set. If we are unable to prove new rewards to the target node, then - // we should wait for confirmations race. - let unrewarded_relayer_entries_limit_reached = - target_nonces.nonces_data.unrewarded_relayers.unrewarded_relayer_entries >= - self.max_unrewarded_relayer_entries_at_target; - if unrewarded_relayer_entries_limit_reached { - // so there are already too many unrewarded relayer entries in the set - // - // => check if we can prove enough rewards. If not, we should wait for more rewards to - // be paid - let number_of_rewards_being_proved = - latest_confirmed_nonce_at_source.saturating_sub(latest_confirmed_nonce_at_target); - let enough_rewards_being_proved = number_of_rewards_being_proved >= - target_nonces.nonces_data.unrewarded_relayers.messages_in_oldest_entry; - if !enough_rewards_being_proved { - return None - } - } - - // If we're here, then the confirmations race did its job && sending side now knows that - // messages have been delivered. Now let's select nonces that we want to deliver. - // - // We may deliver at most: - // - // max_unconfirmed_nonces_at_target - (latest_received_nonce_at_target - - // latest_confirmed_nonce_at_target) - // - // messages in the batch. 
But since we're including outbound state proof in the batch, then - // it may be increased to: - // - // max_unconfirmed_nonces_at_target - (latest_received_nonce_at_target - - // latest_confirmed_nonce_at_source) - let future_confirmed_nonce_at_target = if outbound_state_proof_required { - latest_confirmed_nonce_at_source - } else { - latest_confirmed_nonce_at_target - }; - let max_nonces = latest_received_nonce_at_target - .checked_sub(future_confirmed_nonce_at_target) - .and_then(|diff| self.max_unconfirmed_nonces_at_target.checked_sub(diff)) - .unwrap_or_default(); - let max_nonces = std::cmp::min(max_nonces, self.max_messages_in_single_batch); - let max_messages_weight_in_single_batch = self.max_messages_weight_in_single_batch; - let max_messages_size_in_single_batch = self.max_messages_size_in_single_batch; - let lane_source_client = self.lane_source_client.clone(); - let lane_target_client = self.lane_target_client.clone(); - - let maximal_source_queue_index = - self.strategy.maximal_available_source_queue_index(race_state)?; - let previous_total_dispatch_weight = self.total_queued_dispatch_weight(); - let source_queue = self.strategy.source_queue(); - - let reference = RelayMessagesBatchReference { - max_messages_in_this_batch: max_nonces, - max_messages_weight_in_single_batch, - max_messages_size_in_single_batch, - lane_source_client: lane_source_client.clone(), - lane_target_client: lane_target_client.clone(), - nonces_queue: source_queue.clone(), - nonces_queue_range: 0..maximal_source_queue_index + 1, - }; - - let mut strategy = EnforcementStrategy::new(self.relay_strategy.clone()); - let range_end = strategy.decide(reference).await?; - - let range_begin = source_queue[0].1.begin(); - let selected_nonces = range_begin..=range_end; - self.strategy.remove_le_nonces_from_source_queue(range_end); - - let new_total_dispatch_weight = self.total_queued_dispatch_weight(); - let dispatch_weight = previous_total_dispatch_weight - new_total_dispatch_weight; - - Some(( - selected_nonces, - MessageProofParameters { outbound_state_proof_required, dispatch_weight }, - )) - } -} - -impl NoncesRange for MessageDetailsMap { - fn begin(&self) -> MessageNonce { - self.keys().next().cloned().unwrap_or_default() - } - - fn end(&self) -> MessageNonce { - self.keys().next_back().cloned().unwrap_or_default() - } - - fn greater_than(mut self, nonce: MessageNonce) -> Option { - let gte = self.split_off(&(nonce + 1)); - if gte.is_empty() { - None - } else { - Some(gte) - } - } -} - -#[cfg(test)] -mod tests { - use bp_runtime::messages::DispatchFeePayment; - - use crate::{ - message_lane_loop::{ - tests::{ - header_id, TestMessageLane, TestMessagesProof, TestSourceChainBalance, - TestSourceClient, TestSourceHeaderId, TestTargetClient, TestTargetHeaderId, - BASE_MESSAGE_DELIVERY_TRANSACTION_COST, CONFIRMATION_TRANSACTION_COST, - }, - MessageDetails, RelayerMode, - }, - relay_strategy::MixStrategy, - }; - - use super::*; - - const DEFAULT_DISPATCH_WEIGHT: Weight = 1; - const DEFAULT_SIZE: u32 = 1; - const DEFAULT_REWARD: TestSourceChainBalance = CONFIRMATION_TRANSACTION_COST + - BASE_MESSAGE_DELIVERY_TRANSACTION_COST + - DEFAULT_DISPATCH_WEIGHT + - (DEFAULT_SIZE as TestSourceChainBalance); - - type TestRaceState = RaceState; - type TestStrategy = - MessageDeliveryStrategy; - - fn source_nonces( - new_nonces: RangeInclusive, - confirmed_nonce: MessageNonce, - reward: TestSourceChainBalance, - dispatch_fee_payment: DispatchFeePayment, - ) -> SourceClientNonces> { - SourceClientNonces { - new_nonces: new_nonces 
- .into_iter() - .map(|nonce| { - ( - nonce, - MessageDetails { - dispatch_weight: DEFAULT_DISPATCH_WEIGHT, - size: DEFAULT_SIZE, - reward, - dispatch_fee_payment, - }, - ) - }) - .into_iter() - .collect(), - confirmed_nonce: Some(confirmed_nonce), - } - } - - fn prepare_strategy() -> (TestRaceState, TestStrategy) { - let mut race_state = RaceState { - best_finalized_source_header_id_at_source: Some(header_id(1)), - best_finalized_source_header_id_at_best_target: Some(header_id(1)), - best_target_header_id: Some(header_id(1)), - best_finalized_target_header_id: Some(header_id(1)), - nonces_to_submit: None, - nonces_submitted: None, - }; - - let mut race_strategy = TestStrategy { - max_unrewarded_relayer_entries_at_target: 4, - max_unconfirmed_nonces_at_target: 4, - max_messages_in_single_batch: 4, - max_messages_weight_in_single_batch: 4, - max_messages_size_in_single_batch: 4, - latest_confirmed_nonces_at_source: vec![(header_id(1), 19)].into_iter().collect(), - lane_source_client: TestSourceClient::default(), - lane_target_client: TestTargetClient::default(), - target_nonces: Some(TargetClientNonces { - latest_nonce: 19, - nonces_data: DeliveryRaceTargetNoncesData { - confirmed_nonce: 19, - unrewarded_relayers: UnrewardedRelayersState { - unrewarded_relayer_entries: 0, - messages_in_oldest_entry: 0, - total_messages: 0, - }, - }, - }), - strategy: BasicStrategy::new(), - relay_strategy: MixStrategy::new(RelayerMode::Altruistic), - }; - - race_strategy.strategy.source_nonces_updated( - header_id(1), - source_nonces(20..=23, 19, DEFAULT_REWARD, DispatchFeePayment::AtSourceChain), - ); - - let target_nonces = TargetClientNonces { latest_nonce: 19, nonces_data: () }; - race_strategy - .strategy - .best_target_nonces_updated(target_nonces.clone(), &mut race_state); - race_strategy - .strategy - .finalized_target_nonces_updated(target_nonces, &mut race_state); - - (race_state, race_strategy) - } - - fn proof_parameters(state_required: bool, weight: Weight) -> MessageProofParameters { - MessageProofParameters { - outbound_state_proof_required: state_required, - dispatch_weight: weight, - } - } - - #[test] - fn weights_map_works_as_nonces_range() { - fn build_map( - range: RangeInclusive, - ) -> MessageDetailsMap { - range - .map(|idx| { - ( - idx, - MessageDetails { - dispatch_weight: idx, - size: idx as _, - reward: idx as _, - dispatch_fee_payment: DispatchFeePayment::AtSourceChain, - }, - ) - }) - .collect() - } - - let map = build_map(20..=30); - - assert_eq!(map.begin(), 20); - assert_eq!(map.end(), 30); - assert_eq!(map.clone().greater_than(10), Some(build_map(20..=30))); - assert_eq!(map.clone().greater_than(19), Some(build_map(20..=30))); - assert_eq!(map.clone().greater_than(20), Some(build_map(21..=30))); - assert_eq!(map.clone().greater_than(25), Some(build_map(26..=30))); - assert_eq!(map.clone().greater_than(29), Some(build_map(30..=30))); - assert_eq!(map.greater_than(30), None); - } - - #[async_std::test] - async fn message_delivery_strategy_selects_messages_to_deliver() { - let (state, mut strategy) = prepare_strategy(); - - // both sides are ready to relay new messages - assert_eq!( - strategy.select_nonces_to_deliver(state).await, - Some(((20..=23), proof_parameters(false, 4))) - ); - } - - #[async_std::test] - async fn message_delivery_strategy_selects_nothing_if_too_many_confirmations_missing() { - let (state, mut strategy) = prepare_strategy(); - - // if there are already `max_unconfirmed_nonces_at_target` messages on target, - // we need to wait until confirmations will be 
delivered by receiving race - strategy.latest_confirmed_nonces_at_source = vec![( - header_id(1), - strategy.target_nonces.as_ref().unwrap().latest_nonce - - strategy.max_unconfirmed_nonces_at_target, - )] - .into_iter() - .collect(); - assert_eq!(strategy.select_nonces_to_deliver(state).await, None); - } - - #[async_std::test] - async fn message_delivery_strategy_includes_outbound_state_proof_when_new_nonces_are_available() - { - let (state, mut strategy) = prepare_strategy(); - - // if there are new confirmed nonces on source, we want to relay this information - // to target to prune rewards queue - let prev_confirmed_nonce_at_source = - strategy.latest_confirmed_nonces_at_source.back().unwrap().1; - strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce = - prev_confirmed_nonce_at_source - 1; - assert_eq!( - strategy.select_nonces_to_deliver(state).await, - Some(((20..=23), proof_parameters(true, 4))) - ); - } - - #[async_std::test] - async fn message_delivery_strategy_selects_nothing_if_there_are_too_many_unrewarded_relayers() { - let (state, mut strategy) = prepare_strategy(); - - // if there are already `max_unrewarded_relayer_entries_at_target` entries at target, - // we need to wait until rewards will be paid - { - let mut unrewarded_relayers = - &mut strategy.target_nonces.as_mut().unwrap().nonces_data.unrewarded_relayers; - unrewarded_relayers.unrewarded_relayer_entries = - strategy.max_unrewarded_relayer_entries_at_target; - unrewarded_relayers.messages_in_oldest_entry = 4; - } - assert_eq!(strategy.select_nonces_to_deliver(state).await, None); - } - - #[async_std::test] - async fn message_delivery_strategy_selects_nothing_if_proved_rewards_is_not_enough_to_remove_oldest_unrewarded_entry( - ) { - let (state, mut strategy) = prepare_strategy(); - - // if there are already `max_unrewarded_relayer_entries_at_target` entries at target, - // we need to prove at least `messages_in_oldest_entry` rewards - let prev_confirmed_nonce_at_source = - strategy.latest_confirmed_nonces_at_source.back().unwrap().1; - { - let mut nonces_data = &mut strategy.target_nonces.as_mut().unwrap().nonces_data; - nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 1; - let mut unrewarded_relayers = &mut nonces_data.unrewarded_relayers; - unrewarded_relayers.unrewarded_relayer_entries = - strategy.max_unrewarded_relayer_entries_at_target; - unrewarded_relayers.messages_in_oldest_entry = 4; - } - assert_eq!(strategy.select_nonces_to_deliver(state).await, None); - } - - #[async_std::test] - async fn message_delivery_strategy_includes_outbound_state_proof_if_proved_rewards_is_enough() { - let (state, mut strategy) = prepare_strategy(); - - // if there are already `max_unrewarded_relayer_entries_at_target` entries at target, - // we need to prove at least `messages_in_oldest_entry` rewards - let prev_confirmed_nonce_at_source = - strategy.latest_confirmed_nonces_at_source.back().unwrap().1; - { - let mut nonces_data = &mut strategy.target_nonces.as_mut().unwrap().nonces_data; - nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 3; - let mut unrewarded_relayers = &mut nonces_data.unrewarded_relayers; - unrewarded_relayers.unrewarded_relayer_entries = - strategy.max_unrewarded_relayer_entries_at_target; - unrewarded_relayers.messages_in_oldest_entry = 3; - } - assert_eq!( - strategy.select_nonces_to_deliver(state).await, - Some(((20..=23), proof_parameters(true, 4))) - ); - } - - #[async_std::test] - async fn message_delivery_strategy_limits_batch_by_messages_weight() { - let 
(state, mut strategy) = prepare_strategy(); - - // not all queued messages may fit in the batch, because batch has max weight - strategy.max_messages_weight_in_single_batch = 3; - assert_eq!( - strategy.select_nonces_to_deliver(state).await, - Some(((20..=22), proof_parameters(false, 3))) - ); - } - - #[async_std::test] - async fn message_delivery_strategy_accepts_single_message_even_if_its_weight_overflows_maximal_weight( - ) { - let (state, mut strategy) = prepare_strategy(); - - // first message doesn't fit in the batch, because it has weight (10) that overflows max - // weight (4) - strategy.strategy.source_queue_mut()[0].1.get_mut(&20).unwrap().dispatch_weight = 10; - assert_eq!( - strategy.select_nonces_to_deliver(state).await, - Some(((20..=20), proof_parameters(false, 10))) - ); - } - - #[async_std::test] - async fn message_delivery_strategy_limits_batch_by_messages_size() { - let (state, mut strategy) = prepare_strategy(); - - // not all queued messages may fit in the batch, because batch has max weight - strategy.max_messages_size_in_single_batch = 3; - assert_eq!( - strategy.select_nonces_to_deliver(state).await, - Some(((20..=22), proof_parameters(false, 3))) - ); - } - - #[async_std::test] - async fn message_delivery_strategy_accepts_single_message_even_if_its_weight_overflows_maximal_size( - ) { - let (state, mut strategy) = prepare_strategy(); - - // first message doesn't fit in the batch, because it has weight (10) that overflows max - // weight (4) - strategy.strategy.source_queue_mut()[0].1.get_mut(&20).unwrap().size = 10; - assert_eq!( - strategy.select_nonces_to_deliver(state).await, - Some(((20..=20), proof_parameters(false, 1))) - ); - } - - #[async_std::test] - async fn message_delivery_strategy_limits_batch_by_messages_count_when_there_is_upper_limit() { - let (state, mut strategy) = prepare_strategy(); - - // not all queued messages may fit in the batch, because batch has max number of messages - // limit - strategy.max_messages_in_single_batch = 3; - assert_eq!( - strategy.select_nonces_to_deliver(state).await, - Some(((20..=22), proof_parameters(false, 3))) - ); - } - - #[async_std::test] - async fn message_delivery_strategy_limits_batch_by_messages_count_when_there_are_unconfirmed_nonces( - ) { - let (state, mut strategy) = prepare_strategy(); - - // 1 delivery confirmation from target to source is still missing, so we may only - // relay 3 new messages - let prev_confirmed_nonce_at_source = - strategy.latest_confirmed_nonces_at_source.back().unwrap().1; - strategy.latest_confirmed_nonces_at_source = - vec![(header_id(1), prev_confirmed_nonce_at_source - 1)].into_iter().collect(); - strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce = - prev_confirmed_nonce_at_source - 1; - assert_eq!( - strategy.select_nonces_to_deliver(state).await, - Some(((20..=22), proof_parameters(false, 3))) - ); - } - - #[async_std::test] - async fn message_delivery_strategy_waits_for_confirmed_nonce_header_to_appear_on_target() { - // 1 delivery confirmation from target to source is still missing, so we may deliver - // reward confirmation with our message delivery transaction. But the problem is that - // the reward has been paid at header 2 && this header is still unknown to target node. 
- // - // => so we can't deliver more than 3 messages - let (mut state, mut strategy) = prepare_strategy(); - let prev_confirmed_nonce_at_source = - strategy.latest_confirmed_nonces_at_source.back().unwrap().1; - strategy.latest_confirmed_nonces_at_source = vec![ - (header_id(1), prev_confirmed_nonce_at_source - 1), - (header_id(2), prev_confirmed_nonce_at_source), - ] - .into_iter() - .collect(); - strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce = - prev_confirmed_nonce_at_source - 1; - state.best_finalized_source_header_id_at_best_target = Some(header_id(1)); - assert_eq!( - strategy.select_nonces_to_deliver(state).await, - Some(((20..=22), proof_parameters(false, 3))) - ); - - // the same situation, but the header 2 is known to the target node, so we may deliver - // reward confirmation - let (mut state, mut strategy) = prepare_strategy(); - let prev_confirmed_nonce_at_source = - strategy.latest_confirmed_nonces_at_source.back().unwrap().1; - strategy.latest_confirmed_nonces_at_source = vec![ - (header_id(1), prev_confirmed_nonce_at_source - 1), - (header_id(2), prev_confirmed_nonce_at_source), - ] - .into_iter() - .collect(); - strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce = - prev_confirmed_nonce_at_source - 1; - state.best_finalized_source_header_id_at_source = Some(header_id(2)); - state.best_finalized_source_header_id_at_best_target = Some(header_id(2)); - assert_eq!( - strategy.select_nonces_to_deliver(state).await, - Some(((20..=23), proof_parameters(true, 4))) - ); - } - - #[async_std::test] - async fn source_header_is_required_when_confirmations_are_required() { - // let's prepare situation when: - // - all messages [20; 23] have been generated at source block#1; - let (mut state, mut strategy) = prepare_strategy(); - // - // - messages [20; 21] have been delivered, but messages [11; 20] can't be delivered because - // of unrewarded relayers vector capacity; - strategy.max_unconfirmed_nonces_at_target = 2; - assert_eq!( - strategy.select_nonces_to_deliver(state.clone()).await, - Some(((20..=21), proof_parameters(false, 2))) - ); - strategy.finalized_target_nonces_updated( - TargetClientNonces { - latest_nonce: 21, - nonces_data: DeliveryRaceTargetNoncesData { - confirmed_nonce: 19, - unrewarded_relayers: UnrewardedRelayersState { - unrewarded_relayer_entries: 2, - messages_in_oldest_entry: 2, - total_messages: 2, - }, - }, - }, - &mut state, - ); - assert_eq!(strategy.select_nonces_to_deliver(state).await, None); - // - // - messages [1; 10] receiving confirmation has been delivered at source block#2; - strategy.source_nonces_updated( - header_id(2), - SourceClientNonces { new_nonces: MessageDetailsMap::new(), confirmed_nonce: Some(21) }, - ); - // - // - so now we'll need to relay source block#11 to be able to accept messages [11; 20]. 
- assert_eq!(strategy.required_source_header_at_target(&header_id(1)), Some(header_id(2))); - } - - #[async_std::test] - async fn rational_relayer_is_delivering_messages_if_cost_is_equal_to_reward() { - let (state, mut strategy) = prepare_strategy(); - strategy.relay_strategy = MixStrategy::new(RelayerMode::Rational); - - // so now we have: - // - 20..=23 with reward = cost - // => strategy shall select all 20..=23 - assert_eq!( - strategy.select_nonces_to_deliver(state).await, - Some(((20..=23), proof_parameters(false, 4))) - ); - } - - #[async_std::test] - async fn rational_relayer_is_not_delivering_messages_if_cost_is_larger_than_reward() { - let (mut state, mut strategy) = prepare_strategy(); - let nonces = source_nonces( - 24..=25, - 19, - DEFAULT_REWARD - BASE_MESSAGE_DELIVERY_TRANSACTION_COST, - DispatchFeePayment::AtSourceChain, - ); - strategy.strategy.source_nonces_updated(header_id(2), nonces); - state.best_finalized_source_header_id_at_best_target = Some(header_id(2)); - strategy.relay_strategy = MixStrategy::new(RelayerMode::Rational); - - // so now we have: - // - 20..=23 with reward = cost - // - 24..=25 with reward less than cost - // => strategy shall only select 20..=23 - assert_eq!( - strategy.select_nonces_to_deliver(state).await, - Some(((20..=23), proof_parameters(false, 4))) - ); - } - - #[async_std::test] - async fn rational_relayer_is_delivering_unpaid_messages() { - async fn test_with_dispatch_fee_payment( - dispatch_fee_payment: DispatchFeePayment, - ) -> Option<(RangeInclusive, MessageProofParameters)> { - let (mut state, mut strategy) = prepare_strategy(); - let nonces = source_nonces( - 24..=24, - 19, - DEFAULT_REWARD - DEFAULT_DISPATCH_WEIGHT, - dispatch_fee_payment, - ); - strategy.strategy.source_nonces_updated(header_id(2), nonces); - state.best_finalized_source_header_id_at_best_target = Some(header_id(2)); - strategy.max_unrewarded_relayer_entries_at_target = 100; - strategy.max_unconfirmed_nonces_at_target = 100; - strategy.max_messages_in_single_batch = 100; - strategy.max_messages_weight_in_single_batch = 100; - strategy.max_messages_size_in_single_batch = 100; - strategy.relay_strategy = MixStrategy::new(RelayerMode::Rational); - - // so now we have: - // - 20..=23 with reward = cost - // - 24..=24 with reward less than cost, but we're deducting `DEFAULT_DISPATCH_WEIGHT` - // from the cost, so it should be fine; - // => when MSG#24 fee is paid at the target chain, strategy shall select all 20..=24 - // => when MSG#25 fee is paid at the source chain, strategy shall only select 20..=23 - strategy.select_nonces_to_deliver(state).await - } - - assert_eq!( - test_with_dispatch_fee_payment(DispatchFeePayment::AtTargetChain).await, - Some(((20..=24), proof_parameters(false, 5))) - ); - assert_eq!( - test_with_dispatch_fee_payment(DispatchFeePayment::AtSourceChain).await, - Some(((20..=23), proof_parameters(false, 4))) - ); - } - - #[async_std::test] - async fn relayer_uses_flattened_view_of_the_source_queue_to_select_nonces() { - // Real scenario that has happened on test deployments: - // 1) relayer witnessed M1 at block 1 => it has separate entry in the `source_queue` - // 2) relayer witnessed M2 at block 2 => it has separate entry in the `source_queue` - // 3) if block 2 is known to the target node, then both M1 and M2 are selected for single - // delivery, even though weight(M1+M2) > larger than largest allowed weight - // - // This was happening because selector (`select_nonces_for_delivery_transaction`) has been - // called for every `source_queue` 
entry separately without preserving any context. - let (mut state, mut strategy) = prepare_strategy(); - let nonces = source_nonces(24..=25, 19, DEFAULT_REWARD, DispatchFeePayment::AtSourceChain); - strategy.strategy.source_nonces_updated(header_id(2), nonces); - strategy.max_unrewarded_relayer_entries_at_target = 100; - strategy.max_unconfirmed_nonces_at_target = 100; - strategy.max_messages_in_single_batch = 5; - strategy.max_messages_weight_in_single_batch = 100; - strategy.max_messages_size_in_single_batch = 100; - state.best_finalized_source_header_id_at_best_target = Some(header_id(2)); - - assert_eq!( - strategy.select_nonces_to_deliver(state).await, - Some(((20..=24), proof_parameters(false, 5))) - ); - } -} diff --git a/polkadot/bridges/relays/messages/src/message_race_loop.rs b/polkadot/bridges/relays/messages/src/message_race_loop.rs deleted file mode 100644 index a7254f70ee4..00000000000 --- a/polkadot/bridges/relays/messages/src/message_race_loop.rs +++ /dev/null @@ -1,634 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -//! Loop that is serving single race within message lane. This could be -//! message delivery race, receiving confirmations race or processing -//! confirmations race. -//! -//! The idea of the race is simple - we have `nonce`-s on source and target -//! nodes. We're trying to prove that the source node has this nonce (and -//! associated data - like messages, lane state, etc) to the target node by -//! generating and submitting proof. - -use crate::message_lane_loop::ClientState; - -use async_trait::async_trait; -use bp_messages::MessageNonce; -use futures::{ - future::FutureExt, - stream::{FusedStream, StreamExt}, -}; -use relay_utils::{process_future_result, retry_backoff, FailedClient, MaybeConnectionError}; -use std::{ - fmt::Debug, - ops::RangeInclusive, - time::{Duration, Instant}, -}; - -/// One of races within lane. -pub trait MessageRace { - /// Header id of the race source. - type SourceHeaderId: Debug + Clone + PartialEq; - /// Header id of the race source. - type TargetHeaderId: Debug + Clone + PartialEq; - - /// Message nonce used in the race. - type MessageNonce: Debug + Clone; - /// Proof that is generated and delivered in this race. - type Proof: Debug + Clone; - - /// Name of the race source. - fn source_name() -> String; - /// Name of the race target. - fn target_name() -> String; -} - -/// State of race source client. -type SourceClientState

<P> = -	ClientState<<P as MessageRace>::SourceHeaderId, <P as MessageRace>::TargetHeaderId>; - -/// State of race target client. -type TargetClientState<P> = -	ClientState<<P as MessageRace>::TargetHeaderId, <P as MessageRace>
::SourceHeaderId>; - -/// Inclusive nonces range. -pub trait NoncesRange: Debug + Sized { - /// Get begin of the range. - fn begin(&self) -> MessageNonce; - /// Get end of the range. - fn end(&self) -> MessageNonce; - /// Returns new range with current range nonces that are greater than the passed `nonce`. - /// If there are no such nonces, `None` is returned. - fn greater_than(self, nonce: MessageNonce) -> Option; -} - -/// Nonces on the race source client. -#[derive(Debug, Clone)] -pub struct SourceClientNonces { - /// New nonces range known to the client. `New` here means all nonces generated after - /// `prev_latest_nonce` passed to the `SourceClient::nonces` method. - pub new_nonces: NoncesRange, - /// The latest nonce that is confirmed to the bridged client. This nonce only makes - /// sense in some races. In other races it is `None`. - pub confirmed_nonce: Option, -} - -/// Nonces on the race target client. -#[derive(Debug, Clone)] -pub struct TargetClientNonces { - /// The latest nonce that is known to the target client. - pub latest_nonce: MessageNonce, - /// Additional data from target node that may be used by the race. - pub nonces_data: TargetNoncesData, -} - -/// One of message lane clients, which is source client for the race. -#[async_trait] -pub trait SourceClient { - /// Type of error these clients returns. - type Error: std::fmt::Debug + MaybeConnectionError; - /// Type of nonces range returned by the source client. - type NoncesRange: NoncesRange; - /// Additional proof parameters required to generate proof. - type ProofParameters; - - /// Return nonces that are known to the source client. - async fn nonces( - &self, - at_block: P::SourceHeaderId, - prev_latest_nonce: MessageNonce, - ) -> Result<(P::SourceHeaderId, SourceClientNonces), Self::Error>; - /// Generate proof for delivering to the target client. - async fn generate_proof( - &self, - at_block: P::SourceHeaderId, - nonces: RangeInclusive, - proof_parameters: Self::ProofParameters, - ) -> Result<(P::SourceHeaderId, RangeInclusive, P::Proof), Self::Error>; -} - -/// One of message lane clients, which is target client for the race. -#[async_trait] -pub trait TargetClient { - /// Type of error these clients returns. - type Error: std::fmt::Debug + MaybeConnectionError; - /// Type of the additional data from the target client, used by the race. - type TargetNoncesData: std::fmt::Debug; - - /// Ask headers relay to relay finalized headers up to (and including) given header - /// from race source to race target. - async fn require_source_header(&self, id: P::SourceHeaderId); - - /// Return nonces that are known to the target client. - async fn nonces( - &self, - at_block: P::TargetHeaderId, - update_metrics: bool, - ) -> Result<(P::TargetHeaderId, TargetClientNonces), Self::Error>; - /// Submit proof to the target client. - async fn submit_proof( - &self, - generated_at_block: P::SourceHeaderId, - nonces: RangeInclusive, - proof: P::Proof, - ) -> Result, Self::Error>; -} - -/// Race strategy. -#[async_trait] -pub trait RaceStrategy: Debug { - /// Type of nonces range expected from the source client. - type SourceNoncesRange: NoncesRange; - /// Additional proof parameters required to generate proof. - type ProofParameters; - /// Additional data expected from the target client. - type TargetNoncesData; - - /// Should return true if nothing has to be synced. - fn is_empty(&self) -> bool; - /// Return id of source header that is required to be on target to continue synchronization. 
- fn required_source_header_at_target( - &self, - current_best: &SourceHeaderId, - ) -> Option; - /// Return the best nonce at source node. - /// - /// `Some` is returned only if we are sure that the value is greater or equal - /// than the result of `best_at_target`. - fn best_at_source(&self) -> Option; - /// Return the best nonce at target node. - /// - /// May return `None` if value is yet unknown. - fn best_at_target(&self) -> Option; - - /// Called when nonces are updated at source node of the race. - fn source_nonces_updated( - &mut self, - at_block: SourceHeaderId, - nonces: SourceClientNonces, - ); - /// Called when best nonces are updated at target node of the race. - fn best_target_nonces_updated( - &mut self, - nonces: TargetClientNonces, - race_state: &mut RaceState, - ); - /// Called when finalized nonces are updated at target node of the race. - fn finalized_target_nonces_updated( - &mut self, - nonces: TargetClientNonces, - race_state: &mut RaceState, - ); - /// Should return `Some(nonces)` if we need to deliver proof of `nonces` (and associated - /// data) from source to target node. - /// Additionally, parameters required to generate proof are returned. - async fn select_nonces_to_deliver( - &mut self, - race_state: RaceState, - ) -> Option<(RangeInclusive, Self::ProofParameters)>; -} - -/// State of the race. -#[derive(Debug, Clone)] -pub struct RaceState { - /// Best finalized source header id at the source client. - pub best_finalized_source_header_id_at_source: Option, - /// Best finalized source header id at the best block on the target - /// client (at the `best_finalized_source_header_id_at_best_target`). - pub best_finalized_source_header_id_at_best_target: Option, - /// The best header id at the target client. - pub best_target_header_id: Option, - /// Best finalized header id at the target client. - pub best_finalized_target_header_id: Option, - /// Range of nonces that we have selected to submit. - pub nonces_to_submit: Option<(SourceHeaderId, RangeInclusive, Proof)>, - /// Range of nonces that is currently submitted. - pub nonces_submitted: Option>, -} - -/// Run race loop until connection with target or source node is lost. -pub async fn run, TC: TargetClient
<P>
>( - race_source: SC, - race_source_updated: impl FusedStream>, - race_target: TC, - race_target_updated: impl FusedStream>, - stall_timeout: Duration, - mut strategy: impl RaceStrategy< - P::SourceHeaderId, - P::TargetHeaderId, - P::Proof, - SourceNoncesRange = SC::NoncesRange, - ProofParameters = SC::ProofParameters, - TargetNoncesData = TC::TargetNoncesData, - >, -) -> Result<(), FailedClient> { - let mut progress_context = Instant::now(); - let mut race_state = RaceState::default(); - let mut stall_countdown = Instant::now(); - - let mut source_retry_backoff = retry_backoff(); - let mut source_client_is_online = true; - let mut source_nonces_required = false; - let source_nonces = futures::future::Fuse::terminated(); - let source_generate_proof = futures::future::Fuse::terminated(); - let source_go_offline_future = futures::future::Fuse::terminated(); - - let mut target_retry_backoff = retry_backoff(); - let mut target_client_is_online = true; - let mut target_best_nonces_required = false; - let mut target_finalized_nonces_required = false; - let target_best_nonces = futures::future::Fuse::terminated(); - let target_finalized_nonces = futures::future::Fuse::terminated(); - let target_submit_proof = futures::future::Fuse::terminated(); - let target_go_offline_future = futures::future::Fuse::terminated(); - - futures::pin_mut!( - race_source_updated, - source_nonces, - source_generate_proof, - source_go_offline_future, - race_target_updated, - target_best_nonces, - target_finalized_nonces, - target_submit_proof, - target_go_offline_future, - ); - - loop { - futures::select! { - // when headers ids are updated - source_state = race_source_updated.next() => { - if let Some(source_state) = source_state { - let is_source_state_updated = race_state.best_finalized_source_header_id_at_source.as_ref() - != Some(&source_state.best_finalized_self); - if is_source_state_updated { - source_nonces_required = true; - race_state.best_finalized_source_header_id_at_source = Some(source_state.best_finalized_self); - } - } - }, - target_state = race_target_updated.next() => { - if let Some(target_state) = target_state { - let is_target_best_state_updated = race_state.best_target_header_id.as_ref() - != Some(&target_state.best_self); - - if is_target_best_state_updated { - target_best_nonces_required = true; - race_state.best_target_header_id = Some(target_state.best_self); - race_state.best_finalized_source_header_id_at_best_target - = Some(target_state.best_finalized_peer_at_best_self); - } - - let is_target_finalized_state_updated = race_state.best_finalized_target_header_id.as_ref() - != Some(&target_state.best_finalized_self); - if is_target_finalized_state_updated { - target_finalized_nonces_required = true; - race_state.best_finalized_target_header_id = Some(target_state.best_finalized_self); - } - } - }, - - // when nonces are updated - nonces = source_nonces => { - source_nonces_required = false; - - source_client_is_online = process_future_result( - nonces, - &mut source_retry_backoff, - |(at_block, nonces)| { - log::debug!( - target: "bridge", - "Received nonces from {}: {:?}", - P::source_name(), - nonces, - ); - - strategy.source_nonces_updated(at_block, nonces); - }, - &mut source_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving nonces from {}", P::source_name()), - ).fail_if_connection_error(FailedClient::Source)?; - - // ask for more headers if we have nonces to deliver and required headers are missing - let required_source_header_id = race_state - 
.best_finalized_source_header_id_at_best_target - .as_ref() - .and_then(|best|strategy.required_source_header_at_target(best)); - if let Some(required_source_header_id) = required_source_header_id { - race_target.require_source_header(required_source_header_id).await; - } - }, - nonces = target_best_nonces => { - target_best_nonces_required = false; - - target_client_is_online = process_future_result( - nonces, - &mut target_retry_backoff, - |(_, nonces)| { - log::debug!( - target: "bridge", - "Received best nonces from {}: {:?}", - P::target_name(), - nonces, - ); - - let prev_best_at_target = strategy.best_at_target(); - strategy.best_target_nonces_updated(nonces, &mut race_state); - if strategy.best_at_target() != prev_best_at_target { - stall_countdown = Instant::now(); - } - }, - &mut target_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving best nonces from {}", P::target_name()), - ).fail_if_connection_error(FailedClient::Target)?; - }, - nonces = target_finalized_nonces => { - target_finalized_nonces_required = false; - - target_client_is_online = process_future_result( - nonces, - &mut target_retry_backoff, - |(_, nonces)| { - log::debug!( - target: "bridge", - "Received finalized nonces from {}: {:?}", - P::target_name(), - nonces, - ); - - strategy.finalized_target_nonces_updated(nonces, &mut race_state); - }, - &mut target_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving finalized nonces from {}", P::target_name()), - ).fail_if_connection_error(FailedClient::Target)?; - }, - - // proof generation and submission - proof = source_generate_proof => { - source_client_is_online = process_future_result( - proof, - &mut source_retry_backoff, - |(at_block, nonces_range, proof)| { - log::debug!( - target: "bridge", - "Received proof for nonces in range {:?} from {}", - nonces_range, - P::source_name(), - ); - - race_state.nonces_to_submit = Some((at_block, nonces_range, proof)); - }, - &mut source_go_offline_future, - async_std::task::sleep, - || format!("Error generating proof at {}", P::source_name()), - ).fail_if_connection_error(FailedClient::Source)?; - }, - proof_submit_result = target_submit_proof => { - target_client_is_online = process_future_result( - proof_submit_result, - &mut target_retry_backoff, - |nonces_range| { - log::debug!( - target: "bridge", - "Successfully submitted proof of nonces {:?} to {}", - nonces_range, - P::target_name(), - ); - - race_state.nonces_to_submit = None; - race_state.nonces_submitted = Some(nonces_range); - stall_countdown = Instant::now(); - }, - &mut target_go_offline_future, - async_std::task::sleep, - || format!("Error submitting proof {}", P::target_name()), - ).fail_if_connection_error(FailedClient::Target)?; - }, - - // when we're ready to retry request - _ = source_go_offline_future => { - source_client_is_online = true; - }, - _ = target_go_offline_future => { - target_client_is_online = true; - }, - } - - progress_context = print_race_progress::(progress_context, &strategy); - - if stall_countdown.elapsed() > stall_timeout { - log::warn!( - target: "bridge", - "{} -> {} race has stalled. State: {:?}. 
Strategy: {:?}", - P::source_name(), - P::target_name(), - race_state, - strategy, - ); - - return Err(FailedClient::Both) - } else if race_state.nonces_to_submit.is_none() && - race_state.nonces_submitted.is_none() && - strategy.is_empty() - { - stall_countdown = Instant::now(); - } - - if source_client_is_online { - source_client_is_online = false; - - let nonces_to_deliver = - select_nonces_to_deliver(race_state.clone(), &mut strategy).await; - let best_at_source = strategy.best_at_source(); - - if let Some((at_block, nonces_range, proof_parameters)) = nonces_to_deliver { - log::debug!( - target: "bridge", - "Asking {} to prove nonces in range {:?} at block {:?}", - P::source_name(), - nonces_range, - at_block, - ); - source_generate_proof.set( - race_source.generate_proof(at_block, nonces_range, proof_parameters).fuse(), - ); - } else if source_nonces_required && best_at_source.is_some() { - log::debug!(target: "bridge", "Asking {} about message nonces", P::source_name()); - let at_block = race_state - .best_finalized_source_header_id_at_source - .as_ref() - .expect( - "source_nonces_required is only true when\ - best_finalized_source_header_id_at_source is Some; qed", - ) - .clone(); - source_nonces.set( - race_source - .nonces(at_block, best_at_source.expect("guaranteed by if condition; qed")) - .fuse(), - ); - } else { - source_client_is_online = true; - } - } - - if target_client_is_online { - target_client_is_online = false; - - if let Some((at_block, nonces_range, proof)) = race_state.nonces_to_submit.as_ref() { - log::debug!( - target: "bridge", - "Going to submit proof of messages in range {:?} to {} node", - nonces_range, - P::target_name(), - ); - target_submit_proof.set( - race_target - .submit_proof(at_block.clone(), nonces_range.clone(), proof.clone()) - .fuse(), - ); - } else if target_best_nonces_required { - log::debug!(target: "bridge", "Asking {} about best message nonces", P::target_name()); - let at_block = race_state - .best_target_header_id - .as_ref() - .expect("target_best_nonces_required is only true when best_target_header_id is Some; qed") - .clone(); - target_best_nonces.set(race_target.nonces(at_block, false).fuse()); - } else if target_finalized_nonces_required { - log::debug!(target: "bridge", "Asking {} about finalized message nonces", P::target_name()); - let at_block = race_state - .best_finalized_target_header_id - .as_ref() - .expect( - "target_finalized_nonces_required is only true when\ - best_finalized_target_header_id is Some; qed", - ) - .clone(); - target_finalized_nonces.set(race_target.nonces(at_block, true).fuse()); - } else { - target_client_is_online = true; - } - } - } -} - -impl Default - for RaceState -{ - fn default() -> Self { - RaceState { - best_finalized_source_header_id_at_source: None, - best_finalized_source_header_id_at_best_target: None, - best_target_header_id: None, - best_finalized_target_header_id: None, - nonces_to_submit: None, - nonces_submitted: None, - } - } -} - -/// Print race progress. 
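The progress reporter below rate-limits itself: it logs only when the previous report is more than ten seconds old and otherwise returns the old timestamp unchanged. A minimal sketch of that throttle, with `println!` standing in for the bridge logger:

use std::time::{Duration, Instant};

/// Returns the instant of the last emitted report: unchanged when the previous
/// report is recent, `now` when a new report was just printed.
fn maybe_report_progress(prev_time: Instant, synced: Option<u64>, total: Option<u64>) -> Instant {
    let now = Instant::now();
    if now.saturating_duration_since(prev_time) <= Duration::from_secs(10) {
        return prev_time;
    }
    println!("Synced {:?} of {:?} nonces", synced, total);
    now
}

fn main() {
    // In the real loop this is called on every iteration and the result is
    // stored back into the loop-local `progress_context` variable.
    let mut progress_context = Instant::now();
    progress_context = maybe_report_progress(progress_context, Some(5), Some(10));
    let _ = progress_context;
}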
-fn print_race_progress(prev_time: Instant, strategy: &S) -> Instant -where - P: MessageRace, - S: RaceStrategy, -{ - let now_time = Instant::now(); - - let need_update = now_time.saturating_duration_since(prev_time) > Duration::from_secs(10); - if !need_update { - return prev_time - } - - let now_best_nonce_at_source = strategy.best_at_source(); - let now_best_nonce_at_target = strategy.best_at_target(); - log::info!( - target: "bridge", - "Synced {:?} of {:?} nonces in {} -> {} race", - now_best_nonce_at_target, - now_best_nonce_at_source, - P::source_name(), - P::target_name(), - ); - now_time -} - -async fn select_nonces_to_deliver( - race_state: RaceState, - strategy: &mut Strategy, -) -> Option<(SourceHeaderId, RangeInclusive, Strategy::ProofParameters)> -where - SourceHeaderId: Clone, - Strategy: RaceStrategy, -{ - let best_finalized_source_header_id_at_best_target = - race_state.best_finalized_source_header_id_at_best_target.clone()?; - strategy - .select_nonces_to_deliver(race_state) - .await - .map(|(nonces_range, proof_parameters)| { - (best_finalized_source_header_id_at_best_target, nonces_range, proof_parameters) - }) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::message_race_strategy::BasicStrategy; - use relay_utils::HeaderId; - - #[async_std::test] - async fn proof_is_generated_at_best_block_known_to_target_node() { - const GENERATED_AT: u64 = 6; - const BEST_AT_SOURCE: u64 = 10; - const BEST_AT_TARGET: u64 = 8; - - // target node only knows about source' BEST_AT_TARGET block - // source node has BEST_AT_SOURCE > BEST_AT_TARGET block - let mut race_state = RaceState::<_, _, ()> { - best_finalized_source_header_id_at_source: Some(HeaderId( - BEST_AT_SOURCE, - BEST_AT_SOURCE, - )), - best_finalized_source_header_id_at_best_target: Some(HeaderId( - BEST_AT_TARGET, - BEST_AT_TARGET, - )), - best_target_header_id: Some(HeaderId(0, 0)), - best_finalized_target_header_id: Some(HeaderId(0, 0)), - nonces_to_submit: None, - nonces_submitted: None, - }; - - // we have some nonces to deliver and they're generated at GENERATED_AT < BEST_AT_SOURCE - let mut strategy = BasicStrategy::new(); - strategy.source_nonces_updated( - HeaderId(GENERATED_AT, GENERATED_AT), - SourceClientNonces { new_nonces: 0..=10, confirmed_nonce: None }, - ); - strategy.best_target_nonces_updated( - TargetClientNonces { latest_nonce: 5u64, nonces_data: () }, - &mut race_state, - ); - - // the proof will be generated on source, but using BEST_AT_TARGET block - assert_eq!( - select_nonces_to_deliver(race_state, &mut strategy).await, - Some((HeaderId(BEST_AT_TARGET, BEST_AT_TARGET), 6..=10, (),)) - ); - } -} diff --git a/polkadot/bridges/relays/messages/src/message_race_receiving.rs b/polkadot/bridges/relays/messages/src/message_race_receiving.rs deleted file mode 100644 index 5aa36cbd9c6..00000000000 --- a/polkadot/bridges/relays/messages/src/message_race_receiving.rs +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -//! Message receiving race delivers proof-of-messages-delivery from "lane.target" to "lane.source". - -use crate::{ - message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}, - message_lane_loop::{ - SourceClient as MessageLaneSourceClient, SourceClientState, - TargetClient as MessageLaneTargetClient, TargetClientState, - }, - message_race_loop::{ - MessageRace, NoncesRange, SourceClient, SourceClientNonces, TargetClient, - TargetClientNonces, - }, - message_race_strategy::BasicStrategy, - metrics::MessageLaneLoopMetrics, -}; - -use async_trait::async_trait; -use bp_messages::MessageNonce; -use futures::stream::FusedStream; -use relay_utils::FailedClient; -use std::{marker::PhantomData, ops::RangeInclusive, time::Duration}; - -/// Message receiving confirmations delivery strategy. -type ReceivingConfirmationsBasicStrategy
<P> = BasicStrategy<
-    <P as MessageLane>::TargetHeaderNumber,
-    <P as MessageLane>::TargetHeaderHash,
-    <P as MessageLane>::SourceHeaderNumber,
-    <P as MessageLane>::SourceHeaderHash,
-    RangeInclusive<MessageNonce>,
-    <P as MessageLane>::MessagesReceivingProof,
->;
-
-/// Run receiving confirmations race.
-pub async fn run<P: MessageLane>(
-    source_client: impl MessageLaneSourceClient<P>,
-    source_state_updates: impl FusedStream<Item = SourceClientState<P>>,
-    target_client: impl MessageLaneTargetClient<P>
, - target_state_updates: impl FusedStream>, - stall_timeout: Duration, - metrics_msg: Option, -) -> Result<(), FailedClient> { - crate::message_race_loop::run( - ReceivingConfirmationsRaceSource { - client: target_client, - metrics_msg: metrics_msg.clone(), - _phantom: Default::default(), - }, - target_state_updates, - ReceivingConfirmationsRaceTarget { - client: source_client, - metrics_msg, - _phantom: Default::default(), - }, - source_state_updates, - stall_timeout, - ReceivingConfirmationsBasicStrategy::
<P>
::new(), - ) - .await -} - -/// Messages receiving confirmations race. -struct ReceivingConfirmationsRace
<P>(std::marker::PhantomData<P>);
-
-impl<P: MessageLane> MessageRace for ReceivingConfirmationsRace<P> {
-    type SourceHeaderId = TargetHeaderIdOf<P>;
-    type TargetHeaderId = SourceHeaderIdOf<P>
; - - type MessageNonce = MessageNonce; - type Proof = P::MessagesReceivingProof; - - fn source_name() -> String { - format!("{}::ReceivingConfirmationsDelivery", P::TARGET_NAME) - } - - fn target_name() -> String { - format!("{}::ReceivingConfirmationsDelivery", P::SOURCE_NAME) - } -} - -/// Message receiving confirmations race source, which is a target of the lane. -struct ReceivingConfirmationsRaceSource { - client: C, - metrics_msg: Option, - _phantom: PhantomData
<P>,
-}
-
-#[async_trait]
-impl<P, C> SourceClient<ReceivingConfirmationsRace<P>> for ReceivingConfirmationsRaceSource<P, C>
-where
-    P: MessageLane,
-    C: MessageLaneTargetClient<P>
, -{ - type Error = C::Error; - type NoncesRange = RangeInclusive; - type ProofParameters = (); - - async fn nonces( - &self, - at_block: TargetHeaderIdOf
<P>,
-        prev_latest_nonce: MessageNonce,
-    ) -> Result<(TargetHeaderIdOf<P>
, SourceClientNonces), Self::Error> { - let (at_block, latest_received_nonce) = self.client.latest_received_nonce(at_block).await?; - if let Some(metrics_msg) = self.metrics_msg.as_ref() { - metrics_msg.update_target_latest_received_nonce::
<P>
(latest_received_nonce); - } - Ok(( - at_block, - SourceClientNonces { - new_nonces: prev_latest_nonce + 1..=latest_received_nonce, - confirmed_nonce: None, - }, - )) - } - - #[allow(clippy::unit_arg)] - async fn generate_proof( - &self, - at_block: TargetHeaderIdOf
<P>,
-        nonces: RangeInclusive<MessageNonce>,
-        _proof_parameters: Self::ProofParameters,
-    ) -> Result<
-        (TargetHeaderIdOf<P>
, RangeInclusive, P::MessagesReceivingProof), - Self::Error, - > { - self.client - .prove_messages_receiving(at_block) - .await - .map(|(at_block, proof)| (at_block, nonces, proof)) - } -} - -/// Message receiving confirmations race target, which is a source of the lane. -struct ReceivingConfirmationsRaceTarget { - client: C, - metrics_msg: Option, - _phantom: PhantomData
<P>,
-}
-
-#[async_trait]
-impl<P, C> TargetClient<ReceivingConfirmationsRace<P>> for ReceivingConfirmationsRaceTarget<P, C>
-where
-    P: MessageLane,
-    C: MessageLaneSourceClient<P>
, -{ - type Error = C::Error; - type TargetNoncesData = (); - - async fn require_source_header(&self, id: TargetHeaderIdOf
<P>
) { - self.client.require_target_header_on_source(id).await - } - - async fn nonces( - &self, - at_block: SourceHeaderIdOf
<P>,
-        update_metrics: bool,
-    ) -> Result<(SourceHeaderIdOf<P>
, TargetClientNonces<()>), Self::Error> { - let (at_block, latest_confirmed_nonce) = - self.client.latest_confirmed_received_nonce(at_block).await?; - if update_metrics { - if let Some(metrics_msg) = self.metrics_msg.as_ref() { - metrics_msg.update_source_latest_confirmed_nonce::
<P>
(latest_confirmed_nonce); - } - } - Ok((at_block, TargetClientNonces { latest_nonce: latest_confirmed_nonce, nonces_data: () })) - } - - async fn submit_proof( - &self, - generated_at_block: TargetHeaderIdOf
<P>
, - nonces: RangeInclusive, - proof: P::MessagesReceivingProof, - ) -> Result, Self::Error> { - self.client.submit_messages_receiving_proof(generated_at_block, proof).await?; - Ok(nonces) - } -} - -impl NoncesRange for RangeInclusive { - fn begin(&self) -> MessageNonce { - *RangeInclusive::::start(self) - } - - fn end(&self) -> MessageNonce { - *RangeInclusive::::end(self) - } - - fn greater_than(self, nonce: MessageNonce) -> Option { - let next_nonce = nonce + 1; - let end = *self.end(); - if next_nonce > end { - None - } else { - Some(std::cmp::max(self.begin(), next_nonce)..=end) - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn range_inclusive_works_as_nonces_range() { - let range = 20..=30; - - assert_eq!(NoncesRange::begin(&range), 20); - assert_eq!(NoncesRange::end(&range), 30); - assert_eq!(range.clone().greater_than(10), Some(20..=30)); - assert_eq!(range.clone().greater_than(19), Some(20..=30)); - assert_eq!(range.clone().greater_than(20), Some(21..=30)); - assert_eq!(range.clone().greater_than(25), Some(26..=30)); - assert_eq!(range.clone().greater_than(29), Some(30..=30)); - assert_eq!(range.greater_than(30), None); - } -} diff --git a/polkadot/bridges/relays/messages/src/message_race_strategy.rs b/polkadot/bridges/relays/messages/src/message_race_strategy.rs deleted file mode 100644 index 4ecf451deb0..00000000000 --- a/polkadot/bridges/relays/messages/src/message_race_strategy.rs +++ /dev/null @@ -1,517 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -//! Basic delivery strategy. The strategy selects nonces if: -//! -//! 1) there are more nonces on the source side than on the target side; -//! 2) new nonces may be proved to target node (i.e. they have appeared at the -//! block, which is known to the target node). - -use crate::message_race_loop::{ - NoncesRange, RaceState, RaceStrategy, SourceClientNonces, TargetClientNonces, -}; - -use async_trait::async_trait; -use bp_messages::MessageNonce; -use relay_utils::HeaderId; -use std::{collections::VecDeque, fmt::Debug, marker::PhantomData, ops::RangeInclusive}; - -/// Queue of nonces known to the source node. -pub type SourceRangesQueue = - VecDeque<(HeaderId, SourceNoncesRange)>; - -/// Nonces delivery strategy. -#[derive(Debug)] -pub struct BasicStrategy< - SourceHeaderNumber, - SourceHeaderHash, - TargetHeaderNumber, - TargetHeaderHash, - SourceNoncesRange, - Proof, -> { - /// All queued nonces. - source_queue: SourceRangesQueue, - /// The best nonce known to target node (at its best block). `None` if it has not been received - /// yet. - best_target_nonce: Option, - /// Unused generic types dump. 
- _phantom: PhantomData<(TargetHeaderNumber, TargetHeaderHash, Proof)>, -} - -impl< - SourceHeaderNumber, - SourceHeaderHash, - TargetHeaderNumber, - TargetHeaderHash, - SourceNoncesRange, - Proof, - > - BasicStrategy< - SourceHeaderNumber, - SourceHeaderHash, - TargetHeaderNumber, - TargetHeaderHash, - SourceNoncesRange, - Proof, - > where - SourceHeaderHash: Clone, - SourceHeaderNumber: Clone + Ord, - SourceNoncesRange: NoncesRange, -{ - /// Create new delivery strategy. - pub fn new() -> Self { - BasicStrategy { - source_queue: VecDeque::new(), - best_target_nonce: None, - _phantom: Default::default(), - } - } - - /// Reference to source queue. - pub(crate) fn source_queue( - &self, - ) -> &VecDeque<(HeaderId, SourceNoncesRange)> { - &self.source_queue - } - - /// Mutable reference to source queue to use in tests. - #[cfg(test)] - pub(crate) fn source_queue_mut( - &mut self, - ) -> &mut VecDeque<(HeaderId, SourceNoncesRange)> { - &mut self.source_queue - } - - /// Returns index of the latest source queue entry, that may be delivered to the target node. - /// - /// Returns `None` if no entries may be delivered. All entries before and including the - /// `Some(_)` index are guaranteed to be witnessed at source blocks that are known to be - /// finalized at the target node. - pub fn maximal_available_source_queue_index( - &self, - race_state: RaceState< - HeaderId, - HeaderId, - Proof, - >, - ) -> Option { - // if we do not know best nonce at target node, we can't select anything - let _ = self.best_target_nonce?; - - // if we have already selected nonces that we want to submit, do nothing - if race_state.nonces_to_submit.is_some() { - return None - } - - // if we already submitted some nonces, do nothing - if race_state.nonces_submitted.is_some() { - return None - } - - // 1) we want to deliver all nonces, starting from `target_nonce + 1` - // 2) we can't deliver new nonce until header, that has emitted this nonce, is finalized - // by target client - // 3) selector is used for more complicated logic - // - // => let's first select range of entries inside deque that are already finalized at - // the target client and pass this range to the selector - let best_header_at_target = race_state.best_finalized_source_header_id_at_best_target?; - self.source_queue - .iter() - .enumerate() - .take_while(|(_, (queued_at, _))| queued_at.0 <= best_header_at_target.0) - .map(|(index, _)| index) - .last() - } - - /// Remove all nonces that are less than or equal to given nonce from the source queue. 
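A standalone sketch of that pruning, assuming the queue holds plain `(block_number, RangeInclusive<u64>)` pairs instead of the crate's `HeaderId` entries:

use std::collections::VecDeque;
use std::ops::RangeInclusive;

/// Drop every nonce <= `nonce` from the front of the queue, re-queueing the
/// undelivered tail of a partially covered range.
fn remove_le_nonces(queue: &mut VecDeque<(u64, RangeInclusive<u64>)>, nonce: u64) {
    while let Some((queued_at, range)) = queue.pop_front() {
        let next = nonce + 1;
        if next <= *range.end() {
            // Part of this range is still undelivered: keep the tail and stop.
            queue.push_front((queued_at, next.max(*range.start())..=*range.end()));
            break;
        }
        // Otherwise the whole range is already delivered; keep popping.
    }
}

fn main() {
    let mut queue: VecDeque<(u64, RangeInclusive<u64>)> =
        VecDeque::from(vec![(1, 1..=3), (2, 4..=6), (3, 7..=9)]);
    remove_le_nonces(&mut queue, 5);

    let expected: VecDeque<(u64, RangeInclusive<u64>)> =
        VecDeque::from(vec![(2, 6..=6), (3, 7..=9)]);
    assert_eq!(queue, expected);
}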
- pub fn remove_le_nonces_from_source_queue(&mut self, nonce: MessageNonce) { - while let Some((queued_at, queued_range)) = self.source_queue.pop_front() { - if let Some(range_to_requeue) = queued_range.greater_than(nonce) { - self.source_queue.push_front((queued_at, range_to_requeue)); - break - } - } - } -} - -#[async_trait] -impl< - SourceHeaderNumber, - SourceHeaderHash, - TargetHeaderNumber, - TargetHeaderHash, - SourceNoncesRange, - Proof, - > - RaceStrategy< - HeaderId, - HeaderId, - Proof, - > - for BasicStrategy< - SourceHeaderNumber, - SourceHeaderHash, - TargetHeaderNumber, - TargetHeaderHash, - SourceNoncesRange, - Proof, - > where - SourceHeaderHash: Clone + Debug + Send, - SourceHeaderNumber: Clone + Ord + Debug + Send, - SourceNoncesRange: NoncesRange + Debug + Send, - TargetHeaderHash: Debug + Send, - TargetHeaderNumber: Debug + Send, - Proof: Debug + Send, -{ - type SourceNoncesRange = SourceNoncesRange; - type ProofParameters = (); - type TargetNoncesData = (); - - fn is_empty(&self) -> bool { - self.source_queue.is_empty() - } - - fn required_source_header_at_target( - &self, - current_best: &HeaderId, - ) -> Option> { - self.source_queue - .back() - .and_then(|(h, _)| if h.0 > current_best.0 { Some(h.clone()) } else { None }) - } - - fn best_at_source(&self) -> Option { - let best_in_queue = self.source_queue.back().map(|(_, range)| range.end()); - match (best_in_queue, self.best_target_nonce) { - (Some(best_in_queue), Some(best_target_nonce)) if best_in_queue > best_target_nonce => - Some(best_in_queue), - (_, Some(best_target_nonce)) => Some(best_target_nonce), - (_, None) => None, - } - } - - fn best_at_target(&self) -> Option { - self.best_target_nonce - } - - fn source_nonces_updated( - &mut self, - at_block: HeaderId, - nonces: SourceClientNonces, - ) { - let best_in_queue = self - .source_queue - .back() - .map(|(_, range)| range.end()) - .or(self.best_target_nonce) - .unwrap_or_default(); - self.source_queue.extend( - nonces - .new_nonces - .greater_than(best_in_queue) - .into_iter() - .map(move |range| (at_block.clone(), range)), - ) - } - - fn best_target_nonces_updated( - &mut self, - nonces: TargetClientNonces<()>, - race_state: &mut RaceState< - HeaderId, - HeaderId, - Proof, - >, - ) { - let nonce = nonces.latest_nonce; - - if let Some(best_target_nonce) = self.best_target_nonce { - if nonce < best_target_nonce { - return - } - } - - while let Some(true) = self.source_queue.front().map(|(_, range)| range.begin() <= nonce) { - let maybe_subrange = self.source_queue.pop_front().and_then(|(at_block, range)| { - range.greater_than(nonce).map(|subrange| (at_block, subrange)) - }); - if let Some((at_block, subrange)) = maybe_subrange { - self.source_queue.push_front((at_block, subrange)); - break - } - } - - let need_to_select_new_nonces = race_state - .nonces_to_submit - .as_ref() - .map(|(_, nonces, _)| *nonces.end() <= nonce) - .unwrap_or(false); - if need_to_select_new_nonces { - race_state.nonces_to_submit = None; - } - - let need_new_nonces_to_submit = race_state - .nonces_submitted - .as_ref() - .map(|nonces| *nonces.end() <= nonce) - .unwrap_or(false); - if need_new_nonces_to_submit { - race_state.nonces_submitted = None; - } - - self.best_target_nonce = - Some(std::cmp::max(self.best_target_nonce.unwrap_or(nonces.latest_nonce), nonce)); - } - - fn finalized_target_nonces_updated( - &mut self, - nonces: TargetClientNonces<()>, - _race_state: &mut RaceState< - HeaderId, - HeaderId, - Proof, - >, - ) { - self.best_target_nonce = Some(std::cmp::max( - 
self.best_target_nonce.unwrap_or(nonces.latest_nonce), - nonces.latest_nonce, - )); - } - - async fn select_nonces_to_deliver( - &mut self, - race_state: RaceState< - HeaderId, - HeaderId, - Proof, - >, - ) -> Option<(RangeInclusive, Self::ProofParameters)> { - let maximal_source_queue_index = self.maximal_available_source_queue_index(race_state)?; - let range_begin = self.source_queue[0].1.begin(); - let range_end = self.source_queue[maximal_source_queue_index].1.end(); - self.remove_le_nonces_from_source_queue(range_end); - Some((range_begin..=range_end, ())) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - message_lane::MessageLane, - message_lane_loop::tests::{ - header_id, TestMessageLane, TestMessagesProof, TestSourceHeaderHash, - TestSourceHeaderNumber, - }, - }; - - type SourceNoncesRange = RangeInclusive; - - type BasicStrategy
<P> = super::BasicStrategy<
-        <P as MessageLane>::SourceHeaderNumber,
-        <P as MessageLane>::SourceHeaderHash,
-        <P as MessageLane>::TargetHeaderNumber,
-        <P as MessageLane>::TargetHeaderHash,
-        SourceNoncesRange,
-        <P as MessageLane>
::MessagesProof, - >; - - fn source_nonces(new_nonces: SourceNoncesRange) -> SourceClientNonces { - SourceClientNonces { new_nonces, confirmed_nonce: None } - } - - fn target_nonces(latest_nonce: MessageNonce) -> TargetClientNonces<()> { - TargetClientNonces { latest_nonce, nonces_data: () } - } - - #[test] - fn strategy_is_empty_works() { - let mut strategy = BasicStrategy::::new(); - assert!(strategy.is_empty()); - strategy.source_nonces_updated(header_id(1), source_nonces(1..=1)); - assert!(!strategy.is_empty()); - } - - #[test] - fn best_at_source_is_never_lower_than_target_nonce() { - let mut strategy = BasicStrategy::::new(); - assert_eq!(strategy.best_at_source(), None); - strategy.source_nonces_updated(header_id(1), source_nonces(1..=5)); - assert_eq!(strategy.best_at_source(), None); - strategy.best_target_nonces_updated(target_nonces(10), &mut Default::default()); - assert_eq!(strategy.source_queue, vec![]); - assert_eq!(strategy.best_at_source(), Some(10)); - } - - #[test] - fn source_nonce_is_never_lower_than_known_target_nonce() { - let mut strategy = BasicStrategy::::new(); - strategy.best_target_nonces_updated(target_nonces(10), &mut Default::default()); - strategy.source_nonces_updated(header_id(1), source_nonces(1..=5)); - assert_eq!(strategy.source_queue, vec![]); - } - - #[test] - fn source_nonce_is_never_lower_than_latest_known_source_nonce() { - let mut strategy = BasicStrategy::::new(); - strategy.source_nonces_updated(header_id(1), source_nonces(1..=5)); - strategy.source_nonces_updated(header_id(2), source_nonces(1..=3)); - strategy.source_nonces_updated(header_id(2), source_nonces(1..=5)); - assert_eq!(strategy.source_queue, vec![(header_id(1), 1..=5)]); - } - - #[test] - fn target_nonce_is_never_lower_than_latest_known_target_nonce() { - let mut strategy = BasicStrategy::::new(); - assert_eq!(strategy.best_target_nonce, None); - strategy.best_target_nonces_updated(target_nonces(10), &mut Default::default()); - assert_eq!(strategy.best_target_nonce, Some(10)); - strategy.best_target_nonces_updated(target_nonces(5), &mut Default::default()); - assert_eq!(strategy.best_target_nonce, Some(10)); - } - - #[test] - fn updated_target_nonce_removes_queued_entries() { - let mut strategy = BasicStrategy::::new(); - strategy.source_nonces_updated(header_id(1), source_nonces(1..=5)); - strategy.source_nonces_updated(header_id(2), source_nonces(6..=10)); - strategy.source_nonces_updated(header_id(3), source_nonces(11..=15)); - strategy.source_nonces_updated(header_id(4), source_nonces(16..=20)); - strategy.best_target_nonces_updated(target_nonces(15), &mut Default::default()); - assert_eq!(strategy.source_queue, vec![(header_id(4), 16..=20)]); - strategy.best_target_nonces_updated(target_nonces(17), &mut Default::default()); - assert_eq!(strategy.source_queue, vec![(header_id(4), 18..=20)]); - } - - #[test] - fn selected_nonces_are_dropped_on_target_nonce_update() { - let mut state = RaceState::default(); - let mut strategy = BasicStrategy::::new(); - state.nonces_to_submit = Some((header_id(1), 5..=10, (5..=10, None))); - strategy.best_target_nonces_updated(target_nonces(7), &mut state); - assert!(state.nonces_to_submit.is_some()); - strategy.best_target_nonces_updated(target_nonces(10), &mut state); - assert!(state.nonces_to_submit.is_none()); - } - - #[test] - fn submitted_nonces_are_dropped_on_target_nonce_update() { - let mut state = RaceState::default(); - let mut strategy = BasicStrategy::::new(); - state.nonces_submitted = Some(5..=10); - 
strategy.best_target_nonces_updated(target_nonces(7), &mut state); - assert!(state.nonces_submitted.is_some()); - strategy.best_target_nonces_updated(target_nonces(10), &mut state); - assert!(state.nonces_submitted.is_none()); - } - - #[async_std::test] - async fn nothing_is_selected_if_something_is_already_selected() { - let mut state = RaceState::default(); - let mut strategy = BasicStrategy::::new(); - state.nonces_to_submit = Some((header_id(1), 1..=10, (1..=10, None))); - strategy.best_target_nonces_updated(target_nonces(0), &mut state); - strategy.source_nonces_updated(header_id(1), source_nonces(1..=10)); - assert_eq!(strategy.select_nonces_to_deliver(state.clone()).await, None); - } - - #[async_std::test] - async fn nothing_is_selected_if_something_is_already_submitted() { - let mut state = RaceState::default(); - let mut strategy = BasicStrategy::::new(); - state.nonces_submitted = Some(1..=10); - strategy.best_target_nonces_updated(target_nonces(0), &mut state); - strategy.source_nonces_updated(header_id(1), source_nonces(1..=10)); - assert_eq!(strategy.select_nonces_to_deliver(state.clone()).await, None); - } - - #[async_std::test] - async fn select_nonces_to_deliver_works() { - let mut state = RaceState::<_, _, TestMessagesProof>::default(); - let mut strategy = BasicStrategy::::new(); - strategy.best_target_nonces_updated(target_nonces(0), &mut state); - strategy.source_nonces_updated(header_id(1), source_nonces(1..=1)); - strategy.source_nonces_updated(header_id(2), source_nonces(2..=2)); - strategy.source_nonces_updated(header_id(3), source_nonces(3..=6)); - strategy.source_nonces_updated(header_id(5), source_nonces(7..=8)); - - state.best_finalized_source_header_id_at_best_target = Some(header_id(4)); - assert_eq!(strategy.select_nonces_to_deliver(state.clone()).await, Some((1..=6, ()))); - strategy.best_target_nonces_updated(target_nonces(6), &mut state); - assert_eq!(strategy.select_nonces_to_deliver(state.clone()).await, None); - - state.best_finalized_source_header_id_at_best_target = Some(header_id(5)); - assert_eq!(strategy.select_nonces_to_deliver(state.clone()).await, Some((7..=8, ()))); - strategy.best_target_nonces_updated(target_nonces(8), &mut state); - assert_eq!(strategy.select_nonces_to_deliver(state.clone()).await, None); - } - - #[test] - fn maximal_available_source_queue_index_works() { - let mut state = RaceState::<_, _, TestMessagesProof>::default(); - let mut strategy = BasicStrategy::::new(); - strategy.best_target_nonces_updated(target_nonces(0), &mut state); - strategy.source_nonces_updated(header_id(1), source_nonces(1..=3)); - strategy.source_nonces_updated(header_id(2), source_nonces(4..=6)); - strategy.source_nonces_updated(header_id(3), source_nonces(7..=9)); - - state.best_finalized_source_header_id_at_best_target = Some(header_id(0)); - assert_eq!(strategy.maximal_available_source_queue_index(state.clone()), None); - - state.best_finalized_source_header_id_at_best_target = Some(header_id(1)); - assert_eq!(strategy.maximal_available_source_queue_index(state.clone()), Some(0)); - - state.best_finalized_source_header_id_at_best_target = Some(header_id(2)); - assert_eq!(strategy.maximal_available_source_queue_index(state.clone()), Some(1)); - - state.best_finalized_source_header_id_at_best_target = Some(header_id(3)); - assert_eq!(strategy.maximal_available_source_queue_index(state.clone()), Some(2)); - - state.best_finalized_source_header_id_at_best_target = Some(header_id(4)); - assert_eq!(strategy.maximal_available_source_queue_index(state), 
Some(2)); - } - - #[test] - fn remove_le_nonces_from_source_queue_works() { - let mut state = RaceState::<_, _, TestMessagesProof>::default(); - let mut strategy = BasicStrategy::::new(); - strategy.best_target_nonces_updated(target_nonces(0), &mut state); - strategy.source_nonces_updated(header_id(1), source_nonces(1..=3)); - strategy.source_nonces_updated(header_id(2), source_nonces(4..=6)); - strategy.source_nonces_updated(header_id(3), source_nonces(7..=9)); - - fn source_queue_nonces( - source_queue: &SourceRangesQueue< - TestSourceHeaderHash, - TestSourceHeaderNumber, - SourceNoncesRange, - >, - ) -> Vec { - source_queue.iter().flat_map(|(_, range)| range.clone()).collect() - } - - strategy.remove_le_nonces_from_source_queue(1); - assert_eq!(source_queue_nonces(&strategy.source_queue), vec![2, 3, 4, 5, 6, 7, 8, 9],); - - strategy.remove_le_nonces_from_source_queue(5); - assert_eq!(source_queue_nonces(&strategy.source_queue), vec![6, 7, 8, 9]); - - strategy.remove_le_nonces_from_source_queue(9); - assert_eq!(source_queue_nonces(&strategy.source_queue), Vec::::new()); - - strategy.remove_le_nonces_from_source_queue(100); - assert_eq!(source_queue_nonces(&strategy.source_queue), Vec::::new()); - } -} diff --git a/polkadot/bridges/relays/messages/src/metrics.rs b/polkadot/bridges/relays/messages/src/metrics.rs deleted file mode 100644 index 4decb7e092e..00000000000 --- a/polkadot/bridges/relays/messages/src/metrics.rs +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Metrics for message lane relay loop. - -use crate::{ - message_lane::MessageLane, - message_lane_loop::{SourceClientState, TargetClientState}, -}; - -use bp_messages::MessageNonce; -use finality_relay::SyncLoopMetrics; -use relay_utils::metrics::{ - metric_name, register, GaugeVec, Metric, Opts, PrometheusError, Registry, U64, -}; - -/// Message lane relay metrics. -/// -/// Cloning only clones references. -#[derive(Clone)] -pub struct MessageLaneLoopMetrics { - /// Best finalized block numbers - "source", "source_at_target", "target_at_source". - source_to_target_finality_metrics: SyncLoopMetrics, - /// Best finalized block numbers - "source", "target", "source_at_target", "target_at_source". - target_to_source_finality_metrics: SyncLoopMetrics, - /// Lane state nonces: "source_latest_generated", "source_latest_confirmed", - /// "target_latest_received", "target_latest_confirmed". - lane_state_nonces: GaugeVec, -} - -impl MessageLaneLoopMetrics { - /// Create and register messages loop metrics. 
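All of the lane state above ends up in a single gauge vector keyed by a `type` label. A rough standalone equivalent written against the plain `prometheus` crate (an assumption for illustration; the removed code goes through the `relay-utils` metric wrappers instead):

use prometheus::{IntGaugeVec, Opts, Registry};

fn main() -> Result<(), prometheus::Error> {
    let registry = Registry::new();

    // One time series per nonce kind, distinguished by the "type" label.
    let lane_state_nonces = IntGaugeVec::new(
        Opts::new("lane_state_nonces", "Nonces of the lane state"),
        &["type"],
    )?;
    registry.register(Box::new(lane_state_nonces.clone()))?;

    // The relay loop would update these from the client states it observes.
    lane_state_nonces.with_label_values(&["source_latest_generated"]).set(100);
    lane_state_nonces.with_label_values(&["target_latest_received"]).set(95);
    Ok(())
}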
- pub fn new(prefix: Option<&str>) -> Result { - Ok(MessageLaneLoopMetrics { - source_to_target_finality_metrics: SyncLoopMetrics::new( - prefix, - "source", - "source_at_target", - )?, - target_to_source_finality_metrics: SyncLoopMetrics::new( - prefix, - "target", - "target_at_source", - )?, - lane_state_nonces: GaugeVec::new( - Opts::new(metric_name(prefix, "lane_state_nonces"), "Nonces of the lane state"), - &["type"], - )?, - }) - } - - /// Update source client state metrics. - pub fn update_source_state(&self, source_client_state: SourceClientState
<P>
) { - self.source_to_target_finality_metrics - .update_best_block_at_source(source_client_state.best_self.0.into()); - self.target_to_source_finality_metrics.update_best_block_at_target( - source_client_state.best_finalized_peer_at_best_self.0.into(), - ); - self.target_to_source_finality_metrics.update_using_same_fork( - source_client_state.best_finalized_peer_at_best_self.1 == - source_client_state.actual_best_finalized_peer_at_best_self.1, - ); - } - - /// Update target client state metrics. - pub fn update_target_state(&self, target_client_state: TargetClientState
<P>
) { - self.target_to_source_finality_metrics - .update_best_block_at_source(target_client_state.best_self.0.into()); - self.source_to_target_finality_metrics.update_best_block_at_target( - target_client_state.best_finalized_peer_at_best_self.0.into(), - ); - self.source_to_target_finality_metrics.update_using_same_fork( - target_client_state.best_finalized_peer_at_best_self.1 == - target_client_state.actual_best_finalized_peer_at_best_self.1, - ); - } - - /// Update latest generated nonce at source. - pub fn update_source_latest_generated_nonce( - &self, - source_latest_generated_nonce: MessageNonce, - ) { - self.lane_state_nonces - .with_label_values(&["source_latest_generated"]) - .set(source_latest_generated_nonce); - } - - /// Update the latest confirmed nonce at source. - pub fn update_source_latest_confirmed_nonce( - &self, - source_latest_confirmed_nonce: MessageNonce, - ) { - self.lane_state_nonces - .with_label_values(&["source_latest_confirmed"]) - .set(source_latest_confirmed_nonce); - } - - /// Update the latest received nonce at target. - pub fn update_target_latest_received_nonce( - &self, - target_latest_generated_nonce: MessageNonce, - ) { - self.lane_state_nonces - .with_label_values(&["target_latest_received"]) - .set(target_latest_generated_nonce); - } - - /// Update the latest confirmed nonce at target. - pub fn update_target_latest_confirmed_nonce( - &self, - target_latest_confirmed_nonce: MessageNonce, - ) { - self.lane_state_nonces - .with_label_values(&["target_latest_confirmed"]) - .set(target_latest_confirmed_nonce); - } -} - -impl Metric for MessageLaneLoopMetrics { - fn register(&self, registry: &Registry) -> Result<(), PrometheusError> { - self.source_to_target_finality_metrics.register(registry)?; - self.target_to_source_finality_metrics.register(registry)?; - register(self.lane_state_nonces.clone(), registry)?; - Ok(()) - } -} diff --git a/polkadot/bridges/relays/messages/src/relay_strategy/altruistic_strategy.rs b/polkadot/bridges/relays/messages/src/relay_strategy/altruistic_strategy.rs deleted file mode 100644 index d6fec7f1297..00000000000 --- a/polkadot/bridges/relays/messages/src/relay_strategy/altruistic_strategy.rs +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Altruistic relay strategy - -use async_trait::async_trait; - -use crate::{ - message_lane::MessageLane, - message_lane_loop::{ - SourceClient as MessageLaneSourceClient, TargetClient as MessageLaneTargetClient, - }, - relay_strategy::{RelayReference, RelayStrategy}, -}; - -/// The relayer doesn't care about rewards. -#[derive(Clone)] -pub struct AltruisticStrategy; - -#[async_trait] -impl RelayStrategy for AltruisticStrategy { - async fn decide< - P: MessageLane, - SourceClient: MessageLaneSourceClient
<P>
, - TargetClient: MessageLaneTargetClient
<P>
, - >( - &mut self, - _reference: &mut RelayReference, - ) -> bool { - true - } -} diff --git a/polkadot/bridges/relays/messages/src/relay_strategy/enforcement_strategy.rs b/polkadot/bridges/relays/messages/src/relay_strategy/enforcement_strategy.rs deleted file mode 100644 index 1e9ef5bdbf8..00000000000 --- a/polkadot/bridges/relays/messages/src/relay_strategy/enforcement_strategy.rs +++ /dev/null @@ -1,219 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! enforcement strategy - -use num_traits::Zero; - -use bp_messages::{MessageNonce, Weight}; -use bp_runtime::messages::DispatchFeePayment; - -use crate::{ - message_lane::MessageLane, - message_lane_loop::{ - MessageDetails, SourceClient as MessageLaneSourceClient, - TargetClient as MessageLaneTargetClient, - }, - message_race_loop::NoncesRange, - relay_strategy::{RelayMessagesBatchReference, RelayReference, RelayStrategy}, -}; - -/// Do hard check and run soft check strategy -#[derive(Clone)] -pub struct EnforcementStrategy { - strategy: Strategy, -} - -impl EnforcementStrategy { - pub fn new(strategy: Strategy) -> Self { - Self { strategy } - } -} - -impl EnforcementStrategy { - pub async fn decide< - P: MessageLane, - SourceClient: MessageLaneSourceClient
<P>
, - TargetClient: MessageLaneTargetClient
<P>
, - >( - &mut self, - reference: RelayMessagesBatchReference, - ) -> Option { - let mut hard_selected_count = 0; - let mut soft_selected_count = 0; - - let mut selected_weight: Weight = 0; - let mut selected_count: MessageNonce = 0; - - let hard_selected_begin_nonce = - reference.nonces_queue[reference.nonces_queue_range.start].1.begin(); - - // relay reference - let mut relay_reference = RelayReference { - lane_source_client: reference.lane_source_client.clone(), - lane_target_client: reference.lane_target_client.clone(), - - selected_reward: P::SourceChainBalance::zero(), - selected_cost: P::SourceChainBalance::zero(), - selected_size: 0, - - total_reward: P::SourceChainBalance::zero(), - total_confirmations_cost: P::SourceChainBalance::zero(), - total_cost: P::SourceChainBalance::zero(), - - hard_selected_begin_nonce, - selected_prepaid_nonces: 0, - selected_unpaid_weight: 0, - - index: 0, - nonce: 0, - details: MessageDetails { - dispatch_weight: 0, - size: 0, - reward: P::SourceChainBalance::zero(), - dispatch_fee_payment: DispatchFeePayment::AtSourceChain, - }, - }; - - let all_ready_nonces = reference - .nonces_queue - .range(reference.nonces_queue_range.clone()) - .flat_map(|(_, ready_nonces)| ready_nonces.iter()) - .enumerate(); - for (index, (nonce, details)) in all_ready_nonces { - relay_reference.index = index; - relay_reference.nonce = *nonce; - relay_reference.details = *details; - - // Since we (hopefully) have some reserves in `max_messages_weight_in_single_batch` - // and `max_messages_size_in_single_batch`, we may still try to submit transaction - // with single message if message overflows these limits. The worst case would be if - // transaction will be rejected by the target runtime, but at least we have tried. - - // limit messages in the batch by weight - let new_selected_weight = match selected_weight.checked_add(details.dispatch_weight) { - Some(new_selected_weight) - if new_selected_weight <= reference.max_messages_weight_in_single_batch => - new_selected_weight, - new_selected_weight if selected_count == 0 => { - log::warn!( - target: "bridge", - "Going to submit message delivery transaction with declared dispatch \ - weight {:?} that overflows maximal configured weight {}", - new_selected_weight, - reference.max_messages_weight_in_single_batch, - ); - new_selected_weight.unwrap_or(Weight::MAX) - }, - _ => break, - }; - - // limit messages in the batch by size - let new_selected_size = match relay_reference.selected_size.checked_add(details.size) { - Some(new_selected_size) - if new_selected_size <= reference.max_messages_size_in_single_batch => - new_selected_size, - new_selected_size if selected_count == 0 => { - log::warn!( - target: "bridge", - "Going to submit message delivery transaction with message \ - size {:?} that overflows maximal configured size {}", - new_selected_size, - reference.max_messages_size_in_single_batch, - ); - new_selected_size.unwrap_or(u32::MAX) - }, - _ => break, - }; - - // limit number of messages in the batch - let new_selected_count = selected_count + 1; - if new_selected_count > reference.max_messages_in_this_batch { - break - } - relay_reference.selected_size = new_selected_size; - - // If dispatch fee has been paid at the source chain, it means that it is **relayer** - // who's paying for dispatch at the target chain AND reward must cover this dispatch - // fee. - // - // If dispatch fee is paid at the target chain, it means that it'll be withdrawn from - // the dispatch origin account AND reward is not covering this fee. 
- // - // So in the latter case we're not adding the dispatch weight to the delivery - // transaction weight. - let mut new_selected_prepaid_nonces = relay_reference.selected_prepaid_nonces; - let new_selected_unpaid_weight = match details.dispatch_fee_payment { - DispatchFeePayment::AtSourceChain => { - new_selected_prepaid_nonces += 1; - relay_reference.selected_unpaid_weight.saturating_add(details.dispatch_weight) - }, - DispatchFeePayment::AtTargetChain => relay_reference.selected_unpaid_weight, - }; - relay_reference.selected_prepaid_nonces = new_selected_prepaid_nonces; - relay_reference.selected_unpaid_weight = new_selected_unpaid_weight; - - // now the message has passed all 'strong' checks, and we CAN deliver it. But do we WANT - // to deliver it? It depends on the relayer strategy. - if self.strategy.decide(&mut relay_reference).await { - soft_selected_count = index + 1; - } - - hard_selected_count = index + 1; - selected_weight = new_selected_weight; - selected_count = new_selected_count; - } - - if hard_selected_count != soft_selected_count { - let hard_selected_end_nonce = - hard_selected_begin_nonce + hard_selected_count as MessageNonce - 1; - let soft_selected_begin_nonce = hard_selected_begin_nonce; - let soft_selected_end_nonce = - soft_selected_begin_nonce + soft_selected_count as MessageNonce - 1; - log::warn!( - target: "bridge", - "Relayer may deliver nonces [{:?}; {:?}], but because of its strategy it has selected \ - nonces [{:?}; {:?}].", - hard_selected_begin_nonce, - hard_selected_end_nonce, - soft_selected_begin_nonce, - soft_selected_end_nonce, - ); - - hard_selected_count = soft_selected_count; - } - - if hard_selected_count != 0 { - if relay_reference.selected_reward != P::SourceChainBalance::zero() && - relay_reference.selected_cost != P::SourceChainBalance::zero() - { - log::trace!( - target: "bridge", - "Expected reward from delivering nonces [{:?}; {:?}] is: {:?} - {:?} = {:?}", - hard_selected_begin_nonce, - hard_selected_begin_nonce + hard_selected_count as MessageNonce - 1, - &relay_reference.selected_reward, - &relay_reference.selected_cost, - relay_reference.selected_reward - relay_reference.selected_cost, - ); - } - - Some(hard_selected_begin_nonce + hard_selected_count as MessageNonce - 1) - } else { - None - } - } -} diff --git a/polkadot/bridges/relays/messages/src/relay_strategy/mix_strategy.rs b/polkadot/bridges/relays/messages/src/relay_strategy/mix_strategy.rs deleted file mode 100644 index 4ac7fe1d0ed..00000000000 --- a/polkadot/bridges/relays/messages/src/relay_strategy/mix_strategy.rs +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Adapter for using `enum RelayerMode` in a context which requires `RelayStrategy`. 
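In essence, the adapter below just forwards every decision to one of the two concrete strategies based on the configured mode. A condensed, synchronous sketch (the real trait is async, generic over the lane clients, and decides on a full `RelayReference`; the bare reward/cost numbers here are only for illustration):

#[derive(Clone, Copy)]
enum RelayerMode {
    Altruistic,
    Rational,
}

trait RelayStrategy {
    /// Decide whether the message under consideration should be delivered.
    fn decide(&mut self, reward: u64, cost: u64) -> bool;
}

struct AltruisticStrategy;
impl RelayStrategy for AltruisticStrategy {
    fn decide(&mut self, _reward: u64, _cost: u64) -> bool {
        true // deliver everything, regardless of profitability
    }
}

struct RationalStrategy;
impl RelayStrategy for RationalStrategy {
    fn decide(&mut self, reward: u64, cost: u64) -> bool {
        reward >= cost // never lose funds
    }
}

/// `MixStrategy`-style adapter: dispatch on the configured `RelayerMode`.
struct MixStrategy {
    relayer_mode: RelayerMode,
}

impl RelayStrategy for MixStrategy {
    fn decide(&mut self, reward: u64, cost: u64) -> bool {
        match self.relayer_mode {
            RelayerMode::Altruistic => AltruisticStrategy.decide(reward, cost),
            RelayerMode::Rational => RationalStrategy.decide(reward, cost),
        }
    }
}

fn main() {
    let mut rational = MixStrategy { relayer_mode: RelayerMode::Rational };
    assert!(rational.decide(10, 7));
    assert!(!rational.decide(5, 7));

    let mut altruistic = MixStrategy { relayer_mode: RelayerMode::Altruistic };
    assert!(altruistic.decide(0, 7));
}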
- -use async_trait::async_trait; - -use crate::{ - message_lane::MessageLane, - message_lane_loop::{ - RelayerMode, SourceClient as MessageLaneSourceClient, - TargetClient as MessageLaneTargetClient, - }, - relay_strategy::{AltruisticStrategy, RationalStrategy, RelayReference, RelayStrategy}, -}; - -/// `RelayerMode` adapter. -#[derive(Clone)] -pub struct MixStrategy { - relayer_mode: RelayerMode, -} - -impl MixStrategy { - /// Create mix strategy instance - pub fn new(relayer_mode: RelayerMode) -> Self { - Self { relayer_mode } - } -} - -#[async_trait] -impl RelayStrategy for MixStrategy { - async fn decide< - P: MessageLane, - SourceClient: MessageLaneSourceClient
<P>
, - TargetClient: MessageLaneTargetClient
<P>
, - >( - &mut self, - reference: &mut RelayReference, - ) -> bool { - match self.relayer_mode { - RelayerMode::Altruistic => AltruisticStrategy.decide(reference).await, - RelayerMode::Rational => RationalStrategy.decide(reference).await, - } - } -} diff --git a/polkadot/bridges/relays/messages/src/relay_strategy/mod.rs b/polkadot/bridges/relays/messages/src/relay_strategy/mod.rs deleted file mode 100644 index d902bd93e5c..00000000000 --- a/polkadot/bridges/relays/messages/src/relay_strategy/mod.rs +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Relayer strategy - -use std::ops::Range; - -use async_trait::async_trait; - -use bp_messages::{MessageNonce, Weight}; - -use crate::{ - message_lane::MessageLane, - message_lane_loop::{ - MessageDetails, MessageDetailsMap, SourceClient as MessageLaneSourceClient, - TargetClient as MessageLaneTargetClient, - }, - message_race_strategy::SourceRangesQueue, -}; - -pub(crate) use self::enforcement_strategy::*; -pub use self::{altruistic_strategy::*, mix_strategy::*, rational_strategy::*}; - -mod altruistic_strategy; -mod enforcement_strategy; -mod mix_strategy; -mod rational_strategy; - -/// Relayer strategy trait -#[async_trait] -pub trait RelayStrategy: 'static + Clone + Send + Sync { - /// The relayer decide how to process nonce by reference. - /// From given set of source nonces, that are ready to be delivered, select nonces - /// to fit into single delivery transaction. - /// - /// The function returns last nonce that must be delivered to the target chain. - async fn decide< - P: MessageLane, - SourceClient: MessageLaneSourceClient
<P>
, - TargetClient: MessageLaneTargetClient
<P>
, - >( - &mut self, - reference: &mut RelayReference, - ) -> bool; -} - -/// Reference data for participating in relay -pub struct RelayReference< - P: MessageLane, - SourceClient: MessageLaneSourceClient
<P>
, - TargetClient: MessageLaneTargetClient
<P>
, -> { - /// The client that is connected to the message lane source node. - pub lane_source_client: SourceClient, - /// The client that is connected to the message lane target node. - pub lane_target_client: TargetClient, - /// Current block reward summary - pub selected_reward: P::SourceChainBalance, - /// Current block cost summary - pub selected_cost: P::SourceChainBalance, - /// Messages size summary - pub selected_size: u32, - - /// Current block reward summary - pub total_reward: P::SourceChainBalance, - /// All confirmations cost - pub total_confirmations_cost: P::SourceChainBalance, - /// Current block cost summary - pub total_cost: P::SourceChainBalance, - - /// Hard check begin nonce - pub hard_selected_begin_nonce: MessageNonce, - /// Count prepaid nonces - pub selected_prepaid_nonces: MessageNonce, - /// Unpaid nonces weight summary - pub selected_unpaid_weight: Weight, - - /// Index by all ready nonces - pub index: usize, - /// Current nonce - pub nonce: MessageNonce, - /// Current nonce details - pub details: MessageDetails, -} - -/// Relay reference data -pub struct RelayMessagesBatchReference< - P: MessageLane, - SourceClient: MessageLaneSourceClient
<P>
, - TargetClient: MessageLaneTargetClient
<P>
, -> { - /// Maximal number of relayed messages in single delivery transaction. - pub max_messages_in_this_batch: MessageNonce, - /// Maximal cumulative dispatch weight of relayed messages in single delivery transaction. - pub max_messages_weight_in_single_batch: Weight, - /// Maximal cumulative size of relayed messages in single delivery transaction. - pub max_messages_size_in_single_batch: u32, - /// The client that is connected to the message lane source node. - pub lane_source_client: SourceClient, - /// The client that is connected to the message lane target node. - pub lane_target_client: TargetClient, - /// Source queue. - pub nonces_queue: SourceRangesQueue< - P::SourceHeaderHash, - P::SourceHeaderNumber, - MessageDetailsMap, - >, - /// Source queue range - pub nonces_queue_range: Range, -} diff --git a/polkadot/bridges/relays/messages/src/relay_strategy/rational_strategy.rs b/polkadot/bridges/relays/messages/src/relay_strategy/rational_strategy.rs deleted file mode 100644 index fd0a1ffafc8..00000000000 --- a/polkadot/bridges/relays/messages/src/relay_strategy/rational_strategy.rs +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Rational relay strategy - -use async_trait::async_trait; -use num_traits::SaturatingAdd; - -use bp_messages::MessageNonce; - -use crate::{ - message_lane::MessageLane, - message_lane_loop::{ - SourceClient as MessageLaneSourceClient, TargetClient as MessageLaneTargetClient, - }, - relay_strategy::{RelayReference, RelayStrategy}, -}; - -/// The relayer will deliver all messages and confirmations as long as he's not losing any -/// funds. -#[derive(Clone)] -pub struct RationalStrategy; - -#[async_trait] -impl RelayStrategy for RationalStrategy { - async fn decide< - P: MessageLane, - SourceClient: MessageLaneSourceClient
<P>
, - TargetClient: MessageLaneTargetClient
<P>
, - >( - &mut self, - reference: &mut RelayReference, - ) -> bool { - // technically, multiple confirmations will be delivered in a single transaction, - // meaning less loses for relayer. But here we don't know the final relayer yet, so - // we're adding a separate transaction for every message. Normally, this cost is covered - // by the message sender. Probably reconsider this? - let confirmation_transaction_cost = - reference.lane_source_client.estimate_confirmation_transaction().await; - - let delivery_transaction_cost = match reference - .lane_target_client - .estimate_delivery_transaction_in_source_tokens( - reference.hard_selected_begin_nonce..= - (reference.hard_selected_begin_nonce + reference.index as MessageNonce), - reference.selected_prepaid_nonces, - reference.selected_unpaid_weight, - reference.selected_size as u32, - ) - .await - { - Ok(v) => v, - Err(err) => { - log::debug!( - target: "bridge", - "Failed to estimate delivery transaction cost: {:?}. No nonces selected for delivery", - err, - ); - return false - }, - }; - - // if it is the first message that makes reward less than cost, let's log it - // if this message makes batch profitable again, let's log it - let is_total_reward_less_than_cost = reference.total_reward < reference.total_cost; - let prev_total_cost = reference.total_cost; - let prev_total_reward = reference.total_reward; - reference.total_confirmations_cost = reference - .total_confirmations_cost - .saturating_add(&confirmation_transaction_cost); - reference.total_reward = reference.total_reward.saturating_add(&reference.details.reward); - reference.total_cost = - reference.total_confirmations_cost.saturating_add(&delivery_transaction_cost); - if !is_total_reward_less_than_cost && reference.total_reward < reference.total_cost { - log::debug!( - target: "bridge", - "Message with nonce {} (reward = {:?}) changes total cost {:?}->{:?} and makes it larger than \ - total reward {:?}->{:?}", - reference.nonce, - reference.details.reward, - prev_total_cost, - reference.total_cost, - prev_total_reward, - reference.total_reward, - ); - } else if is_total_reward_less_than_cost && reference.total_reward >= reference.total_cost { - log::debug!( - target: "bridge", - "Message with nonce {} (reward = {:?}) changes total cost {:?}->{:?} and makes it less than or \ - equal to the total reward {:?}->{:?} (again)", - reference.nonce, - reference.details.reward, - prev_total_cost, - reference.total_cost, - prev_total_reward, - reference.total_reward, - ); - } - - // Rational relayer never want to lose his funds - if reference.total_reward >= reference.total_cost { - reference.selected_reward = reference.total_reward; - reference.selected_cost = reference.total_cost; - return true - } - - false - } -} diff --git a/polkadot/bridges/relays/utils/Cargo.toml b/polkadot/bridges/relays/utils/Cargo.toml deleted file mode 100644 index bb69849da26..00000000000 --- a/polkadot/bridges/relays/utils/Cargo.toml +++ /dev/null @@ -1,32 +0,0 @@ -[package] -name = "relay-utils" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[dependencies] -ansi_term = "0.12" -anyhow = "1.0" -async-std = "1.6.5" -async-trait = "0.1.40" -backoff = "0.2" -isahc = "1.2" -env_logger = "0.8.2" -futures = "0.3.5" -jsonpath_lib = "0.2" -log = "0.4.11" -num-traits = "0.2" -serde_json = "1.0" -sysinfo = "0.15" -time = { version = "0.3", features = ["formatting", "local-offset", "std"] } -tokio = { version = "1.8", features = ["rt"] 
} -thiserror = "1.0.26" - -# Bridge dependencies - -bp-runtime = { path = "../../primitives/runtime" } - -# Substrate dependencies - -substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/polkadot/bridges/relays/utils/src/error.rs b/polkadot/bridges/relays/utils/src/error.rs deleted file mode 100644 index 26f1d0cacef..00000000000 --- a/polkadot/bridges/relays/utils/src/error.rs +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use std::net::AddrParseError; -use thiserror::Error; - -/// Result type used by relay utilities. -pub type Result = std::result::Result; - -/// Relay utilities errors. -#[derive(Error, Debug)] -pub enum Error { - /// Failed to request a float value from HTTP service. - #[error("Failed to fetch token price from remote server: {0}")] - FetchTokenPrice(#[source] anyhow::Error), - /// Failed to parse the response from HTTP service. - #[error("Failed to parse HTTP service response: {0:?}. Response: {1:?}")] - ParseHttp(serde_json::Error, String), - /// Failed to select response value from the Json response. - #[error("Failed to select value from response: {0:?}. Response: {1:?}")] - SelectResponseValue(jsonpath_lib::JsonPathError, String), - /// Failed to parse float value from the selected value. - #[error( - "Failed to parse float value {0:?} from response. It is assumed to be positive and normal" - )] - ParseFloat(f64), - /// Couldn't found value in the JSON response. - #[error("Missing required value from response: {0:?}")] - MissingResponseValue(String), - /// Invalid host address was used for exposing Prometheus metrics. - #[error("Invalid host {0} is used to expose Prometheus metrics: {1}")] - ExposingMetricsInvalidHost(String, AddrParseError), - /// Prometheus error. - #[error("{0}")] - Prometheus(#[from] substrate_prometheus_endpoint::prometheus::Error), -} diff --git a/polkadot/bridges/relays/utils/src/initialize.rs b/polkadot/bridges/relays/utils/src/initialize.rs deleted file mode 100644 index ad69a766e62..00000000000 --- a/polkadot/bridges/relays/utils/src/initialize.rs +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Relayer initialization functions. - -use std::{cell::RefCell, fmt::Display, io::Write}; - -async_std::task_local! { - pub(crate) static LOOP_NAME: RefCell = RefCell::new(String::default()); -} - -/// Initialize relay environment. -pub fn initialize_relay() { - initialize_logger(true); -} - -/// Initialize Relay logger instance. -pub fn initialize_logger(with_timestamp: bool) { - let format = time::format_description::parse( - "[year]-[month]-[day] \ - [hour repr:24]:[minute]:[second] [offset_hour sign:mandatory]", - ) - .expect("static format string is valid"); - - let mut builder = env_logger::Builder::new(); - builder.filter_level(log::LevelFilter::Warn); - builder.filter_module("bridge", log::LevelFilter::Info); - builder.parse_default_env(); - if with_timestamp { - builder.format(move |buf, record| { - let timestamp = time::OffsetDateTime::now_local() - .unwrap_or_else(|_| time::OffsetDateTime::now_utc()); - let timestamp = timestamp.format(&format).unwrap_or_else(|_| timestamp.to_string()); - - let log_level = color_level(record.level()); - let log_target = color_target(record.target()); - let timestamp = if cfg!(windows) { - Either::Left(timestamp) - } else { - Either::Right(ansi_term::Colour::Fixed(8).bold().paint(timestamp)) - }; - - writeln!( - buf, - "{}{} {} {} {}", - loop_name_prefix(), - timestamp, - log_level, - log_target, - record.args(), - ) - }); - } else { - builder.format(move |buf, record| { - let log_level = color_level(record.level()); - let log_target = color_target(record.target()); - - writeln!(buf, "{}{} {} {}", loop_name_prefix(), log_level, log_target, record.args(),) - }); - } - - builder.init(); -} - -/// Initialize relay loop. Must only be called once per every loop task. -pub(crate) fn initialize_loop(loop_name: String) { - LOOP_NAME.with(|g_loop_name| *g_loop_name.borrow_mut() = loop_name); -} - -/// Returns loop name prefix to use in logs. The prefix is initialized with the `initialize_loop` -/// call. 
-fn loop_name_prefix() -> String { - // try_with to avoid panic outside of async-std task context - LOOP_NAME - .try_with(|loop_name| { - // using borrow is ok here, because loop is only initialized once (=> borrow_mut will - // only be called once) - let loop_name = loop_name.borrow(); - if loop_name.is_empty() { - String::new() - } else { - format!("[{}] ", loop_name) - } - }) - .unwrap_or_else(|_| String::new()) -} - -enum Either { - Left(A), - Right(B), -} -impl Display for Either { - fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - match self { - Self::Left(a) => write!(fmt, "{}", a), - Self::Right(b) => write!(fmt, "{}", b), - } - } -} - -fn color_target(target: &str) -> impl Display + '_ { - if cfg!(windows) { - Either::Left(target) - } else { - Either::Right(ansi_term::Colour::Fixed(8).paint(target)) - } -} - -fn color_level(level: log::Level) -> impl Display { - if cfg!(windows) { - Either::Left(level) - } else { - let s = level.to_string(); - use ansi_term::Colour as Color; - Either::Right(match level { - log::Level::Error => Color::Fixed(9).bold().paint(s), - log::Level::Warn => Color::Fixed(11).bold().paint(s), - log::Level::Info => Color::Fixed(10).paint(s), - log::Level::Debug => Color::Fixed(14).paint(s), - log::Level::Trace => Color::Fixed(12).paint(s), - }) - } -} diff --git a/polkadot/bridges/relays/utils/src/lib.rs b/polkadot/bridges/relays/utils/src/lib.rs deleted file mode 100644 index a335be79124..00000000000 --- a/polkadot/bridges/relays/utils/src/lib.rs +++ /dev/null @@ -1,273 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Utilities used by different relays. - -pub use bp_runtime::HeaderId; -pub use error::Error; -pub use relay_loop::{relay_loop, relay_metrics}; - -use backoff::{backoff::Backoff, ExponentialBackoff}; -use futures::future::FutureExt; -use std::time::Duration; -use thiserror::Error; - -/// Max delay after connection-unrelated error happened before we'll try the -/// same request again. -pub const MAX_BACKOFF_INTERVAL: Duration = Duration::from_secs(60); -/// Delay after connection-related error happened before we'll try -/// reconnection again. -pub const CONNECTION_ERROR_DELAY: Duration = Duration::from_secs(10); - -pub mod error; -pub mod initialize; -pub mod metrics; -pub mod relay_loop; - -/// Block number traits shared by all chains that relay is able to serve. 
-pub trait BlockNumberBase: - 'static - + From - + Into - + Ord - + Clone - + Copy - + Default - + Send - + Sync - + std::fmt::Debug - + std::fmt::Display - + std::hash::Hash - + std::ops::Add - + std::ops::Sub - + num_traits::CheckedSub - + num_traits::Saturating - + num_traits::Zero - + num_traits::One -{ -} - -impl BlockNumberBase for T where - T: 'static - + From - + Into - + Ord - + Clone - + Copy - + Default - + Send - + Sync - + std::fmt::Debug - + std::fmt::Display - + std::hash::Hash - + std::ops::Add - + std::ops::Sub - + num_traits::CheckedSub - + num_traits::Saturating - + num_traits::Zero - + num_traits::One -{ -} - -/// Macro that returns (client, Err(error)) tuple from function if result is Err(error). -#[macro_export] -macro_rules! bail_on_error { - ($result: expr) => { - match $result { - (client, Ok(result)) => (client, result), - (client, Err(error)) => return (client, Err(error)), - } - }; -} - -/// Macro that returns (client, Err(error)) tuple from function if result is Err(error). -#[macro_export] -macro_rules! bail_on_arg_error { - ($result: expr, $client: ident) => { - match $result { - Ok(result) => result, - Err(error) => return ($client, Err(error)), - } - }; -} - -/// Error type that can signal connection errors. -pub trait MaybeConnectionError { - /// Returns true if error (maybe) represents connection error. - fn is_connection_error(&self) -> bool; -} - -/// Stringified error that may be either connection-related or not. -#[derive(Error, Debug)] -pub enum StringifiedMaybeConnectionError { - /// The error is connection-related error. - #[error("{0}")] - Connection(String), - /// The error is connection-unrelated error. - #[error("{0}")] - NonConnection(String), -} - -impl StringifiedMaybeConnectionError { - /// Create new stringified connection error. - pub fn new(is_connection_error: bool, error: String) -> Self { - if is_connection_error { - StringifiedMaybeConnectionError::Connection(error) - } else { - StringifiedMaybeConnectionError::NonConnection(error) - } - } -} - -impl MaybeConnectionError for StringifiedMaybeConnectionError { - fn is_connection_error(&self) -> bool { - match *self { - StringifiedMaybeConnectionError::Connection(_) => true, - StringifiedMaybeConnectionError::NonConnection(_) => false, - } - } -} - -/// Exponential backoff for connection-unrelated errors retries. -pub fn retry_backoff() -> ExponentialBackoff { - ExponentialBackoff { - // we do not want relayer to stop - max_elapsed_time: None, - max_interval: MAX_BACKOFF_INTERVAL, - ..Default::default() - } -} - -/// Compact format of IDs vector. -pub fn format_ids(mut ids: impl ExactSizeIterator) -> String { - const NTH_PROOF: &str = "we have checked len; qed"; - match ids.len() { - 0 => "".into(), - 1 => format!("{:?}", ids.next().expect(NTH_PROOF)), - 2 => { - let id0 = ids.next().expect(NTH_PROOF); - let id1 = ids.next().expect(NTH_PROOF); - format!("[{:?}, {:?}]", id0, id1) - }, - len => { - let id0 = ids.next().expect(NTH_PROOF); - let id_last = ids.last().expect(NTH_PROOF); - format!("{}:[{:?} ... {:?}]", len, id0, id_last) - }, - } -} - -/// Stream that emits item every `timeout_ms` milliseconds. -pub fn interval(timeout: Duration) -> impl futures::Stream { - futures::stream::unfold((), move |_| async move { - async_std::task::sleep(timeout).await; - Some(((), ())) - }) -} - -/// Which client has caused error. -#[derive(Debug, Clone, Copy, PartialEq)] -pub enum FailedClient { - /// It is the source client who has caused error. 
- Source, - /// It is the target client who has caused error. - Target, - /// Both clients are failing, or we just encountered some other error that - /// should be treated like that. - Both, -} - -/// Future process result. -#[derive(Debug, Clone, Copy)] -pub enum ProcessFutureResult { - /// Future has been processed successfully. - Success, - /// Future has failed with non-connection error. - Failed, - /// Future has failed with connection error. - ConnectionFailed, -} - -impl ProcessFutureResult { - /// Returns true if result is Success. - pub fn is_ok(self) -> bool { - match self { - ProcessFutureResult::Success => true, - ProcessFutureResult::Failed | ProcessFutureResult::ConnectionFailed => false, - } - } - - /// Returns Ok(true) if future has succeeded. - /// Returns Ok(false) if future has failed with non-connection error. - /// Returns Err if future is `ConnectionFailed`. - pub fn fail_if_connection_error( - self, - failed_client: FailedClient, - ) -> Result { - match self { - ProcessFutureResult::Success => Ok(true), - ProcessFutureResult::Failed => Ok(false), - ProcessFutureResult::ConnectionFailed => Err(failed_client), - } - } -} - -/// Process result of the future from a client. -pub fn process_future_result( - result: Result, - retry_backoff: &mut ExponentialBackoff, - on_success: impl FnOnce(TResult), - go_offline_future: &mut std::pin::Pin<&mut futures::future::Fuse>, - go_offline: impl FnOnce(Duration) -> TGoOfflineFuture, - error_pattern: impl FnOnce() -> String, -) -> ProcessFutureResult -where - TError: std::fmt::Debug + MaybeConnectionError, - TGoOfflineFuture: FutureExt, -{ - match result { - Ok(result) => { - on_success(result); - retry_backoff.reset(); - ProcessFutureResult::Success - }, - Err(error) if error.is_connection_error() => { - log::error!( - target: "bridge", - "{}: {:?}. Going to restart", - error_pattern(), - error, - ); - - retry_backoff.reset(); - go_offline_future.set(go_offline(CONNECTION_ERROR_DELAY).fuse()); - ProcessFutureResult::ConnectionFailed - }, - Err(error) => { - let retry_delay = retry_backoff.next_backoff().unwrap_or(CONNECTION_ERROR_DELAY); - log::error!( - target: "bridge", - "{}: {:?}. Retrying in {}", - error_pattern(), - error, - retry_delay.as_secs_f64(), - ); - - go_offline_future.set(go_offline(retry_delay).fuse()); - ProcessFutureResult::Failed - }, - } -} diff --git a/polkadot/bridges/relays/utils/src/metrics.rs b/polkadot/bridges/relays/utils/src/metrics.rs deleted file mode 100644 index 084f72e7950..00000000000 --- a/polkadot/bridges/relays/utils/src/metrics.rs +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -pub use float_json_value::FloatJsonValueMetric; -pub use global::GlobalMetrics; -pub use substrate_prometheus_endpoint::{ - prometheus::core::{Atomic, Collector}, - register, Counter, CounterVec, Gauge, GaugeVec, Opts, PrometheusError, Registry, F64, I64, U64, -}; - -use async_std::sync::{Arc, RwLock}; -use async_trait::async_trait; -use std::{fmt::Debug, time::Duration}; - -mod float_json_value; -mod global; - -/// Shared reference to `f64` value that is updated by the metric. -pub type F64SharedRef = Arc>>; -/// Int gauge metric type. -pub type IntGauge = Gauge; - -/// Unparsed address that needs to be used to expose Prometheus metrics. -#[derive(Debug, Clone)] -pub struct MetricsAddress { - /// Serve HTTP requests at given host. - pub host: String, - /// Serve HTTP requests at given port. - pub port: u16, -} - -/// Prometheus endpoint MetricsParams. -#[derive(Debug, Clone)] -pub struct MetricsParams { - /// Interface and TCP port to be used when exposing Prometheus metrics. - pub address: Option, - /// Metrics registry. May be `Some(_)` if several components share the same endpoint. - pub registry: Registry, -} - -/// Metric API. -pub trait Metric: Clone + Send + Sync + 'static { - fn register(&self, registry: &Registry) -> Result<(), PrometheusError>; -} - -/// Standalone metric API. -/// -/// Metrics of this kind know how to update themselves, so we may just spawn and forget the -/// asynchronous self-update task. -#[async_trait] -pub trait StandaloneMetric: Metric { - /// Update metric values. - async fn update(&self); - - /// Metrics update interval. - fn update_interval(&self) -> Duration; - - /// Register and spawn metric. Metric is only spawned if it is registered for the first time. - fn register_and_spawn(self, registry: &Registry) -> Result<(), PrometheusError> { - match self.register(registry) { - Ok(()) => { - self.spawn(); - Ok(()) - }, - Err(PrometheusError::AlreadyReg) => Ok(()), - Err(e) => Err(e), - } - } - - /// Spawn the self update task that will keep update metric value at given intervals. - fn spawn(self) { - async_std::task::spawn(async move { - let update_interval = self.update_interval(); - loop { - self.update().await; - async_std::task::sleep(update_interval).await; - } - }); - } -} - -impl Default for MetricsAddress { - fn default() -> Self { - MetricsAddress { host: "127.0.0.1".into(), port: 9616 } - } -} - -impl MetricsParams { - /// Creates metrics params so that metrics are not exposed. - pub fn disabled() -> Self { - MetricsParams { address: None, registry: Registry::new() } - } - - /// Do not expose metrics. - pub fn disable(mut self) -> Self { - self.address = None; - self - } -} - -impl From> for MetricsParams { - fn from(address: Option) -> Self { - MetricsParams { address, registry: Registry::new() } - } -} - -/// Returns metric name optionally prefixed with given prefix. -pub fn metric_name(prefix: Option<&str>, name: &str) -> String { - if let Some(prefix) = prefix { - format!("{}_{}", prefix, name) - } else { - name.into() - } -} - -/// Set value of gauge metric. -/// -/// If value is `Ok(None)` or `Err(_)`, metric would have default value. 
-pub fn set_gauge_value, E: Debug>( - gauge: &Gauge, - value: Result, E>, -) { - gauge.set(match value { - Ok(Some(value)) => { - log::trace!( - target: "bridge-metrics", - "Updated value of metric '{:?}': {:?}", - gauge.desc().first().map(|d| &d.fq_name), - value, - ); - value - }, - Ok(None) => { - log::warn!( - target: "bridge-metrics", - "Failed to update metric '{:?}': value is empty", - gauge.desc().first().map(|d| &d.fq_name), - ); - Default::default() - }, - Err(error) => { - log::warn!( - target: "bridge-metrics", - "Failed to update metric '{:?}': {:?}", - gauge.desc().first().map(|d| &d.fq_name), - error, - ); - Default::default() - }, - }) -} diff --git a/polkadot/bridges/relays/utils/src/metrics/float_json_value.rs b/polkadot/bridges/relays/utils/src/metrics/float_json_value.rs deleted file mode 100644 index 7535cbef986..00000000000 --- a/polkadot/bridges/relays/utils/src/metrics/float_json_value.rs +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::{ - error::{self, Error}, - metrics::{ - metric_name, register, F64SharedRef, Gauge, Metric, PrometheusError, Registry, - StandaloneMetric, F64, - }, -}; - -use async_std::sync::{Arc, RwLock}; -use async_trait::async_trait; -use std::time::Duration; - -/// Value update interval. -const UPDATE_INTERVAL: Duration = Duration::from_secs(60); - -/// Metric that represents float value received from HTTP service as float gauge. -/// -/// The float value returned by the service is assumed to be normal (`f64::is_normal` -/// should return `true`) and strictly positive. -#[derive(Debug, Clone)] -pub struct FloatJsonValueMetric { - url: String, - json_path: String, - metric: Gauge, - shared_value_ref: F64SharedRef, -} - -impl FloatJsonValueMetric { - /// Create new metric instance with given name and help. - pub fn new( - url: String, - json_path: String, - name: String, - help: String, - ) -> Result { - let shared_value_ref = Arc::new(RwLock::new(None)); - Ok(FloatJsonValueMetric { - url, - json_path, - metric: Gauge::new(metric_name(None, &name), help)?, - shared_value_ref, - }) - } - - /// Get shared reference to metric value. - pub fn shared_value_ref(&self) -> F64SharedRef { - self.shared_value_ref.clone() - } - - /// Request value from HTTP service. - async fn request_value(&self) -> anyhow::Result { - use isahc::{AsyncReadResponseExt, HttpClient, Request}; - - let request = Request::get(&self.url).header("Accept", "application/json").body(())?; - let raw_response = HttpClient::new()?.send_async(request).await?.text().await?; - Ok(raw_response) - } - - /// Read value from HTTP service. 
- async fn read_value(&self) -> error::Result { - let raw_response = self.request_value().await.map_err(Error::FetchTokenPrice)?; - parse_service_response(&self.json_path, &raw_response) - } -} - -impl Metric for FloatJsonValueMetric { - fn register(&self, registry: &Registry) -> Result<(), PrometheusError> { - register(self.metric.clone(), registry).map(drop) - } -} - -#[async_trait] -impl StandaloneMetric for FloatJsonValueMetric { - fn update_interval(&self) -> Duration { - UPDATE_INTERVAL - } - - async fn update(&self) { - let value = self.read_value().await; - let maybe_ok = value.as_ref().ok().copied(); - crate::metrics::set_gauge_value(&self.metric, value.map(Some)); - *self.shared_value_ref.write().await = maybe_ok; - } -} - -/// Parse HTTP service response. -fn parse_service_response(json_path: &str, response: &str) -> error::Result { - let json = - serde_json::from_str(response).map_err(|err| Error::ParseHttp(err, response.to_owned()))?; - - let mut selector = jsonpath_lib::selector(&json); - let maybe_selected_value = - selector(json_path).map_err(|err| Error::SelectResponseValue(err, response.to_owned()))?; - let selected_value = maybe_selected_value - .first() - .and_then(|v| v.as_f64()) - .ok_or_else(|| Error::MissingResponseValue(response.to_owned()))?; - if !selected_value.is_normal() || selected_value < 0.0 { - return Err(Error::ParseFloat(selected_value)) - } - - Ok(selected_value) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn parse_service_response_works() { - assert_eq!( - parse_service_response("$.kusama.usd", r#"{"kusama":{"usd":433.05}}"#).map_err(drop), - Ok(433.05), - ); - } - - #[test] - fn parse_service_response_rejects_negative_numbers() { - assert!(parse_service_response("$.kusama.usd", r#"{"kusama":{"usd":-433.05}}"#).is_err()); - } - - #[test] - fn parse_service_response_rejects_zero_numbers() { - assert!(parse_service_response("$.kusama.usd", r#"{"kusama":{"usd":0.0}}"#).is_err()); - } - - #[test] - fn parse_service_response_rejects_nan() { - assert!(parse_service_response("$.kusama.usd", r#"{"kusama":{"usd":NaN}}"#).is_err()); - } -} diff --git a/polkadot/bridges/relays/utils/src/metrics/global.rs b/polkadot/bridges/relays/utils/src/metrics/global.rs deleted file mode 100644 index df90a2c4823..00000000000 --- a/polkadot/bridges/relays/utils/src/metrics/global.rs +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Global system-wide Prometheus metrics exposed by relays. - -use crate::metrics::{ - metric_name, register, Gauge, GaugeVec, Metric, Opts, PrometheusError, Registry, - StandaloneMetric, F64, U64, -}; - -use async_std::sync::{Arc, Mutex}; -use async_trait::async_trait; -use std::time::Duration; -use sysinfo::{ProcessExt, RefreshKind, System, SystemExt}; - -/// Global metrics update interval. 
-const UPDATE_INTERVAL: Duration = Duration::from_secs(10); - -/// Global Prometheus metrics. -#[derive(Debug, Clone)] -pub struct GlobalMetrics { - system: Arc>, - system_average_load: GaugeVec, - process_cpu_usage_percentage: Gauge, - process_memory_usage_bytes: Gauge, -} - -impl GlobalMetrics { - /// Create and register global metrics. - pub fn new() -> Result { - Ok(GlobalMetrics { - system: Arc::new(Mutex::new(System::new_with_specifics(RefreshKind::everything()))), - system_average_load: GaugeVec::new( - Opts::new(metric_name(None, "system_average_load"), "System load average"), - &["over"], - )?, - process_cpu_usage_percentage: Gauge::new( - metric_name(None, "process_cpu_usage_percentage"), - "Process CPU usage", - )?, - process_memory_usage_bytes: Gauge::new( - metric_name(None, "process_memory_usage_bytes"), - "Process memory (resident set size) usage", - )?, - }) - } -} - -impl Metric for GlobalMetrics { - fn register(&self, registry: &Registry) -> Result<(), PrometheusError> { - register(self.system_average_load.clone(), registry)?; - register(self.process_cpu_usage_percentage.clone(), registry)?; - register(self.process_memory_usage_bytes.clone(), registry)?; - Ok(()) - } -} - -#[async_trait] -impl StandaloneMetric for GlobalMetrics { - async fn update(&self) { - // update system-wide metrics - let mut system = self.system.lock().await; - let load = system.get_load_average(); - self.system_average_load.with_label_values(&["1min"]).set(load.one); - self.system_average_load.with_label_values(&["5min"]).set(load.five); - self.system_average_load.with_label_values(&["15min"]).set(load.fifteen); - - // update process-related metrics - let pid = sysinfo::get_current_pid().expect( - "only fails where pid is unavailable (os=unknown || arch=wasm32);\ - relay is not supposed to run in such MetricsParamss;\ - qed", - ); - let is_process_refreshed = system.refresh_process(pid); - match (is_process_refreshed, system.get_process(pid)) { - (true, Some(process_info)) => { - let cpu_usage = process_info.cpu_usage() as f64; - let memory_usage = process_info.memory() * 1024; - log::trace!( - target: "bridge-metrics", - "Refreshed process metrics: CPU={}, memory={}", - cpu_usage, - memory_usage, - ); - - self.process_cpu_usage_percentage.set(if cpu_usage.is_finite() { - cpu_usage - } else { - 0f64 - }); - self.process_memory_usage_bytes.set(memory_usage); - }, - _ => { - log::warn!( - target: "bridge-metrics", - "Failed to refresh process information. Metrics may show obsolete values", - ); - }, - } - } - - fn update_interval(&self) -> Duration { - UPDATE_INTERVAL - } -} diff --git a/polkadot/bridges/relays/utils/src/relay_loop.rs b/polkadot/bridges/relays/utils/src/relay_loop.rs deleted file mode 100644 index 521a6345d3e..00000000000 --- a/polkadot/bridges/relays/utils/src/relay_loop.rs +++ /dev/null @@ -1,268 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::{ - error::Error, - metrics::{Metric, MetricsAddress, MetricsParams}, - FailedClient, MaybeConnectionError, -}; - -use async_trait::async_trait; -use std::{fmt::Debug, future::Future, net::SocketAddr, time::Duration}; -use substrate_prometheus_endpoint::{init_prometheus, Registry}; - -/// Default pause between reconnect attempts. -pub const RECONNECT_DELAY: Duration = Duration::from_secs(10); - -/// Basic blockchain client from relay perspective. -#[async_trait] -pub trait Client: 'static + Clone + Send + Sync { - /// Type of error these clients returns. - type Error: 'static + Debug + MaybeConnectionError + Send + Sync; - - /// Try to reconnect to source node. - async fn reconnect(&mut self) -> Result<(), Self::Error>; -} - -#[async_trait] -impl Client for () { - type Error = crate::StringifiedMaybeConnectionError; - - async fn reconnect(&mut self) -> Result<(), Self::Error> { - Ok(()) - } -} - -/// Returns generic loop that may be customized and started. -pub fn relay_loop(source_client: SC, target_client: TC) -> Loop { - Loop { reconnect_delay: RECONNECT_DELAY, source_client, target_client, loop_metric: None } -} - -/// Returns generic relay loop metrics that may be customized and used in one or several relay -/// loops. -pub fn relay_metrics(params: MetricsParams) -> LoopMetrics<(), (), ()> { - LoopMetrics { - relay_loop: Loop { - reconnect_delay: RECONNECT_DELAY, - source_client: (), - target_client: (), - loop_metric: None, - }, - address: params.address, - registry: params.registry, - loop_metric: None, - } -} - -/// Generic relay loop. -pub struct Loop { - reconnect_delay: Duration, - source_client: SC, - target_client: TC, - loop_metric: Option, -} - -/// Relay loop metrics builder. -pub struct LoopMetrics { - relay_loop: Loop, - address: Option, - registry: Registry, - loop_metric: Option, -} - -impl Loop { - /// Customize delay between reconnect attempts. - pub fn reconnect_delay(mut self, reconnect_delay: Duration) -> Self { - self.reconnect_delay = reconnect_delay; - self - } - - /// Start building loop metrics using given prefix. - pub fn with_metrics(self, params: MetricsParams) -> LoopMetrics { - LoopMetrics { - relay_loop: Loop { - reconnect_delay: self.reconnect_delay, - source_client: self.source_client, - target_client: self.target_client, - loop_metric: None, - }, - address: params.address, - registry: params.registry, - loop_metric: None, - } - } - - /// Run relay loop. - /// - /// This function represents an outer loop, which in turn calls provided `run_loop` function to - /// do actual job. When `run_loop` returns, this outer loop reconnects to failed client (source, - /// target or both) and calls `run_loop` again. 
- pub async fn run(mut self, loop_name: String, run_loop: R) -> Result<(), Error> - where - R: 'static + Send + Fn(SC, TC, Option) -> F, - F: 'static + Send + Future>, - SC: 'static + Client, - TC: 'static + Client, - LM: 'static + Send + Clone, - { - let run_loop_task = async move { - crate::initialize::initialize_loop(loop_name); - - loop { - let loop_metric = self.loop_metric.clone(); - let future_result = - run_loop(self.source_client.clone(), self.target_client.clone(), loop_metric); - let result = future_result.await; - - match result { - Ok(()) => break, - Err(failed_client) => - reconnect_failed_client( - failed_client, - self.reconnect_delay, - &mut self.source_client, - &mut self.target_client, - ) - .await, - } - - log::debug!(target: "bridge", "Restarting relay loop"); - } - - Ok(()) - }; - - async_std::task::spawn(run_loop_task).await - } -} - -impl LoopMetrics { - /// Add relay loop metrics. - /// - /// Loop metrics will be passed to the loop callback. - pub fn loop_metric( - self, - metric: NewLM, - ) -> Result, Error> { - metric.register(&self.registry)?; - - Ok(LoopMetrics { - relay_loop: self.relay_loop, - address: self.address, - registry: self.registry, - loop_metric: Some(metric), - }) - } - - /// Convert into `MetricsParams` structure so that metrics registry may be extended later. - pub fn into_params(self) -> MetricsParams { - MetricsParams { address: self.address, registry: self.registry } - } - - /// Expose metrics using address passed at creation. - /// - /// If passed `address` is `None`, metrics are not exposed. - pub async fn expose(self) -> Result, Error> { - if let Some(address) = self.address { - let socket_addr = SocketAddr::new( - address - .host - .parse() - .map_err(|err| Error::ExposingMetricsInvalidHost(address.host.clone(), err))?, - address.port, - ); - - let registry = self.registry; - async_std::task::spawn(async move { - let runtime = - match tokio::runtime::Builder::new_current_thread().enable_all().build() { - Ok(runtime) => runtime, - Err(err) => { - log::trace!( - target: "bridge-metrics", - "Failed to create tokio runtime. Prometheus meterics are not available: {:?}", - err, - ); - return - }, - }; - - let _ = runtime.block_on(async move { - log::trace!( - target: "bridge-metrics", - "Starting prometheus endpoint at: {:?}", - socket_addr, - ); - let result = init_prometheus(socket_addr, registry).await; - log::trace!( - target: "bridge-metrics", - "Prometheus endpoint has exited with result: {:?}", - result, - ); - }); - }); - } - - Ok(Loop { - reconnect_delay: self.relay_loop.reconnect_delay, - source_client: self.relay_loop.source_client, - target_client: self.relay_loop.target_client, - loop_metric: self.loop_metric, - }) - } -} - -/// Deal with the client who has returned connection error. -pub async fn reconnect_failed_client( - failed_client: FailedClient, - reconnect_delay: Duration, - source_client: &mut impl Client, - target_client: &mut impl Client, -) { - loop { - async_std::task::sleep(reconnect_delay).await; - if failed_client == FailedClient::Both || failed_client == FailedClient::Source { - match source_client.reconnect().await { - Ok(()) => (), - Err(error) => { - log::warn!( - target: "bridge", - "Failed to reconnect to source client. 
Going to retry in {}s: {:?}", - reconnect_delay.as_secs(), - error, - ); - continue - }, - } - } - if failed_client == FailedClient::Both || failed_client == FailedClient::Target { - match target_client.reconnect().await { - Ok(()) => (), - Err(error) => { - log::warn!( - target: "bridge", - "Failed to reconnect to target client. Going to retry in {}s: {:?}", - reconnect_delay.as_secs(), - error, - ); - continue - }, - } - } - - break - } -} diff --git a/polkadot/bridges/rustfmt.toml b/polkadot/bridges/rustfmt.toml deleted file mode 100644 index 082150daf04..00000000000 --- a/polkadot/bridges/rustfmt.toml +++ /dev/null @@ -1,24 +0,0 @@ -# Basic -hard_tabs = true -max_width = 100 -use_small_heuristics = "Max" -# Imports -imports_granularity = "Crate" -reorder_imports = true -# Consistency -newline_style = "Unix" -# Format comments -comment_width = 100 -wrap_comments = true -# Misc -chain_width = 80 -spaces_around_ranges = false -binop_separator = "Back" -reorder_impl_items = false -match_arm_leading_pipes = "Preserve" -match_arm_blocks = false -match_block_trailing_comma = true -trailing_comma = "Vertical" -trailing_semicolon = false -use_field_init_shorthand = true - diff --git a/polkadot/bridges/scripts/add_license.sh b/polkadot/bridges/scripts/add_license.sh deleted file mode 100755 index 49864b47c05..00000000000 --- a/polkadot/bridges/scripts/add_license.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash - -PAT_GPL="^// Copyright.*If not, see \.$" -PAT_OTHER="^// Copyright" - -SCRIPTS_DIR=$( cd "$(dirname "${BASH_SOURCE[0]}")" ; pwd -P ) - -for f in $(find . -type f | egrep '\.(c|cpp|rs)$'); do - HEADER=$(head -16 $f) - if [[ $HEADER =~ $PAT_GPL ]]; then - BODY=$(tail -n +17 $f) - cat $SCRIPTS_DIR/license_header > temp - echo "$BODY" >> temp - mv temp $f - elif [[ $HEADER =~ $PAT_OTHER ]]; then - echo "Other license was found do nothing" - else - echo "$f was missing header" - cat $SCRIPTS_DIR/license_header $f > temp - mv temp $f - fi -done diff --git a/polkadot/bridges/scripts/ci-cache.sh b/polkadot/bridges/scripts/ci-cache.sh deleted file mode 100755 index 040d44fa74a..00000000000 --- a/polkadot/bridges/scripts/ci-cache.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -set -xeu - -echo $CARGO_TARGET_DIR; -mkdir -p $CARGO_TARGET_DIR; -echo "Current Rust nightly version:"; -rustc +nightly --version; -echo "Cached Rust nightly version:"; -if [ ! -f $CARGO_TARGET_DIR/check_nightly_rust ]; then - echo "" > $CARGO_TARGET_DIR/check_nightly_rust; -fi -cat $CARGO_TARGET_DIR/check_nightly_rust; -if [[ $(cat $CARGO_TARGET_DIR/check_nightly_rust) == $(rustc +nightly --version) ]]; then - echo "The Rust nightly version has not changed"; -else - echo "The Rust nightly version has changed. Clearing the cache"; - rm -rf $CARGO_TARGET_DIR/*; -fi diff --git a/polkadot/bridges/scripts/dump-logs.sh b/polkadot/bridges/scripts/dump-logs.sh deleted file mode 100755 index e5a3a403ada..00000000000 --- a/polkadot/bridges/scripts/dump-logs.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -# A script to dump logs from selected important docker containers -# to make it easier to analyze locally. 
- -set -xeu - -DATE=$(date +"%Y-%m-%d-%T") -LOGS_DIR="${DATE//:/-}-logs" -mkdir $LOGS_DIR -cd $LOGS_DIR - -# From $ docker ps --format '{{.Names}}' - -SERVICES=(\ - deployments_relay-messages-millau-to-rialto-generator_1 \ - deployments_relay-messages-rialto-to-millau-generator_1 \ - deployments_relay-messages-millau-to-rialto-lane-00000001_1 \ - deployments_relay-messages-rialto-to-millau-lane-00000001_1 \ - deployments_relay-millau-rialto_1 \ - deployments_relay-headers-westend-to-millau_1 \ - deployments_rialto-node-alice_1 \ - deployments_rialto-node-bob_1 \ - deployments_millau-node-alice_1 \ - deployments_millau-node-bob_1 \ -) - -for SVC in ${SERVICES[*]} -do - SHORT_NAME="${SVC//deployments_/}" - docker logs $SVC &> $SHORT_NAME.log | true -done - -cd - -tar cvjf $LOGS_DIR.tar.bz2 $LOGS_DIR diff --git a/polkadot/bridges/scripts/license_header b/polkadot/bridges/scripts/license_header deleted file mode 100644 index f9b301209bb..00000000000 --- a/polkadot/bridges/scripts/license_header +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - diff --git a/polkadot/bridges/scripts/send-message-from-millau-rialto.sh b/polkadot/bridges/scripts/send-message-from-millau-rialto.sh deleted file mode 100755 index d14b08021ee..00000000000 --- a/polkadot/bridges/scripts/send-message-from-millau-rialto.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash - -# Used for manually sending a message to a running network. -# -# You could for example spin up a full network using the Docker Compose files -# we have (to make sure the message relays are running), but remove the message -# generator service. From there you may submit messages manually using this script. - -MILLAU_PORT="${RIALTO_PORT:-9945}" - -case "$1" in - remark) - RUST_LOG=runtime=trace,substrate-relay=trace,bridge=trace \ - ./target/debug/substrate-relay send-message millau-to-rialto \ - --source-host localhost \ - --source-port $MILLAU_PORT \ - --source-signer //Alice \ - --target-signer //Bob \ - --lane 00000000 \ - --origin Target \ - remark \ - ;; - transfer) - RUST_LOG=runtime=trace,substrate-relay=trace,bridge=trace \ - ./target/debug/substrate-relay send-message millau-to-rialto \ - --source-host localhost \ - --source-port $MILLAU_PORT \ - --source-signer //Alice \ - --target-signer //Bob \ - --lane 00000000 \ - --origin Target \ - transfer \ - --amount 100000000000000 \ - --recipient 5DZvVvd1udr61vL7Xks17TFQ4fi9NiagYLaBobnbPCP14ewA \ - ;; - *) echo "A message type is require. 
Supported messages: remark, transfer."; exit 1;; -esac diff --git a/polkadot/bridges/scripts/send-message-from-rialto-millau.sh b/polkadot/bridges/scripts/send-message-from-rialto-millau.sh deleted file mode 100755 index 10582aa6b3a..00000000000 --- a/polkadot/bridges/scripts/send-message-from-rialto-millau.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash - -# Used for manually sending a message to a running network. -# -# You could for example spin up a full network using the Docker Compose files -# we have (to make sure the message relays are running), but remove the message -# generator service. From there you may submit messages manually using this script. - -RIALTO_PORT="${RIALTO_PORT:-9944}" - -case "$1" in - remark) - RUST_LOG=runtime=trace,substrate-relay=trace,bridge=trace \ - ./target/debug/substrate-relay send-message rialto-to-millau \ - --source-host localhost \ - --source-port $RIALTO_PORT \ - --target-signer //Alice \ - --source-signer //Bob \ - --lane 00000000 \ - --origin Target \ - remark \ - ;; - transfer) - RUST_LOG=runtime=trace,substrate-relay=trace,bridge=trace \ - ./target/debug/substrate-relay send-message rialto-to-millau \ - --source-host localhost \ - --source-port $RIALTO_PORT \ - --target-signer //Alice \ - --source-signer //Bob \ - --lane 00000000 \ - --origin Target \ - transfer \ - --amount 100000000000000 \ - --recipient 5DZvVvd1udr61vL7Xks17TFQ4fi9NiagYLaBobnbPCP14ewA \ - ;; - *) echo "A message type is require. Supported messages: remark, transfer."; exit 1;; -esac diff --git a/polkadot/bridges/scripts/update-weights-setup.sh b/polkadot/bridges/scripts/update-weights-setup.sh deleted file mode 100644 index 72534423d63..00000000000 --- a/polkadot/bridges/scripts/update-weights-setup.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -set -exu - -# Set up the standardized machine and run `update-weights.sh` script. -# The system is assumed to be pristine Ubuntu 20.04 and we install -# all required dependencies. - -# To avoid interruptions you might want to run this script in `screen` cause it will take a while -# to finish. - -# We start off with upgrading the system -apt update && apt dist-upgrade - -# and installing `git` and other required deps. -apt install -y git clang curl libssl-dev llvm libudev-dev screen - -# Now we clone the repository -git clone https://github.com/paritytech/parity-bridges-common.git -cd parity-bridges-common - -# Install rustup & toolchain -curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | bash -s -- -y - -# Source config -source ~/.cargo/env - -# Add nightly and WASM -rustup install nightly -rustup target add wasm32-unknown-unknown --toolchain nightly - -# Update the weights -./scripts/update-weights.sh diff --git a/polkadot/bridges/scripts/update-weights.sh b/polkadot/bridges/scripts/update-weights.sh deleted file mode 100755 index b772386e759..00000000000 --- a/polkadot/bridges/scripts/update-weights.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/sh -# -# Runtime benchmarks for the `pallet-bridge-messages` and `pallet-bridge-grandpa` pallets. -# -# Run this script from root of the repo. 
- -set -eux - -time cargo run --release -p millau-bridge-node --features=runtime-benchmarks -- benchmark \ - --chain=dev \ - --steps=50 \ - --repeat=20 \ - --pallet=pallet_bridge_messages \ - --extrinsic=* \ - --execution=wasm \ - --wasm-execution=Compiled \ - --heap-pages=4096 \ - --output=./modules/messages/src/weights.rs \ - --template=./.maintain/millau-weight-template.hbs - -time cargo run --release -p millau-bridge-node --features=runtime-benchmarks -- benchmark \ - --chain=dev \ - --steps=50 \ - --repeat=20 \ - --pallet=pallet_bridge_grandpa \ - --extrinsic=* \ - --execution=wasm \ - --wasm-execution=Compiled \ - --heap-pages=4096 \ - --output=./modules/grandpa/src/weights.rs \ - --template=./.maintain/millau-weight-template.hbs - -time cargo run --release -p millau-bridge-node --features=runtime-benchmarks -- benchmark \ - --chain=dev \ - --steps=50 \ - --repeat=20 \ - --pallet=pallet_bridge_token_swap \ - --extrinsic=* \ - --execution=wasm \ - --wasm-execution=Compiled \ - --heap-pages=4096 \ - --output=./modules/token-swap/src/weights.rs \ - --template=./.maintain/millau-weight-template.hbs diff --git a/polkadot/bridges/scripts/update_substrate.sh b/polkadot/bridges/scripts/update_substrate.sh deleted file mode 100755 index f7715bda5d1..00000000000 --- a/polkadot/bridges/scripts/update_substrate.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/sh - -# One-liner to update between Substrate releases -# Usage: ./update_substrate.sh 2.0.0-rc6 2.0.0 -set -xeu - -OLD_VERSION=$1 -NEW_VERSION=$2 - -find . -type f -name 'Cargo.toml' -exec sed -i '' -e "s/$OLD_VERSION/$NEW_VERSION/g" {} \; diff --git a/polkadot/doc/testing.md b/polkadot/doc/testing.md index fa7e3915861..78ad77e0e0f 100644 --- a/polkadot/doc/testing.md +++ b/polkadot/doc/testing.md @@ -108,7 +108,6 @@ Fuzzing is an approach to verify correctness against arbitrary or partially stru Currently implemented fuzzing targets: * `erasure-coding` -* `bridges/storage-proof` The tooling of choice here is `honggfuzz-rs` as it allows _fastest_ coverage according to "some paper" which is a positive feature when run as part of PRs. 
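For context on the `honggfuzz-rs` tooling mentioned in the `testing.md` hunk above: a fuzz target is a small binary whose `main` loops over the `fuzz!` macro. A minimal sketch follows; the `std::str::from_utf8` call is only a trivial stand-in for whatever API a real target (such as the remaining `erasure-coding` target) would exercise, and the harness layout is the standard one documented by `honggfuzz-rs`, not code taken from this repository.

```rust
// Minimal honggfuzz-rs harness sketch; run with `cargo hfuzz run <target-name>`.
// The fuzzer repeatedly invokes the closure with arbitrary byte slices and
// records any panic as a crash. The body below is a placeholder, not the
// actual fuzz-target logic from this repository.
use honggfuzz::fuzz;

fn main() {
    loop {
        fuzz!(|data: &[u8]| {
            // Feed the raw bytes into the code under test.
            let _ = std::str::from_utf8(data);
        });
    }
}
```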
diff --git a/polkadot/node/service/src/chain_spec.rs b/polkadot/node/service/src/chain_spec.rs index 3699b656d90..350a0080f18 100644 --- a/polkadot/node/service/src/chain_spec.rs +++ b/polkadot/node/service/src/chain_spec.rs @@ -1066,22 +1066,6 @@ fn rococo_staging_testnet_config_genesis(wasm_binary: &[u8]) -> rococo_runtime:: }, xcm_pallet: Default::default(), transaction_payment: Default::default(), - bridge_rococo_grandpa: rococo_runtime::BridgeRococoGrandpaConfig { - owner: Some(endowed_accounts[0].clone()), - ..Default::default() - }, - bridge_wococo_grandpa: rococo_runtime::BridgeWococoGrandpaConfig { - owner: Some(endowed_accounts[0].clone()), - ..Default::default() - }, - bridge_rococo_messages: rococo_runtime::BridgeRococoMessagesConfig { - owner: Some(endowed_accounts[0].clone()), - ..Default::default() - }, - bridge_wococo_messages: rococo_runtime::BridgeWococoMessagesConfig { - owner: Some(endowed_accounts[0].clone()), - ..Default::default() - }, } } @@ -1629,22 +1613,6 @@ pub fn rococo_testnet_genesis( }, xcm_pallet: Default::default(), transaction_payment: Default::default(), - bridge_rococo_grandpa: rococo_runtime::BridgeRococoGrandpaConfig { - owner: Some(root_key.clone()), - ..Default::default() - }, - bridge_wococo_grandpa: rococo_runtime::BridgeWococoGrandpaConfig { - owner: Some(root_key.clone()), - ..Default::default() - }, - bridge_rococo_messages: rococo_runtime::BridgeRococoMessagesConfig { - owner: Some(root_key.clone()), - ..Default::default() - }, - bridge_wococo_messages: rococo_runtime::BridgeWococoMessagesConfig { - owner: Some(root_key.clone()), - ..Default::default() - }, } } diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml index d23e48b99c6..51ae5b355e1 100644 --- a/polkadot/runtime/rococo/Cargo.toml +++ b/polkadot/runtime/rococo/Cargo.toml @@ -71,16 +71,6 @@ xcm-executor = { package = "xcm-executor", path = "../../xcm/xcm-executor", defa xcm-builder = { package = "xcm-builder", path = "../../xcm/xcm-builder", default-features = false } pallet-xcm = { path = "../../xcm/pallet-xcm", default-features = false } -# Bridge Dependencies -bp-messages = { path = "../../bridges/primitives/messages", default-features = false } -bp-rococo = { path = "../../bridges/primitives/chain-rococo", default-features = false } -bp-runtime = { path = "../../bridges/primitives/runtime", default-features = false } -bp-wococo = { path = "../../bridges/primitives/chain-wococo", default-features = false } -bridge-runtime-common = { path = "../../bridges/bin/runtime-common", default-features = false } -pallet-bridge-dispatch = { path = "../../bridges/modules/dispatch", default-features = false } -pallet-bridge-grandpa = { path = "../../bridges/modules/grandpa", default-features = false } -pallet-bridge-messages = { path = "../../bridges/modules/messages", default-features = false } - # Benchmarking Dependencies frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } hex-literal = { version = "0.3.4", optional = true } @@ -95,11 +85,6 @@ no_std = [] std = [ "authority-discovery-primitives/std", "babe-primitives/std", - "bp-messages/std", - "bp-rococo/std", - "bp-runtime/std", - "bp-wococo/std", - "bridge-runtime-common/std", "parity-scale-codec/std", "scale-info/std", "frame-executive/std", @@ -108,9 +93,6 @@ std = [ "pallet-babe/std", "beefy-primitives/std", "pallet-balances/std", - "pallet-bridge-dispatch/std", - "pallet-bridge-grandpa/std", - 
"pallet-bridge-messages/std", "pallet-collective/std", "pallet-beefy/std", "pallet-beefy-mmr/std", diff --git a/polkadot/runtime/rococo/src/bridge_messages.rs b/polkadot/runtime/rococo/src/bridge_messages.rs deleted file mode 100644 index 5302768b84d..00000000000 --- a/polkadot/runtime/rococo/src/bridge_messages.rs +++ /dev/null @@ -1,527 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Over-bridge messaging support for Rococo <> Wococo bridge. - -pub use self::{at_rococo::*, at_wococo::*}; - -use crate::{Balances, Runtime}; - -use bp_messages::{ - source_chain::{SenderOrigin, TargetHeaderChain}, - target_chain::{ProvedMessages, SourceHeaderChain}, - InboundLaneData, LaneId, Message, MessageNonce, -}; -use bp_rococo::{Balance, Rococo, EXTRA_STORAGE_PROOF_SIZE, MAXIMAL_ENCODED_ACCOUNT_ID_SIZE}; -use bp_runtime::{Chain, ChainId, ROCOCO_CHAIN_ID, WOCOCO_CHAIN_ID}; -use bridge_runtime_common::messages::{ - source as messages_source, target as messages_target, transaction_payment, - BridgedChainWithMessages, ChainWithMessages, MessageBridge, MessageTransaction, - ThisChainWithMessages, -}; -use frame_support::{ - traits::Get, - weights::{Weight, WeightToFee as WeightToFeeT}, - RuntimeDebug, -}; -use rococo_runtime_constants::fee::WeightToFee; -use sp_runtime::FixedU128; -use sp_std::{marker::PhantomData, ops::RangeInclusive}; - -/// Maximal number of pending outbound messages. -const MAXIMAL_PENDING_MESSAGES_AT_OUTBOUND_LANE: MessageNonce = - bp_rococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; -/// Maximal weight of single message delivery confirmation transaction on Rococo/Wococo chain. -/// -/// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_delivery_proof` weight formula -/// computation for the case when single message is confirmed. The result then must be rounded up to account -/// possible future runtime upgrades. -const MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT: Weight = 2_000_000_000; -/// Increase of delivery transaction weight on Rococo/Wococo chain with every additional message byte. -/// -/// This value is a result of `pallet_bridge_messages::WeightInfoExt::storage_proof_size_overhead(1)` call. The -/// result then must be rounded up to account possible future runtime upgrades. -const ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT: Weight = 25_000; -/// Weight of single regular message delivery transaction on Rococo/Wococo chain. -/// -/// This value is a result of `pallet_bridge_messages::Pallet::receive_messages_proof_weight()` call -/// for the case when single message of `pallet_bridge_messages::EXPECTED_DEFAULT_MESSAGE_LENGTH` bytes is delivered. -/// The message must have dispatch weight set to zero. The result then must be rounded up to account -/// possible future runtime upgrades. 
-const DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT: Weight = 1_500_000_000; -/// Weight of pay-dispatch-fee operation for inbound messages at Rococo/Wococo chain. -/// -/// This value corresponds to the result of `pallet_bridge_messages::WeightInfoExt::pay_inbound_dispatch_fee_overhead()` -/// call for your chain. Don't put too much reserve there, because it is used to **decrease** -/// `DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT` cost. So putting large reserve would make delivery transactions cheaper. -const PAY_INBOUND_DISPATCH_FEE_WEIGHT: Weight = 600_000_000; -/// Number of bytes, included in the signed Rococo/Wococo transaction apart from the encoded call itself. -/// -/// Can be computed by subtracting encoded call size from raw transaction size. -const TX_EXTRA_BYTES: u32 = 130; - -/// Rococo chain as it is seen at Rococo. -pub type RococoAtRococo = - RococoLikeChain; - -/// Rococo chain as it is seen at Wococo. -pub type RococoAtWococo = - RococoLikeChain; - -/// Wococo chain as it is seen at Wococo. -pub type WococoAtWococo = - RococoLikeChain; - -/// Wococo chain as it is seen at Rococo. -pub type WococoAtRococo = - RococoLikeChain; - -/// Rococo/Wococo chain from message lane point of view. -#[derive(RuntimeDebug, Clone, Copy)] -pub struct RococoLikeChain { - _bridge_definition: PhantomData, - _at_this_chain_grandpa_pallet_instance: PhantomData, -} - -impl ChainWithMessages for RococoLikeChain { - type Hash = crate::Hash; - type AccountId = crate::AccountId; - type Signer = primitives::v2::AccountPublic; - type Signature = crate::Signature; - type Weight = Weight; - type Balance = Balance; -} - -impl ThisChainWithMessages for RococoLikeChain { - type Origin = crate::Origin; - type Call = crate::Call; - - fn is_message_accepted(_submitter: &crate::Origin, lane: &LaneId) -> bool { - *lane == [0, 0, 0, 0] - } - - fn maximal_pending_messages_at_outbound_lane() -> MessageNonce { - MAXIMAL_PENDING_MESSAGES_AT_OUTBOUND_LANE - } - - fn estimate_delivery_confirmation_transaction() -> MessageTransaction { - let inbound_data_size = InboundLaneData::::encoded_size_hint( - MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, - 1, - 1, - ) - .unwrap_or(u32::MAX); - - MessageTransaction { - dispatch_weight: MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT, - size: inbound_data_size - .saturating_add(EXTRA_STORAGE_PROOF_SIZE) - .saturating_add(TX_EXTRA_BYTES), - } - } - - fn transaction_payment(transaction: MessageTransaction) -> Balance { - // current fee multiplier is used here - transaction_payment( - crate::BlockWeights::get() - .get(frame_support::weights::DispatchClass::Normal) - .base_extrinsic, - crate::TransactionByteFee::get(), - pallet_transaction_payment::Pallet::::next_fee_multiplier(), - |weight| WeightToFee::weight_to_fee(&weight), - transaction, - ) - } -} - -impl BridgedChainWithMessages for RococoLikeChain { - fn maximal_extrinsic_size() -> u32 { - Rococo::max_extrinsic_size() - } - - fn message_weight_limits(_message_payload: &[u8]) -> RangeInclusive { - // we don't want to relay too large messages + keep reserve for future upgrades - let upper_limit = messages_target::maximal_incoming_message_dispatch_weight( - Rococo::max_extrinsic_weight(), - ); - - // we're charging for payload bytes in `With(Wococo | Rococo)MessageBridge::transaction_payment` function - // - // this bridge may be used to deliver all kind of messages, so we're not making any assumptions about - // minimal dispatch weight here - - 0..=upper_limit - } - - fn estimate_delivery_transaction( - message_payload: &[u8], - 
include_pay_dispatch_fee_cost: bool, - message_dispatch_weight: Weight, - ) -> MessageTransaction { - let message_payload_len = u32::try_from(message_payload.len()).unwrap_or(u32::MAX); - let extra_bytes_in_payload = Weight::from(message_payload_len) - .saturating_sub(pallet_bridge_messages::EXPECTED_DEFAULT_MESSAGE_LENGTH.into()); - - MessageTransaction { - dispatch_weight: extra_bytes_in_payload - .saturating_mul(ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT) - .saturating_add(DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT) - .saturating_sub(if include_pay_dispatch_fee_cost { - 0 - } else { - PAY_INBOUND_DISPATCH_FEE_WEIGHT - }) - .saturating_add(message_dispatch_weight), - size: message_payload_len - .saturating_add(EXTRA_STORAGE_PROOF_SIZE) - .saturating_add(TX_EXTRA_BYTES), - } - } - - fn transaction_payment(transaction: MessageTransaction) -> Balance { - // current fee multiplier is used here - bridge_runtime_common::messages::transaction_payment( - crate::BlockWeights::get() - .get(frame_support::weights::DispatchClass::Normal) - .base_extrinsic, - crate::TransactionByteFee::get(), - pallet_transaction_payment::Pallet::::next_fee_multiplier(), - |weight| WeightToFee::weight_to_fee(&weight), - transaction, - ) - } -} - -impl TargetHeaderChain, crate::AccountId> - for RococoLikeChain -where - B: MessageBridge, - B::ThisChain: ChainWithMessages, - B::BridgedChain: ChainWithMessages, - GI: 'static, - Runtime: pallet_bridge_grandpa::Config, - <>::BridgedChain as bp_runtime::Chain>::Hash: - From, -{ - type Error = &'static str; - type MessagesDeliveryProof = - messages_source::FromBridgedChainMessagesDeliveryProof; - - fn verify_message( - payload: &messages_source::FromThisChainMessagePayload, - ) -> Result<(), Self::Error> { - messages_source::verify_chain_message::(payload) - } - - fn verify_messages_delivery_proof( - proof: Self::MessagesDeliveryProof, - ) -> Result<(LaneId, InboundLaneData), Self::Error> { - messages_source::verify_messages_delivery_proof::(proof) - } -} - -impl SourceHeaderChain for RococoLikeChain -where - B: MessageBridge, - B::BridgedChain: ChainWithMessages, - GI: 'static, - Runtime: pallet_bridge_grandpa::Config, - <>::BridgedChain as bp_runtime::Chain>::Hash: - From, -{ - type Error = &'static str; - type MessagesProof = messages_target::FromBridgedChainMessagesProof; - - fn verify_messages_proof( - proof: Self::MessagesProof, - messages_count: u32, - ) -> Result>, Self::Error> { - messages_target::verify_messages_proof::(proof, messages_count) - .and_then(verify_inbound_messages_lane) - } -} - -/// Error that happens when we are receiving incoming message via unexpected lane. -const INBOUND_LANE_DISABLED: &str = "The inbound message lane is disabled."; - -/// Verify that lanes of inbound messages are enabled. -fn verify_inbound_messages_lane( - messages: ProvedMessages>, -) -> Result>, &'static str> { - let allowed_incoming_lanes = [[0, 0, 0, 0]]; - if messages.keys().any(|lane_id| !allowed_incoming_lanes.contains(lane_id)) { - return Err(INBOUND_LANE_DISABLED) - } - Ok(messages) -} - -/// The cost of delivery confirmation transaction. 
-pub struct GetDeliveryConfirmationTransactionFee; - -impl Get for GetDeliveryConfirmationTransactionFee { - fn get() -> Balance { - ::transaction_payment( - RococoAtRococo::estimate_delivery_confirmation_transaction(), - ) - } -} - -impl SenderOrigin for crate::Origin { - fn linked_account(&self) -> Option { - match self.caller { - crate::OriginCaller::system(frame_system::RawOrigin::Signed(ref submitter)) => - Some(submitter.clone()), - crate::OriginCaller::system(frame_system::RawOrigin::Root) | - crate::OriginCaller::system(frame_system::RawOrigin::None) => - crate::RootAccountForPayments::get(), - _ => None, - } - } -} - -/// This module contains definitions that are used by the messages pallet instance, "deployed" at Rococo. -mod at_rococo { - use super::*; - - /// Message bridge that is "deployed" at Rococo chain and connecting it to Wococo chain. - #[derive(RuntimeDebug, Clone, Copy)] - pub struct AtRococoWithWococoMessageBridge; - - impl MessageBridge for AtRococoWithWococoMessageBridge { - const THIS_CHAIN_ID: ChainId = ROCOCO_CHAIN_ID; - const BRIDGED_CHAIN_ID: ChainId = WOCOCO_CHAIN_ID; - const RELAYER_FEE_PERCENT: u32 = 10; - const BRIDGED_MESSAGES_PALLET_NAME: &'static str = - bp_rococo::WITH_ROCOCO_MESSAGES_PALLET_NAME; - - type ThisChain = RococoAtRococo; - type BridgedChain = WococoAtRococo; - - fn bridged_balance_to_this_balance( - bridged_balance: bp_wococo::Balance, - _bridged_to_this_conversion_rate_override: Option, - ) -> bp_rococo::Balance { - bridged_balance - } - } - - /// Message payload for Rococo -> Wococo messages as it is seen at the Rococo. - pub type ToWococoMessagePayload = - messages_source::FromThisChainMessagePayload; - - /// Message verifier for Rococo -> Wococo messages at Rococo. - pub type ToWococoMessageVerifier = - messages_source::FromThisChainMessageVerifier; - - /// Message payload for Wococo -> Rococo messages as it is seen at Rococo. - pub type FromWococoMessagePayload = - messages_target::FromBridgedChainMessagePayload; - - /// Encoded Rococo Call as it comes from Wococo. - pub type FromWococoEncodedCall = - messages_target::FromBridgedChainEncodedMessageCall; - - /// Call-dispatch based message dispatch for Wococo -> Rococo messages. - pub type FromWococoMessageDispatch = messages_target::FromBridgedChainMessageDispatch< - AtRococoWithWococoMessageBridge, - Runtime, - Balances, - crate::AtRococoFromWococoMessagesDispatch, - >; -} - -/// This module contains definitions that are used by the messages pallet instance, "deployed" at Wococo. -mod at_wococo { - use super::*; - - /// Message bridge that is "deployed" at Wococo chain and connecting it to Rococo chain. - #[derive(RuntimeDebug, Clone, Copy)] - pub struct AtWococoWithRococoMessageBridge; - - impl MessageBridge for AtWococoWithRococoMessageBridge { - const THIS_CHAIN_ID: ChainId = WOCOCO_CHAIN_ID; - const BRIDGED_CHAIN_ID: ChainId = ROCOCO_CHAIN_ID; - const RELAYER_FEE_PERCENT: u32 = 10; - const BRIDGED_MESSAGES_PALLET_NAME: &'static str = - bp_wococo::WITH_WOCOCO_MESSAGES_PALLET_NAME; - - type ThisChain = WococoAtWococo; - type BridgedChain = RococoAtWococo; - - fn bridged_balance_to_this_balance( - bridged_balance: bp_rococo::Balance, - _bridged_to_this_conversion_rate_override: Option, - ) -> bp_wococo::Balance { - bridged_balance - } - } - - /// Message payload for Wococo -> Rococo messages as it is seen at the Wococo. - pub type ToRococoMessagePayload = - messages_source::FromThisChainMessagePayload; - - /// Message verifier for Wococo -> Rococo messages at Wococo. 
- pub type ToRococoMessageVerifier = - messages_source::FromThisChainMessageVerifier; - - /// Message payload for Rococo -> Wococo messages as it is seen at Wococo. - pub type FromRococoMessagePayload = - messages_target::FromBridgedChainMessagePayload; - - /// Encoded Wococo Call as it comes from Rococo. - pub type FromRococoEncodedCall = - messages_target::FromBridgedChainEncodedMessageCall; - - /// Call-dispatch based message dispatch for Rococo -> Wococo messages. - pub type FromRococoMessageDispatch = messages_target::FromBridgedChainMessageDispatch< - AtWococoWithRococoMessageBridge, - Runtime, - Balances, - crate::AtWococoFromRococoMessagesDispatch, - >; -} - -#[cfg(test)] -mod tests { - use super::*; - use bp_messages::{target_chain::ProvedLaneMessages, MessageData, MessageKey}; - use bridge_runtime_common::messages; - use parity_scale_codec::{Decode, Encode}; - use sp_runtime::traits::TrailingZeroInput; - - #[test] - fn ensure_rococo_messages_weights_are_correct() { - // **NOTE**: the main purpose of this test is to be sure that any message that is sumbitted - // to (any) inbound lane in Rococo<>Wococo bridge can be delivered to the bridged chain. - // Since we deal with testnets here, in case of failure + urgency: - // - // 1) ping bridges team about this failure (see the CODEOWNERS file if you're unsure who to ping); - // 2) comment/#[ignore] the test. - - // we don't have any knowledge of messages-at-Rococo weights, so we'll be using - // weights of one of our testnets, which should be accurate enough - type Weights = pallet_bridge_messages::weights::MillauWeight; - - pallet_bridge_messages::ensure_weights_are_correct::( - DEFAULT_MESSAGE_DELIVERY_TX_WEIGHT, - ADDITIONAL_MESSAGE_BYTE_DELIVERY_WEIGHT, - MAX_SINGLE_MESSAGE_DELIVERY_CONFIRMATION_TX_WEIGHT, - PAY_INBOUND_DISPATCH_FEE_WEIGHT, - crate::RocksDbWeight::get(), - ); - - let max_incoming_message_proof_size = bp_rococo::EXTRA_STORAGE_PROOF_SIZE.saturating_add( - messages::target::maximal_incoming_message_size(Rococo::max_extrinsic_size()), - ); - pallet_bridge_messages::ensure_able_to_receive_message::( - Rococo::max_extrinsic_size(), - Rococo::max_extrinsic_weight(), - max_incoming_message_proof_size, - messages::target::maximal_incoming_message_dispatch_weight( - Rococo::max_extrinsic_weight(), - ), - ); - - let max_incoming_inbound_lane_data_proof_size = - bp_messages::InboundLaneData::<()>::encoded_size_hint( - bp_rococo::MAXIMAL_ENCODED_ACCOUNT_ID_SIZE, - bp_rococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX as _, - bp_rococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX as _, - ) - .unwrap_or(u32::MAX); - pallet_bridge_messages::ensure_able_to_receive_confirmation::( - Rococo::max_extrinsic_size(), - Rococo::max_extrinsic_weight(), - max_incoming_inbound_lane_data_proof_size, - bp_rococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, - bp_rococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, - crate::RocksDbWeight::get(), - ); - } - - #[test] - fn ensure_rococo_tx_extra_bytes_constant_is_correct() { - // **NOTE**: this test checks that we're computing transaction fee (for bridged chain, which, in - // case of Rococo<>Wococo, means any chain) on-chain properly. 
If this assert fails: - // - // 1) just fix the `TX_EXTRA_BYTES` constant to actual (or sightly rounded up) value; - // 2) (only if it has changed significantly (> x2 times)) ping the bridges team (see the CODEOWNERS - // file if you're unsure who to ping) - - let signed_extra: crate::SignedExtra = ( - frame_system::CheckNonZeroSender::new(), - frame_system::CheckSpecVersion::new(), - frame_system::CheckTxVersion::new(), - frame_system::CheckGenesis::new(), - frame_system::CheckMortality::from(sp_runtime::generic::Era::mortal( - u64::MAX, - u64::MAX, - )), - frame_system::CheckNonce::from(primitives::v2::Nonce::MAX), - frame_system::CheckWeight::new(), - pallet_transaction_payment::ChargeTransactionPayment::from( - primitives::v2::Balance::MAX, - ), - ); - let mut zeroes = TrailingZeroInput::zeroes(); - let extra_bytes_in_transaction = signed_extra.encoded_size() + - crate::Address::decode(&mut zeroes).unwrap().encoded_size() + - crate::Signature::decode(&mut zeroes).unwrap().encoded_size(); - assert!( - TX_EXTRA_BYTES as usize >= extra_bytes_in_transaction, - "Hardcoded number of extra bytes in Rococo transaction {} is lower than actual value: {}", - TX_EXTRA_BYTES, - extra_bytes_in_transaction, - ); - } - - fn proved_messages(lane_id: LaneId) -> ProvedMessages> { - vec![( - lane_id, - ProvedLaneMessages { - lane_state: None, - messages: vec![Message { - key: MessageKey { lane_id, nonce: 0 }, - data: MessageData { payload: vec![], fee: 0 }, - }], - }, - )] - .into_iter() - .collect() - } - - #[test] - fn verify_inbound_messages_lane_succeeds() { - assert_eq!( - verify_inbound_messages_lane(proved_messages([0, 0, 0, 0])), - Ok(proved_messages([0, 0, 0, 0])), - ); - } - - #[test] - fn verify_inbound_messages_lane_fails() { - assert_eq!( - verify_inbound_messages_lane(proved_messages([0, 0, 0, 1])), - Err(INBOUND_LANE_DISABLED), - ); - - let proved_messages = proved_messages([0, 0, 0, 0]) - .into_iter() - .chain(proved_messages([0, 0, 0, 1])) - .collect(); - assert_eq!(verify_inbound_messages_lane(proved_messages), Err(INBOUND_LANE_DISABLED),); - } -} diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index c3dd954889e..2ef7cf0480f 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -58,7 +58,7 @@ use sp_runtime::{ OpaqueKeys, SaturatedConversion, Verify, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, - ApplyExtrinsicResult, FixedU128, KeyTypeId, + ApplyExtrinsicResult, KeyTypeId, }; use sp_staking::SessionIndex; use sp_std::{collections::btree_map::BTreeMap, prelude::*}; @@ -74,16 +74,11 @@ use runtime_parachains::{ session_info as parachains_session_info, shared as parachains_shared, ump as parachains_ump, }; -use bridge_runtime_common::messages::{ - source::estimate_message_dispatch_and_delivery_fee, MessageBridge, -}; - pub use frame_system::Call as SystemCall; /// Constant values used within the runtime. use rococo_runtime_constants::{currency::*, fee::*, time::*}; -mod bridge_messages; mod validator_manager; mod weights; pub mod xcm_config; @@ -231,19 +226,6 @@ construct_runtime! { // Validator Manager pallet. ValidatorManager: validator_manager, - // It might seem strange that we add both sides of the bridge to the same runtime. We do this because this - // runtime as shared by both the Rococo and Wococo chains. When running as Rococo we only use - // `BridgeWococoGrandpa`, and vice versa. 
- BridgeRococoGrandpa: pallet_bridge_grandpa::{Pallet, Call, Storage, Config} = 40, - BridgeWococoGrandpa: pallet_bridge_grandpa::::{Pallet, Call, Storage, Config} = 41, - - // Bridge messages support. The same story as with the bridge grandpa pallet above ^^^ - when we're - // running as Rococo we only use `BridgeWococoMessages`/`BridgeWococoMessagesDispatch`, and vice versa. - BridgeRococoMessages: pallet_bridge_messages::{Pallet, Call, Storage, Event, Config} = 43, - BridgeWococoMessages: pallet_bridge_messages::::{Pallet, Call, Storage, Event, Config} = 44, - BridgeRococoMessagesDispatch: pallet_bridge_dispatch::{Pallet, Event} = 45, - BridgeWococoMessagesDispatch: pallet_bridge_dispatch::::{Pallet, Event} = 46, - // A "council" Collective: pallet_collective = 80, Membership: pallet_membership = 81, @@ -748,137 +730,6 @@ parameter_types! { pub const HeadersToKeep: u32 = 7 * DAYS as u32; } -pub type RococoGrandpaInstance = (); -impl pallet_bridge_grandpa::Config for Runtime { - type BridgedChain = bp_rococo::Rococo; - type MaxRequests = MaxRequests; - type HeadersToKeep = HeadersToKeep; - - type WeightInfo = pallet_bridge_grandpa::weights::MillauWeight; -} - -pub type WococoGrandpaInstance = pallet_bridge_grandpa::Instance1; -impl pallet_bridge_grandpa::Config for Runtime { - type BridgedChain = bp_wococo::Wococo; - type MaxRequests = MaxRequests; - type HeadersToKeep = HeadersToKeep; - - type WeightInfo = pallet_bridge_grandpa::weights::MillauWeight; -} - -// Instance that is "deployed" at Wococo chain. Responsible for dispatching Rococo -> Wococo messages. -pub type AtWococoFromRococoMessagesDispatch = (); -impl pallet_bridge_dispatch::Config for Runtime { - type Event = Event; - type BridgeMessageId = (bp_messages::LaneId, bp_messages::MessageNonce); - type Call = Call; - type CallFilter = frame_support::traits::Everything; - type EncodedCall = bridge_messages::FromRococoEncodedCall; - type SourceChainAccountId = bp_wococo::AccountId; - type TargetChainAccountPublic = sp_runtime::MultiSigner; - type TargetChainSignature = sp_runtime::MultiSignature; - type AccountIdConverter = bp_rococo::AccountIdConverter; -} - -// Instance that is "deployed" at Rococo chain. Responsible for dispatching Wococo -> Rococo messages. -pub type AtRococoFromWococoMessagesDispatch = pallet_bridge_dispatch::Instance1; -impl pallet_bridge_dispatch::Config for Runtime { - type Event = Event; - type BridgeMessageId = (bp_messages::LaneId, bp_messages::MessageNonce); - type Call = Call; - type CallFilter = frame_support::traits::Everything; - type EncodedCall = bridge_messages::FromWococoEncodedCall; - type SourceChainAccountId = bp_rococo::AccountId; - type TargetChainAccountPublic = sp_runtime::MultiSigner; - type TargetChainSignature = sp_runtime::MultiSignature; - type AccountIdConverter = bp_wococo::AccountIdConverter; -} - -parameter_types! { - pub const MaxMessagesToPruneAtOnce: bp_messages::MessageNonce = 8; - pub const MaxUnrewardedRelayerEntriesAtInboundLane: bp_messages::MessageNonce = - bp_rococo::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; - pub const MaxUnconfirmedMessagesAtInboundLane: bp_messages::MessageNonce = - bp_rococo::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; - pub const RootAccountForPayments: Option = None; - pub const RococoChainId: bp_runtime::ChainId = bp_runtime::ROCOCO_CHAIN_ID; - pub const WococoChainId: bp_runtime::ChainId = bp_runtime::WOCOCO_CHAIN_ID; -} - -// Instance that is "deployed" at Wococo chain. 
Responsible for sending Wococo -> Rococo messages -// and receiving Rococo -> Wococo messages. -pub type AtWococoWithRococoMessagesInstance = (); -impl pallet_bridge_messages::Config for Runtime { - type Event = Event; - type BridgedChainId = RococoChainId; - type WeightInfo = pallet_bridge_messages::weights::MillauWeight; - type Parameter = (); - type MaxMessagesToPruneAtOnce = MaxMessagesToPruneAtOnce; - type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; - type MaxUnconfirmedMessagesAtInboundLane = MaxUnconfirmedMessagesAtInboundLane; - - type OutboundPayload = crate::bridge_messages::ToRococoMessagePayload; - type OutboundMessageFee = bp_wococo::Balance; - - type InboundPayload = crate::bridge_messages::FromRococoMessagePayload; - type InboundMessageFee = bp_rococo::Balance; - type InboundRelayer = bp_rococo::AccountId; - - type AccountIdConverter = bp_wococo::AccountIdConverter; - - type TargetHeaderChain = crate::bridge_messages::RococoAtWococo; - type LaneMessageVerifier = crate::bridge_messages::ToRococoMessageVerifier; - type MessageDeliveryAndDispatchPayment = - pallet_bridge_messages::instant_payments::InstantCurrencyPayments< - Runtime, - AtWococoWithRococoMessagesInstance, - pallet_balances::Pallet, - crate::bridge_messages::GetDeliveryConfirmationTransactionFee, - >; - type OnDeliveryConfirmed = (); - type OnMessageAccepted = (); - - type SourceHeaderChain = crate::bridge_messages::RococoAtWococo; - type MessageDispatch = crate::bridge_messages::FromRococoMessageDispatch; -} - -// Instance that is "deployed" at Rococo chain. Responsible for sending Rococo -> Wococo messages -// and receiving Wococo -> Rococo messages. -pub type AtRococoWithWococoMessagesInstance = pallet_bridge_messages::Instance1; -impl pallet_bridge_messages::Config for Runtime { - type Event = Event; - type BridgedChainId = WococoChainId; - type WeightInfo = pallet_bridge_messages::weights::MillauWeight; - type Parameter = (); - type MaxMessagesToPruneAtOnce = MaxMessagesToPruneAtOnce; - type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; - type MaxUnconfirmedMessagesAtInboundLane = MaxUnconfirmedMessagesAtInboundLane; - - type OutboundPayload = crate::bridge_messages::ToWococoMessagePayload; - type OutboundMessageFee = bp_rococo::Balance; - - type InboundPayload = crate::bridge_messages::FromWococoMessagePayload; - type InboundMessageFee = bp_wococo::Balance; - type InboundRelayer = bp_wococo::AccountId; - - type AccountIdConverter = bp_rococo::AccountIdConverter; - - type TargetHeaderChain = crate::bridge_messages::WococoAtRococo; - type LaneMessageVerifier = crate::bridge_messages::ToWococoMessageVerifier; - type MessageDeliveryAndDispatchPayment = - pallet_bridge_messages::instant_payments::InstantCurrencyPayments< - Runtime, - AtRococoWithWococoMessagesInstance, - pallet_balances::Pallet, - crate::bridge_messages::GetDeliveryConfirmationTransactionFee, - >; - type OnDeliveryConfirmed = (); - type OnMessageAccepted = (); - - type SourceHeaderChain = crate::bridge_messages::WococoAtRococo; - type MessageDispatch = crate::bridge_messages::FromWococoMessageDispatch; -} - parameter_types! { pub const EndingPeriod: BlockNumber = 1 * HOURS; pub const SampleLength: BlockNumber = 1; @@ -1452,90 +1303,6 @@ sp_api::impl_runtime_apis! 
{ } } - impl bp_rococo::RococoFinalityApi for Runtime { - fn best_finalized() -> (bp_rococo::BlockNumber, bp_rococo::Hash) { - let header = BridgeRococoGrandpa::best_finalized(); - (header.number, header.hash()) - } - } - - impl bp_wococo::WococoFinalityApi for Runtime { - fn best_finalized() -> (bp_wococo::BlockNumber, bp_wococo::Hash) { - let header = BridgeWococoGrandpa::best_finalized(); - (header.number, header.hash()) - } - } - - impl bp_rococo::ToRococoOutboundLaneApi for Runtime { - fn estimate_message_delivery_and_dispatch_fee( - _lane_id: bp_messages::LaneId, - payload: bridge_messages::ToWococoMessagePayload, - rococo_to_wococo_conversion_rate: Option, - ) -> Option { - estimate_message_dispatch_and_delivery_fee::( - &payload, - bridge_messages::AtWococoWithRococoMessageBridge::RELAYER_FEE_PERCENT, - rococo_to_wococo_conversion_rate, - ).ok() - } - - fn message_details( - lane: bp_messages::LaneId, - begin: bp_messages::MessageNonce, - end: bp_messages::MessageNonce, - ) -> Vec> { - (begin..=end).filter_map(|nonce| { - let message_data = BridgeRococoMessages::outbound_message_data(lane, nonce)?; - let decoded_payload = bridge_messages::ToRococoMessagePayload::decode( - &mut &message_data.payload[..] - ).ok()?; - Some(bp_messages::MessageDetails { - nonce, - dispatch_weight: decoded_payload.weight, - size: message_data.payload.len() as _, - delivery_and_dispatch_fee: message_data.fee, - dispatch_fee_payment: decoded_payload.dispatch_fee_payment, - }) - }) - .collect() - } - } - - impl bp_wococo::ToWococoOutboundLaneApi for Runtime { - fn estimate_message_delivery_and_dispatch_fee( - _lane_id: bp_messages::LaneId, - payload: bridge_messages::ToWococoMessagePayload, - wococo_to_rococo_conversion_rate: Option, - ) -> Option { - estimate_message_dispatch_and_delivery_fee::( - &payload, - bridge_messages::AtRococoWithWococoMessageBridge::RELAYER_FEE_PERCENT, - wococo_to_rococo_conversion_rate, - ).ok() - } - - fn message_details( - lane: bp_messages::LaneId, - begin: bp_messages::MessageNonce, - end: bp_messages::MessageNonce, - ) -> Vec> { - (begin..=end).filter_map(|nonce| { - let message_data = BridgeWococoMessages::outbound_message_data(lane, nonce)?; - let decoded_payload = bridge_messages::ToWococoMessagePayload::decode( - &mut &message_data.payload[..] - ).ok()?; - Some(bp_messages::MessageDetails { - nonce, - dispatch_weight: decoded_payload.weight, - size: message_data.payload.len() as _, - delivery_and_dispatch_fee: message_data.fee, - dispatch_fee_payment: decoded_payload.dispatch_fee_payment, - }) - }) - .collect() - } - } - impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { fn account_nonce(account: AccountId) -> Nonce { System::account_nonce(account) -- GitLab