diff --git a/.config/lingua.dic b/.config/lingua.dic index 46ed6417520de590afe2dd1bc01e66cad777a503..0ef7f9bef73e1a67bcf610bc98b4c01f97110f44 100644 --- a/.config/lingua.dic +++ b/.config/lingua.dic @@ -84,7 +84,7 @@ SS58Prefix STALL_SYNC_TIMEOUT SURI ServiceFactory/MS -TransactionExtension +SignedExtension Stringified Submitter1 S|N diff --git a/.github/dependabot.yml b/.github/dependabot.yml index c0c8ea6480205c2266d02ba7bf1b2cd602e65a40..8319805c64e71d15e80f5d10a97c5f8aeab12969 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -8,7 +8,35 @@ updates: timezone: Europe/Berlin open-pull-requests-limit: 20 ignore: - # Substrate (+ Polkadot/Cumulus pallets) dependencies + # Bridges polkadot-sdk dependencies + - dependency-name: bp-* + versions: + - ">= 0" + - dependency-name: bridge-runtime-common + versions: + - ">= 0" + - dependency-name: equivocation-detector + versions: + - ">= 0" + - dependency-name: finality-relay + versions: + - ">= 0" + - dependency-name: messages-relay + versions: + - ">= 0" + - dependency-name: parachains-relay + versions: + - ">= 0" + - dependency-name: relay-substrate-client + versions: + - ">= 0" + - dependency-name: relay-utils + versions: + - ">= 0" + - dependency-name: substrate-relay-helper + versions: + - ">= 0" + # Substrate polkadot-sdk (+ Polkadot/Cumulus pallets) dependencies - dependency-name: beefy-* versions: - ">= 0" @@ -42,7 +70,7 @@ updates: - dependency-name: binary-merkle-tree versions: - ">= 0" - # Polkadot dependencies + # Polkadot polkadot-sdk dependencies - dependency-name: kusama-* versions: - ">= 0" @@ -52,7 +80,7 @@ updates: - dependency-name: xcm* versions: - ">= 0" - # Cumulus dependencies + # Cumulus polkadot-sdk dependencies - dependency-name: cumulus-* versions: - ">= 0" diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 892de67040809874aec7a8c74d03a33e4319e48f..ae6195e583b41292b7fc302b503d13c2ccfb28e6 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -10,7 +10,7 @@ variables: GIT_DEPTH: 
100 CARGO_INCREMENTAL: 0 ARCH: "x86_64" - CI_IMAGE: "paritytech/bridges-ci:production" + CI_IMAGE: "paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408" RUST_BACKTRACE: full BUILDAH_IMAGE: "quay.io/buildah/stable:v1.29" BUILDAH_COMMAND: "buildah --storage-driver overlay2" @@ -121,7 +121,7 @@ check: <<: *docker-env <<: *test-refs script: &check-script - - SKIP_WASM_BUILD=1 time cargo check --locked --verbose --workspace --features runtime-benchmarks + - SKIP_WASM_BUILD=1 time cargo check --locked --verbose --workspace check-nightly: stage: test @@ -142,7 +142,7 @@ test: # Enable this, when you see: "`cargo metadata` can not fail on project `Cargo.toml`" #- time cargo fetch --manifest-path=`cargo metadata --format-version=1 | jq --compact-output --raw-output ".packages[] | select(.name == \"polkadot-runtime\").manifest_path"` #- time cargo fetch --manifest-path=`cargo metadata --format-version=1 | jq --compact-output --raw-output ".packages[] | select(.name == \"kusama-runtime\").manifest_path"` - - CARGO_NET_OFFLINE=true SKIP_WASM_BUILD=1 time cargo test --verbose --workspace --features runtime-benchmarks + - CARGO_NET_OFFLINE=true SKIP_WASM_BUILD=1 time cargo test --verbose --workspace test-nightly: stage: test diff --git a/Cargo.lock b/Cargo.lock index 450e8b4f9f2568edcb8d30be84740a74565ddac2..f82543d8dd040928b31a564581760ce9e0c3a065 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -18,16 +18,16 @@ version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97" dependencies = [ - "gimli", + "gimli 0.27.3", ] [[package]] name = "addr2line" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4fa78e18c64fce05e902adecd7a5eed15a5e0a3439f7b0e169f0252214865e3" +checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" dependencies = [ - "gimli", + "gimli 0.28.1", ] [[package]] @@ -36,15 
+36,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" -[[package]] -name = "aead" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b613b8e1e3cf911a086f53f03bf286f52fd7a7258e4fa606f0ef220d39d8877" -dependencies = [ - "generic-array 0.14.7", -] - [[package]] name = "aead" version = "0.5.2" @@ -57,74 +48,48 @@ dependencies = [ [[package]] name = "aes" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" -dependencies = [ - "cfg-if 1.0.0", - "cipher 0.3.0", - "cpufeatures", - "opaque-debug 0.3.0", -] - -[[package]] -name = "aes" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac1f845298e95f983ff1944b728ae08b8cebab80d684f0a832ed0fc74dfa27e2" +checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cipher 0.4.4", "cpufeatures", ] [[package]] name = "aes-gcm" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df5f85a83a7d8b0442b6aa7b504b8212c1733da07b98aae43d4bc21b2cb3cdf6" -dependencies = [ - "aead 0.4.3", - "aes 0.7.5", - "cipher 0.3.0", - "ctr 0.8.0", - "ghash 0.4.4", - "subtle 2.4.1", -] - -[[package]] -name = "aes-gcm" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "209b47e8954a928e1d72e86eca7000ebb6655fe1436d33eefc2201cad027e237" +checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" dependencies = [ - "aead 0.5.2", - "aes 0.8.3", + "aead", + "aes", "cipher 0.4.4", - "ctr 0.9.2", - "ghash 0.5.0", - "subtle 2.4.1", + "ctr", + "ghash", + "subtle 2.5.0", ] [[package]] name = "ahash" -version = "0.7.6" +version = "0.7.8" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" +checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" dependencies = [ - "getrandom 0.2.10", + "getrandom 0.2.14", "once_cell", "version_check", ] [[package]] name = "ahash" -version = "0.8.7" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ - "cfg-if 1.0.0", - "getrandom 0.2.10", + "cfg-if", + "getrandom 0.2.14", "once_cell", "version_check", "zerocopy", @@ -132,18 +97,18 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.0.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] [[package]] name = "allocator-api2" -version = "0.2.16" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" +checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "android-tzdata" @@ -171,9 +136,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.11" +version = "0.6.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e2e1ebcb11de5c03c67de28a7df593d32191b44939c482e97702baaaa6ab6a5" +checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb" dependencies = [ "anstyle", "anstyle-parse", @@ -219,9 +184,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.80" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5ad32ce52e4161730f7098c077cd2ed6229b5804ccf99e5366be1ab72a98b4e1" +checksum = "f538837af36e6f6a9be0faa67f9a314f8119e4e4b5867c6ab40ed60360142519" [[package]] name = "approx" @@ -239,11 +204,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21cc1548309245035eb18aa7f0967da6bc65587005170c56e6ef2788a4cf3f4e" dependencies = [ "include_dir", - "itertools", + "itertools 0.10.5", "proc-macro-error", - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.52", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -332,7 +297,7 @@ dependencies = [ "ark-std", "derivative", "hashbrown 0.13.2", - "itertools", + "itertools 0.10.5", "num-traits", "rayon", "zeroize", @@ -400,7 +365,7 @@ dependencies = [ "ark-std", "derivative", "digest 0.10.7", - "itertools", + "itertools 0.10.5", "num-bigint", "num-traits", "paste", @@ -414,7 +379,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" dependencies = [ - "quote 1.0.35", + "quote 1.0.36", "syn 1.0.109", ] @@ -426,8 +391,8 @@ checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" dependencies = [ "num-bigint", "num-traits", - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "syn 1.0.109", ] @@ -504,8 +469,8 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae3281bc6d0fd7e549af32b52511e1302185bd688fd3359fa36423346ff682ea" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "syn 1.0.109", ] @@ -516,7 +481,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" dependencies = [ "num-traits", - "rand", + "rand 0.8.5", "rayon", ] @@ -541,9 +506,9 @@ checksum = "f52f63c5c1316a16a4b35eaac8b76a98248961a533f061684cb2a7cb0eafb6c6" [[package]] 
name = "array-bytes" -version = "6.1.0" +version = "6.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b1c5a481ec30a5abd8dfbd94ab5cf1bb4e9a66be7f1b3b322f2f1170c200fd" +checksum = "6f840fb7195bcfc5e17ea40c26e5ce6d5b9ce5d584466e17703209657e459ae0" [[package]] name = "arrayref" @@ -588,8 +553,8 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "syn 1.0.109", "synstructure", ] @@ -600,8 +565,8 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "syn 1.0.109", ] @@ -611,7 +576,7 @@ version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a3203e79f4dd9bdda415ed03cf14dae5a2bf775c683a00f94e9cd1faf0f596e5" dependencies = [ - "quote 1.0.35", + "quote 1.0.36", "syn 1.0.109", ] @@ -622,48 +587,59 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" dependencies = [ "concurrent-queue", - "event-listener", + "event-listener 2.5.3", + "futures-core", +] + +[[package]] +name = "async-channel" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "136d4d23bcc79e27423727b36823d86233aad06dfea531837b038394d11e9928" +dependencies = [ + "concurrent-queue", + "event-listener 5.3.0", + "event-listener-strategy 0.5.1", "futures-core", + "pin-project-lite 0.2.14", ] [[package]] name = "async-executor" -version = "1.5.1" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6fa3dc5f2a8564f07759c008b9109dc0d39de92a88d5588b8a5036d286383afb" +checksum = "b10202063978b3351199d68f8b22c4e47e4b1b822f8d43fd862d5ea8c006b29a" dependencies = [ - "async-lock", "async-task", "concurrent-queue", - "fastrand 1.9.0", - "futures-lite", + "fastrand 2.0.2", + "futures-lite 2.3.0", "slab", ] [[package]] name = "async-fs" -version = "1.6.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "279cf904654eeebfa37ac9bb1598880884924aab82e290aa65c9e77a0e142e06" +checksum = "bc19683171f287921f2405677dd2ed2549c3b3bda697a563ebc3a121ace2aba1" dependencies = [ - "async-lock", - "autocfg", + "async-lock 3.3.0", "blocking", - "futures-lite", + "futures-lite 2.3.0", ] [[package]] name = "async-global-executor" -version = "2.3.1" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1b6f5d7df27bd294849f8eec66ecfc63d11814df7a4f5d74168a2394467b776" +checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" dependencies = [ - "async-channel", + "async-channel 2.2.1", "async-executor", - "async-io", - "async-lock", + "async-io 2.3.2", + "async-lock 3.3.0", "blocking", - "futures-lite", + "futures-lite 2.3.0", "once_cell", ] @@ -673,56 +649,105 @@ version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" dependencies = [ - "async-lock", + "async-lock 2.8.0", "autocfg", - "cfg-if 1.0.0", + "cfg-if", "concurrent-queue", - "futures-lite", + "futures-lite 1.13.0", "log", "parking", - "polling", + "polling 2.8.0", "rustix 0.37.27", "slab", - "socket2 0.4.9", + "socket2 0.4.10", "waker-fn", ] +[[package]] +name = "async-io" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcccb0f599cfa2f8ace422d3555572f47424da5648a4382a9dd0310ff8210884" +dependencies = [ + "async-lock 3.3.0", + "cfg-if", + "concurrent-queue", + 
"futures-io", + "futures-lite 2.3.0", + "parking", + "polling 3.6.0", + "rustix 0.38.32", + "slab", + "tracing", + "windows-sys 0.52.0", +] + +[[package]] +name = "async-lock" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" +dependencies = [ + "event-listener 2.5.3", +] + [[package]] name = "async-lock" -version = "2.7.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa24f727524730b077666307f2734b4a1a1c57acb79193127dcc8914d5242dd7" +checksum = "d034b430882f8381900d3fe6f0aaa3ad94f2cb4ac519b429692a1bc2dda4ae7b" dependencies = [ - "event-listener", + "event-listener 4.0.3", + "event-listener-strategy 0.4.0", + "pin-project-lite 0.2.14", ] [[package]] name = "async-net" -version = "1.7.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4051e67316bc7eff608fe723df5d32ed639946adcd69e07df41fd42a7b411f1f" +checksum = "b948000fad4873c1c9339d60f2623323a0cfd3816e5181033c6a5cb68b2accf7" dependencies = [ - "async-io", - "autocfg", + "async-io 2.3.2", "blocking", - "futures-lite", + "futures-lite 2.3.0", ] [[package]] name = "async-process" -version = "1.7.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a9d28b1d97e08915212e2e45310d47854eafa69600756fc735fb788f75199c9" +checksum = "cad07b3443bfa10dcddf86a452ec48949e8e7fedf7392d82de3969fda99e90ed" dependencies = [ - "async-io", - "async-lock", - "autocfg", + "async-channel 2.2.1", + "async-io 2.3.2", + "async-lock 3.3.0", + "async-signal", + "async-task", "blocking", - "cfg-if 1.0.0", - "event-listener", - "futures-lite", - "rustix 0.37.27", - "signal-hook", + "cfg-if", + "event-listener 5.3.0", + "futures-lite 2.3.0", + "rustix 0.38.32", + "tracing", + "windows-sys 0.52.0", +] + +[[package]] +name = "async-signal" +version = "0.2.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e47d90f65a225c4527103a8d747001fc56e375203592b25ad103e1ca13124c5" +dependencies = [ + "async-io 2.3.2", + "async-lock 2.8.0", + "atomic-waker", + "cfg-if", + "futures-core", + "futures-io", + "rustix 0.38.32", + "signal-hook-registry", + "slab", "windows-sys 0.48.0", ] @@ -733,21 +758,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" dependencies = [ "async-attributes", - "async-channel", + "async-channel 1.9.0", "async-global-executor", - "async-io", - "async-lock", + "async-io 1.13.0", + "async-lock 2.8.0", "crossbeam-utils", "futures-channel", "futures-core", "futures-io", - "futures-lite", + "futures-lite 1.13.0", "gloo-timers", "kv-log-macro", "log", "memchr", "once_cell", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.14", "pin-utils", "slab", "wasm-bindgen-futures", @@ -755,45 +780,45 @@ dependencies = [ [[package]] name = "async-task" -version = "4.4.0" +version = "4.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecc7ab41815b3c653ccd2978ec3255c81349336702dfdf62ee6f7069b12a3aae" +checksum = "fbb36e985947064623dbd357f727af08ffd077f93d696782f3c56365fa2e2799" [[package]] name = "async-trait" -version = "0.1.77" +version = "0.1.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" +checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.52", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] name = "asynchronous-codec" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06a0daa378f5fd10634e44b0a29b2a87b890657658e072a30d6f26e57ddee182" +checksum = 
"4057f2c32adbb2fc158e22fb38433c8e9bbf76b75a4732c7c0cbaf695fb65568" dependencies = [ "bytes", "futures-sink", "futures-util", "memchr", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.14", ] [[package]] -name = "atomic" -version = "0.5.3" +name = "atomic-take" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c59bdb34bc650a32731b31bd8f0829cc15d24a708ee31559e0bb34f2bc320cba" +checksum = "a8ab6b55fe97976e46f91ddbed8d147d966475dc29b2032757ba47e02376fbc3" [[package]] name = "atomic-waker" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1181e1e0d1fce796a03db1ae795d67167da795f9cf4a39c37589e85ef57f26d3" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" [[package]] name = "atty" @@ -808,9 +833,9 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +checksum = "f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80" [[package]] name = "backoff" @@ -818,23 +843,23 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1" dependencies = [ - "getrandom 0.2.10", + "getrandom 0.2.14", "instant", - "rand", + "rand 0.8.5", ] [[package]] name = "backtrace" -version = "0.3.68" +version = "0.3.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4319208da049c43661739c5fade2ba182f09d1dc2299b32298d3a31692b17e12" +checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" dependencies = [ - "addr2line 0.20.0", + "addr2line 0.21.0", "cc", - "cfg-if 1.0.0", + "cfg-if", "libc", "miniz_oxide", - "object 0.31.1", + "object 0.32.2", "rustc-demangle", ] @@ -852,10 +877,10 @@ dependencies = [ "dleq_vrf", "fflonk", 
"merlin", - "rand_chacha", + "rand_chacha 0.3.1", "rand_core 0.6.4", "ring 0.1.0", - "sha2 0.10.7", + "sha2 0.10.8", "sp-ark-bls12-381", "sp-ark-ed-on-bls12-381-bandersnatch", "zeroize", @@ -887,9 +912,15 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.2" +version = "0.21.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" + +[[package]] +name = "base64" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d" +checksum = "9475866fec1451be56a3c2400fd081ff546538961565ccb5b7142cbd22bc7a51" [[package]] name = "base64ct" @@ -906,15 +937,6 @@ dependencies = [ "serde", ] -[[package]] -name = "binary-merkle-tree" -version = "13.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" -dependencies = [ - "hash-db", - "log", -] - [[package]] name = "bincode" version = "1.3.3" @@ -930,19 +952,31 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93f2635620bf0b9d4576eb7bb9a38a55df78bd1205d26fa994b25911a69f212f" dependencies = [ - "bitcoin_hashes", - "rand", - "rand_core 0.6.4", - "serde", - "unicode-normalization", + "bitcoin_hashes 0.11.0", ] +[[package]] +name = "bitcoin-internals" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9425c3bf7089c983facbae04de54513cce73b41c7f9ff8c845b54e7bc64ebbfb" + [[package]] name = "bitcoin_hashes" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90064b8dee6815a6470d60bad07bbbaee885c0e12d04177138fa3291a01b7bc4" +[[package]] +name = "bitcoin_hashes" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1930a4dabfebb8d7d9992db18ebe3ae2876f0a305fab206fd168df931ede293b" +dependencies = [ + "bitcoin-internals", + "hex-conservative", +] + [[package]] name = "bitflags" version = "1.3.2" @@ -951,9 +985,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" +checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" [[package]] name = "bitvec" @@ -1001,13 +1035,37 @@ dependencies = [ [[package]] name = "blake2b_simd" -version = "1.0.1" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23285ad32269793932e830392f2fe2f83e26488fd3ec778883a93c8323735780" +dependencies = [ + "arrayref", + "arrayvec 0.7.4", + "constant_time_eq 0.3.0", +] + +[[package]] +name = "blake2s_simd" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94230421e395b9920d23df13ea5d77a20e1725331f90fbbf6df6040b33f756ae" +dependencies = [ + "arrayref", + "arrayvec 0.7.4", + "constant_time_eq 0.3.0", +] + +[[package]] +name = "blake3" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c2f0dc9a68c6317d884f97cc36cf5a3d20ba14ce404227df55e1af708ab04bc" +checksum = "30cca6d3674597c30ddf2c587bf8d9d65c9a84d2326d941cc79c9842dfe0ef52" dependencies = [ "arrayref", "arrayvec 0.7.4", - "constant_time_eq 0.2.6", + "cc", + "cfg-if", + "constant_time_eq 0.3.0", ] [[package]] @@ -1030,17 +1088,18 @@ dependencies = [ [[package]] name = "blocking" -version = "1.3.1" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77231a1c8f801696fc0123ec6150ce92cffb8e164a02afb9c8ddee0e9b65ad65" +checksum = "6a37913e8dc4ddcc604f0c6d3bf2887c995153af3611de9e23c352b44c1b9118" dependencies = [ - 
"async-channel", - "async-lock", + "async-channel 2.2.1", + "async-lock 3.3.0", "async-task", - "atomic-waker", - "fastrand 1.9.0", - "futures-lite", - "log", + "fastrand 2.0.2", + "futures-io", + "futures-lite 2.3.0", + "piper", + "tracing", ] [[package]] @@ -1055,46 +1114,10 @@ dependencies = [ "serde", ] -[[package]] -name = "bp-asset-hub-rococo" -version = "0.4.0" -dependencies = [ - "bp-xcm-bridge-hub-router", - "frame-support", - "parity-scale-codec", - "scale-info", -] - -[[package]] -name = "bp-asset-hub-westend" -version = "0.3.0" -dependencies = [ - "bp-xcm-bridge-hub-router", - "frame-support", - "parity-scale-codec", - "scale-info", -] - -[[package]] -name = "bp-beefy" -version = "0.1.0" -dependencies = [ - "binary-merkle-tree", - "bp-runtime", - "frame-support", - "pallet-beefy-mmr", - "pallet-mmr", - "parity-scale-codec", - "scale-info", - "serde", - "sp-consensus-beefy", - "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", -] - [[package]] name = "bp-bridge-hub-cumulus" version = "0.7.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "bp-messages", "bp-polkadot-core", @@ -1109,6 +1132,7 @@ dependencies = [ [[package]] name = "bp-bridge-hub-kusama" version = "0.6.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "bp-bridge-hub-cumulus", "bp-messages", @@ -1122,6 +1146,7 @@ dependencies = [ [[package]] name = "bp-bridge-hub-polkadot" version = "0.6.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "bp-bridge-hub-cumulus", "bp-messages", @@ -1135,6 +1160,7 @@ dependencies = [ [[package]] name = "bp-bridge-hub-rococo" version = "0.7.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = 
[ "bp-bridge-hub-cumulus", "bp-messages", @@ -1148,6 +1174,7 @@ dependencies = [ [[package]] name = "bp-bridge-hub-westend" version = "0.3.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "bp-bridge-hub-cumulus", "bp-messages", @@ -1161,13 +1188,11 @@ dependencies = [ [[package]] name = "bp-header-chain" version = "0.7.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "bp-runtime", - "bp-test-utils", "finality-grandpa", "frame-support", - "hex", - "hex-literal", "parity-scale-codec", "scale-info", "serde", @@ -1180,6 +1205,7 @@ dependencies = [ [[package]] name = "bp-kusama" version = "0.5.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "bp-header-chain", "bp-polkadot-core", @@ -1192,12 +1218,11 @@ dependencies = [ [[package]] name = "bp-messages" version = "0.7.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "bp-header-chain", "bp-runtime", "frame-support", - "hex", - "hex-literal", "parity-scale-codec", "scale-info", "serde", @@ -1208,6 +1233,7 @@ dependencies = [ [[package]] name = "bp-parachains" version = "0.7.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "bp-header-chain", "bp-polkadot-core", @@ -1224,6 +1250,7 @@ dependencies = [ [[package]] name = "bp-polkadot" version = "0.5.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "bp-header-chain", "bp-polkadot-core", @@ -1236,6 +1263,7 @@ dependencies = [ [[package]] name = "bp-polkadot-bulletin" version = "0.4.0" +source = 
"git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "bp-header-chain", "bp-messages", @@ -1253,12 +1281,12 @@ dependencies = [ [[package]] name = "bp-polkadot-core" version = "0.7.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "bp-messages", "bp-runtime", "frame-support", "frame-system", - "hex", "parity-scale-codec", "parity-util-mem", "scale-info", @@ -1271,12 +1299,11 @@ dependencies = [ [[package]] name = "bp-relayers" version = "0.7.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "bp-messages", "bp-runtime", "frame-support", - "hex", - "hex-literal", "parity-scale-codec", "scale-info", "sp-runtime", @@ -1286,6 +1313,7 @@ dependencies = [ [[package]] name = "bp-rococo" version = "0.6.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "bp-header-chain", "bp-polkadot-core", @@ -1298,11 +1326,11 @@ dependencies = [ [[package]] name = "bp-runtime" version = "0.7.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "frame-support", "frame-system", "hash-db", - "hex-literal", "impl-trait-for-tuples", "log", "num-traits", @@ -1321,12 +1349,13 @@ dependencies = [ [[package]] name = "bp-test-utils" version = "0.7.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "bp-header-chain", "bp-parachains", "bp-polkadot-core", "bp-runtime", - "ed25519-dalek", + "ed25519-dalek 2.1.1", "finality-grandpa", "parity-scale-codec", "sp-application-crypto", @@ -1340,6 +1369,7 @@ dependencies = [ [[package]] name = "bp-westend" version = "0.3.0" +source = 
"git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "bp-header-chain", "bp-polkadot-core", @@ -1352,6 +1382,7 @@ dependencies = [ [[package]] name = "bp-xcm-bridge-hub" version = "0.2.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", ] @@ -1359,6 +1390,7 @@ dependencies = [ [[package]] name = "bp-xcm-bridge-hub-router" version = "0.6.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "parity-scale-codec", "scale-info", @@ -1369,6 +1401,7 @@ dependencies = [ [[package]] name = "bridge-runtime-common" version = "0.7.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "bp-header-chain", "bp-messages", @@ -1376,14 +1409,12 @@ dependencies = [ "bp-polkadot-core", "bp-relayers", "bp-runtime", - "bp-test-utils", "bp-xcm-bridge-hub", "bp-xcm-bridge-hub-router", "frame-support", "frame-system", "hash-db", "log", - "pallet-balances", "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-bridge-parachains", @@ -1400,8 +1431,6 @@ dependencies = [ "sp-trie", "staging-xcm", "staging-xcm-builder", - "static_assertions", - "tuplex", ] [[package]] @@ -1412,18 +1441,18 @@ checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" [[package]] name = "bs58" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5353f36341f7451062466f0b755b96ac3a9547e4d7f6b70d603fc721a7d7896" +checksum = "bf88ba1141d185c399bee5288d850d63b8369520c1eafc32a0430b5b6c287bf4" dependencies = [ "tinyvec", ] [[package]] name = "bumpalo" -version = "3.13.0" +version = "3.16.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "byte-slice-cast" @@ -1439,21 +1468,21 @@ checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" [[package]] name = "bytemuck" -version = "1.13.1" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17febce684fd15d89027105661fec94afb475cb995fbc59d2865198446ba2eea" +checksum = "5d6d68c57235a3a081186990eca2867354726650f42f7516ca50c28d6281fd15" [[package]] name = "byteorder" -version = "1.4.3" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.4.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" +checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" [[package]] name = "c2-chacha" @@ -1473,28 +1502,23 @@ checksum = "a2698f953def977c68f935bb0dfa959375ad4638570e969e2f1e9f433cbf1af6" [[package]] name = "cc" -version = "1.0.79" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" +checksum = "17f6e324229dc011159fcc089755d1e2e216a90d43a7dea6853ca740b84f35e7" dependencies = [ "jobserver", + "libc", ] [[package]] name = "cfg-expr" -version = "0.15.5" +version = "0.15.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03915af431787e6ffdcc74c645077518c6b6e01f80b761e0fbbfa288536311b3" +checksum = "d067ad48b8650848b989a59a86c6c36a995d02d2bf778d45c3c5d57bc2718f02" dependencies = [ 
"smallvec", ] -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - [[package]] name = "cfg-if" version = "1.0.0" @@ -1513,57 +1537,73 @@ dependencies = [ [[package]] name = "chacha20" -version = "0.8.2" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c80e5460aa66fe3b91d40bcbdab953a597b60053e34d684ac6903f863b680a6" +checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818" dependencies = [ - "cfg-if 1.0.0", - "cipher 0.3.0", + "cfg-if", + "cipher 0.4.4", "cpufeatures", - "zeroize", ] [[package]] name = "chacha20poly1305" -version = "0.9.1" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a18446b09be63d457bbec447509e85f662f32952b035ce892290396bc0b0cff5" +checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35" dependencies = [ - "aead 0.4.3", + "aead", "chacha20", - "cipher 0.3.0", + "cipher 0.4.4", "poly1305", "zeroize", ] [[package]] name = "chrono" -version = "0.4.31" +version = "0.4.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" dependencies = [ "android-tzdata", "iana-time-zone", "js-sys", "num-traits", "wasm-bindgen", - "windows-targets 0.48.1", + "windows-targets 0.52.5", ] [[package]] -name = "cipher" -version = "0.2.5" +name = "cid" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12f8e7987cbd042a63249497f41aed09f8e65add917ea6566effbc56578d6801" +checksum = "b9b68e3193982cd54187d71afdb2a271ad4cf8af157858e9cb911b91321de143" dependencies = [ - "generic-array 0.14.7", + "core2", + "multibase", + "multihash 0.17.0", + "serde", + "unsigned-varint", +] + 
+[[package]] +name = "cid" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd94671561e36e4e7de75f753f577edafb0e7c05d6e4547229fdf7938fbcd2c3" +dependencies = [ + "core2", + "multibase", + "multihash 0.18.1", + "serde", + "unsigned-varint", ] [[package]] name = "cipher" -version = "0.3.0" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ee52072ec15386f770805afd189a01c8841be8696bed250fa2f13c4c0d6dfb7" +checksum = "12f8e7987cbd042a63249497f41aed09f8e65add917ea6566effbc56578d6801" dependencies = [ "generic-array 0.14.7", ] @@ -1576,24 +1616,7 @@ checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" dependencies = [ "crypto-common", "inout", -] - -[[package]] -name = "ckb-merkle-mountain-range" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f061f97d64fd1822664bdfb722f7ae5469a97b77567390f7442be5b5dc82a5b" -dependencies = [ - "cfg-if 0.1.10", -] - -[[package]] -name = "ckb-merkle-mountain-range" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56ccb671c5921be8a84686e6212ca184cb1d7c51cadcdbfcbd1cc3f042f5dfb8" -dependencies = [ - "cfg-if 1.0.0", + "zeroize", ] [[package]] @@ -1617,6 +1640,16 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" +[[package]] +name = "combine" +version = "4.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" +dependencies = [ + "bytes", + "memchr", +] + [[package]] name = "common" version = "0.1.0" @@ -1630,7 +1663,7 @@ dependencies = [ "fflonk", "getrandom_or_panic", "merlin", - "rand_chacha", + "rand_chacha 0.3.1", ] [[package]] @@ -1641,38 +1674,36 @@ checksum = 
"2382f75942f4b3be3690fe4f86365e9c853c1587d6ee58212cebf6e2a9ccd101" [[package]] name = "concurrent-queue" -version = "2.2.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62ec6771ecfa0762d24683ee5a32ad78487a3d3afdc0fb8cae19d2c5deb50b7c" +checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363" dependencies = [ "crossbeam-utils", ] [[package]] name = "const-oid" -version = "0.9.4" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "795bc6e66a8e340f075fcf6227e417a2dc976b92b91f3cdc778bb858778b6747" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "const-random" -version = "0.1.15" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368a7a772ead6ce7e1de82bfb04c485f3db8ec744f72925af5735e29a22cc18e" +checksum = "87e00182fe74b066627d63b85fd550ac2998d4b0bd86bfed477a0ae4c7c71359" dependencies = [ "const-random-macro", - "proc-macro-hack", ] [[package]] name = "const-random-macro" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d7d6ab3c3a2282db210df5f02c4dab6e0a7057af0fb7ebd4070f30fe05c0ddb" +checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e" dependencies = [ - "getrandom 0.2.10", + "getrandom 0.2.14", "once_cell", - "proc-macro-hack", "tiny-keccak", ] @@ -1684,9 +1715,9 @@ checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" [[package]] name = "constant_time_eq" -version = "0.2.6" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21a53c0a4d288377e7415b53dcfc3c04da5cdc2cc95c8d5ac178b58f0b861ad6" +checksum = "f7144d30dcf0fafbce74250a3963025d8d52177934239851c917d29f1df280c2" [[package]] name = "constcat" @@ -1702,9 +1733,9 @@ checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" 
[[package]] name = "core-foundation" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" dependencies = [ "core-foundation-sys", "libc", @@ -1712,9 +1743,9 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.4" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" +checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" [[package]] name = "core2" @@ -1731,14 +1762,14 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eeaa953eaad386a53111e47172c2fedba671e5684c8dd601a5f474f4f118710f" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] name = "cpufeatures" -version = "0.2.9" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" +checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" dependencies = [ "libc", ] @@ -1764,7 +1795,7 @@ dependencies = [ "cranelift-codegen-shared", "cranelift-entity", "cranelift-isle", - "gimli", + "gimli 0.27.3", "hashbrown 0.13.2", "log", "regalloc2", @@ -1834,64 +1865,70 @@ dependencies = [ "cranelift-codegen", "cranelift-entity", "cranelift-frontend", - "itertools", + "itertools 0.10.5", "log", "smallvec", "wasmparser", "wasmtime-types", ] +[[package]] +name = "crc" +version = "3.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69e6e4d7b33a94f0991c26729976b10ebde1d34c3ee82408fb536164fa10d636" +dependencies = [ + "crc-catalog", +] + +[[package]] +name = "crc-catalog" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" + [[package]] name = "crc32fast" -version = "1.3.2" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" +checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] name = "crossbeam-deque" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" +checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" dependencies = [ - "cfg-if 1.0.0", "crossbeam-epoch", "crossbeam-utils", ] [[package]] name = "crossbeam-epoch" -version = "0.9.15" +version = "0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ - "autocfg", - "cfg-if 1.0.0", "crossbeam-utils", - "memoffset 0.9.0", - "scopeguard", ] [[package]] name = "crossbeam-queue" -version = "0.3.8" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" +checksum = "df0346b5d5e76ac2fe4e327c5fd1118d6be7c51dfb18f9b7922923f287471e35" dependencies = [ - "cfg-if 1.0.0", "crossbeam-utils", ] [[package]] name = "crossbeam-utils" -version = "0.8.16" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" -dependencies = [ - "cfg-if 1.0.0", -] +checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" [[package]] name = "crunchy" @@ -1901,13 +1938,13 @@ checksum = 
"7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" [[package]] name = "crypto-bigint" -version = "0.5.2" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf4c2f4e1afd912bc40bfd6fed5d9dc1f288e0ba01bfcc835cc5bc3eb13efe15" +checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies = [ "generic-array 0.14.7", "rand_core 0.6.4", - "subtle 2.4.1", + "subtle 2.5.0", "zeroize", ] @@ -1939,16 +1976,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" dependencies = [ "generic-array 0.14.7", - "subtle 2.4.1", -] - -[[package]] -name = "ctr" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "049bb91fb4aaf0e3c7efa6cd5ef877dbbbd15b39dad06d9948de4ec8a75761ea" -dependencies = [ - "cipher 0.3.0", + "subtle 2.5.0", ] [[package]] @@ -1962,24 +1990,24 @@ dependencies = [ [[package]] name = "curl" -version = "0.4.44" +version = "0.4.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "509bd11746c7ac09ebd19f0b17782eae80aadee26237658a6b4808afb5c11a22" +checksum = "1e2161dd6eba090ff1594084e95fd67aeccf04382ffea77999ea94ed42ec67b6" dependencies = [ "curl-sys", "libc", "openssl-probe", "openssl-sys", "schannel", - "socket2 0.4.9", - "winapi", + "socket2 0.5.6", + "windows-sys 0.52.0", ] [[package]] name = "curl-sys" -version = "0.4.63+curl-8.1.2" +version = "0.4.72+curl-8.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aeb0fef7046022a1e2ad67a004978f0e3cacb9e3123dc62ce768f92197b771dc" +checksum = "29cbdc8314c447d11e8fd156dcdd031d9e02a7a976163e396b548c03153bc9ea" dependencies = [ "cc", "libc", @@ -1988,7 +2016,7 @@ dependencies = [ "openssl-sys", "pkg-config", "vcpkg", - "winapi", + "windows-sys 0.52.0", ] [[package]] @@ -2000,49 +2028,36 @@ dependencies = [ "byteorder", "digest 0.9.0", 
"rand_core 0.5.1", - "subtle 2.4.1", + "subtle 2.5.0", "zeroize", ] [[package]] name = "curve25519-dalek" -version = "4.1.1" +version = "4.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89b8c6a2e4b1f45971ad09761aafb85514a84744b67a95e32c3cc1352d1f65c" +checksum = "0a677b8922c94e01bdbb12126b0bc852f00447528dee1782229af9c720c3f348" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "curve25519-dalek-derive", "digest 0.10.7", "fiat-crypto", "platforms", "rustc_version", - "subtle 2.4.1", + "subtle 2.5.0", "zeroize", ] [[package]] name = "curve25519-dalek-derive" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" -dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.52", -] - -[[package]] -name = "curve25519-dalek-ng" -version = "4.1.1" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c359b7249347e46fb28804470d071c921156ad62b3eef5d34e2ba867533dec8" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ - "byteorder", - "digest 0.9.0", - "rand_core 0.6.4", - "subtle-ng", - "zeroize", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -2057,12 +2072,12 @@ dependencies = [ [[package]] name = "darling" -version = "0.20.3" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0209d94da627ab5605dcccf08bb18afa5009cfbef48d8a8b7d7bdbc79be25c5e" +checksum = "54e36fcd13ed84ffdfda6f5be89b31287cbb80c439841fe69e04841435464391" dependencies = [ - "darling_core 0.20.3", - "darling_macro 0.20.3", + "darling_core 0.20.8", + "darling_macro 0.20.8", ] [[package]] @@ -2073,24 +2088,24 @@ checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2 
1.0.81", + "quote 1.0.36", "strsim 0.10.0", "syn 1.0.109", ] [[package]] name = "darling_core" -version = "0.20.3" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "177e3443818124b357d8e76f53be906d60937f0d3a90773a664fa63fa253e621" +checksum = "9c2cf1c23a687a1feeb728783b993c4e1ad83d99f351801977dd809b48d0a70f" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "strsim 0.10.0", - "syn 2.0.52", + "syn 2.0.60", ] [[package]] @@ -2100,32 +2115,32 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" dependencies = [ "darling_core 0.14.4", - "quote 1.0.35", + "quote 1.0.36", "syn 1.0.109", ] [[package]] name = "darling_macro" -version = "0.20.3" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" +checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f" dependencies = [ - "darling_core 0.20.3", - "quote 1.0.35", - "syn 2.0.52", + "darling_core 0.20.8", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] name = "data-encoding" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308" +checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5" [[package]] name = "data-encoding-macro" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c904b33cc60130e1aeea4956ab803d08a3f4a0ca82d64ed757afac3891f2bb99" +checksum = "20c01c06f5f429efdf2bae21eb67c28b3df3cf85b7dd2d8ef09c0838dac5d33e" dependencies = [ "data-encoding", "data-encoding-macro-internal", @@ -2133,9 +2148,9 @@ dependencies = [ [[package]] name = "data-encoding-macro-internal" 
-version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fdf3fce3ce863539ec1d7fd1b6dcc3c645663376b43ed376bbf887733e4f772" +checksum = "0047d07f2c89b17dd631c80450d69841a6b5d7fb17278cbc43d7e4cfcf2576f3" dependencies = [ "data-encoding", "syn 1.0.109", @@ -2143,9 +2158,9 @@ dependencies = [ [[package]] name = "der" -version = "0.7.7" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c7ed52955ce76b1554f509074bb357d3fb8ac9b51288a65a3fd480d1dfba946" +checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" dependencies = [ "const-oid", "zeroize", @@ -2167,9 +2182,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.9" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f32d04922c60427da6f9fef14d042d9edddef64cb9d4ce0d64d0685fbeb1fd3" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" dependencies = [ "powerfmt", ] @@ -2180,8 +2195,8 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "syn 1.0.109", ] @@ -2191,11 +2206,22 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e79116f119dd1dba1abf1f3405f03b9b0e79a27a3883864bfebded8a3dc768cd" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "syn 1.0.109", ] +[[package]] +name = "derive-syn-parse" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d65d7ce8132b7c0e54497a4d9a55a1c2a0912a0d786cf894472ba818fba45762" +dependencies = [ + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", +] + [[package]] name = "derive_more" version = "0.99.17" @@ -2203,8 
+2229,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case", - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "rustc_version", "syn 1.0.109", ] @@ -2242,7 +2268,7 @@ dependencies = [ "block-buffer 0.10.4", "const-oid", "crypto-common", - "subtle 2.4.1", + "subtle 2.5.0", ] [[package]] @@ -2251,7 +2277,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "339ee130d97a610ea5a5872d2bbb130fdf68884ff09d3028b81bec8a1ac23bbc" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "dirs-sys-next", ] @@ -2272,9 +2298,9 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.52", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -2295,28 +2321,28 @@ dependencies = [ [[package]] name = "docify" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cc4fd38aaa9fb98ac70794c82a00360d1e165a87fbf96a8a91f9dfc602aaee2" +checksum = "43a2f138ad521dc4a2ced1a4576148a6a610b4c5923933b062a263130a6802ce" dependencies = [ "docify_macros", ] [[package]] name = "docify_macros" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63fa215f3a0d40fb2a221b3aa90d8e1fbb8379785a990cb60d62ac71ebdc6460" +checksum = "1a081e51fb188742f5a7a1164ad752121abcb22874b21e2c3b0dd040c515fdad" dependencies = [ "common-path", - "derive-syn-parse", + "derive-syn-parse 0.2.0", "once_cell", - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "regex", - "syn 2.0.52", + "syn 2.0.60", "termcolor", - "toml 0.8.11", + "toml 0.8.12", "walkdir", ] @@ -2328,9 +2354,9 @@ checksum 
= "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" [[package]] name = "downcast-rs" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ea835d29036a4087793836fa931b08837ad5e957da9e23886b29586fb9b6650" +checksum = "75b325c5dbd37f80359721ad39aca5a29fb04c89279657cffdda8736d0c0b9d2" [[package]] name = "dtoa" @@ -2354,16 +2380,16 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "558e40ea573c374cf53507fd240b7ee2f5477df7cfebdb97323ec61c719399c5" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "syn 1.0.109", ] [[package]] name = "dyn-clone" -version = "1.0.12" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "304e6508efa593091e97a9abbc10f90aa7ca635b6d2784feff3c89d41dd12272" +checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" [[package]] name = "ecdsa" @@ -2376,18 +2402,41 @@ dependencies = [ "elliptic-curve", "rfc6979", "serdect", - "signature", + "signature 2.2.0", "spki", ] [[package]] name = "ed25519" -version = "2.2.2" +version = "1.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91cff35c70bba8a626e3185d8cd48cc11b5437e1a5bcd15b9b5fa3c64b6dfee7" +dependencies = [ + "signature 1.6.4", +] + +[[package]] +name = "ed25519" +version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60f6d271ca33075c88028be6f04d502853d63a5ece419d269c15315d4fc1cf1d" +checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" dependencies = [ "pkcs8", - "signature", + "signature 2.2.0", +] + +[[package]] +name = "ed25519-dalek" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" +dependencies = [ + "curve25519-dalek 3.2.0", + "ed25519 
1.5.3", + "rand 0.7.3", + "serde", + "sha2 0.9.9", + "zeroize", ] [[package]] @@ -2396,12 +2445,12 @@ version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" dependencies = [ - "curve25519-dalek 4.1.1", - "ed25519", + "curve25519-dalek 4.1.2", + "ed25519 2.2.3", "rand_core 0.6.4", "serde", - "sha2 0.10.7", - "subtle 2.4.1", + "sha2 0.10.8", + "subtle 2.5.0", "zeroize", ] @@ -2419,11 +2468,26 @@ dependencies = [ "zeroize", ] +[[package]] +name = "ed25519-zebra" +version = "4.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d9ce6874da5d4415896cd45ffbc4d1cfc0c4f9c079427bd870742c30f2f65a9" +dependencies = [ + "curve25519-dalek 4.1.2", + "ed25519 2.2.3", + "hashbrown 0.14.3", + "hex", + "rand_core 0.6.4", + "sha2 0.10.8", + "zeroize", +] + [[package]] name = "either" -version = "1.9.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" +checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2" [[package]] name = "elliptic-curve" @@ -2441,17 +2505,17 @@ dependencies = [ "rand_core 0.6.4", "sec1", "serdect", - "subtle 2.4.1", + "subtle 2.5.0", "zeroize", ] [[package]] name = "encoding_rs" -version = "0.8.32" +version = "0.8.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394" +checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -2461,11 +2525,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "syn 1.0.109", ] 
+[[package]] +name = "enum-as-inner" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" +dependencies = [ + "heck 0.4.1", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", +] + [[package]] name = "env_filter" version = "0.1.0" @@ -2517,6 +2593,7 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "equivocation-detector" version = "0.1.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "async-std", "async-trait", @@ -2572,17 +2649,60 @@ version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" +[[package]] +name = "event-listener" +version = "4.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite 0.2.14", +] + +[[package]] +name = "event-listener" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d9944b8ca13534cdfb2800775f8dd4902ff3fc75a50101466decadfdf322a24" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite 0.2.14", +] + +[[package]] +name = "event-listener-strategy" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" +dependencies = [ + "event-listener 4.0.3", + "pin-project-lite 0.2.14", +] + +[[package]] +name = "event-listener-strategy" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "332f51cb23d20b0de8458b86580878211da09bcd4503cb579c225b3d124cabb3" +dependencies = [ + "event-listener 5.3.0", + "pin-project-lite 
0.2.14", +] + [[package]] name = "expander" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f86a749cf851891866c10515ef6c299b5c69661465e9c3bbe7e07a2b77fb0f7" +checksum = "00e83c02035136f1592a47964ea60c05a50e4ed8b5892cfac197063850898d4d" dependencies = [ "blake2 0.10.6", "fs-err", - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.52", + "prettier-please", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -2602,9 +2722,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.0.1" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" +checksum = "658bd65b1cf4c852a3cc96f18a8ce7b5640f6b703f905c7d74532294c2a63984" [[package]] name = "ff" @@ -2613,7 +2733,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" dependencies = [ "rand_core 0.6.4", - "subtle 2.4.1", + "subtle 2.5.0", ] [[package]] @@ -2631,9 +2751,9 @@ dependencies = [ [[package]] name = "fiat-crypto" -version = "0.2.5" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27573eac26f4dd11e2b1916c3fe1baa56407c83c71a773a8ba17ec0bca03b6b7" +checksum = "c007b1ae3abe1cb6f85a16305acd418b7ca6343b953633fee2b76d8f108b830f" [[package]] name = "file-per-thread-logger" @@ -2664,6 +2784,7 @@ dependencies = [ [[package]] name = "finality-relay" version = "0.1.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "async-std", "async-trait", @@ -2672,7 +2793,6 @@ dependencies = [ "futures", "log", "num-traits", - "parking_lot 0.12.1", "relay-utils", ] @@ -2683,7 +2803,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" dependencies = [ "byteorder", - "rand", + "rand 0.8.5", "rustc-hex", "static_assertions", ] @@ -2696,9 +2816,9 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" -version = "1.0.26" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b9429470923de8e8cbd4d2dc513535400b4b3fef0319fb5c4e1f520a7bef743" +checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" dependencies = [ "crc32fast", "libz-sys", @@ -2720,11 +2840,26 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + [[package]] name = "form_urlencoded" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" dependencies = [ "percent-encoding", ] @@ -2738,7 +2873,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "frame-benchmarking" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "frame-support", 
"frame-support-procedural", @@ -2766,7 +2901,7 @@ version = "15.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "878babb0b136e731cc77ec2fd883ff02745ff21e6fb662729953d44923df009c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "parity-scale-codec", "scale-info", ] @@ -2777,7 +2912,7 @@ version = "16.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87cf1549fba25a6fcac22785b61698317d958e96cac72a59102ea45b9ae64692" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "parity-scale-codec", "scale-info", "serde", @@ -2786,10 +2921,10 @@ dependencies = [ [[package]] name = "frame-support" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "aquamarine", - "array-bytes 6.1.0", + "array-bytes 6.2.2", "bitflags 1.3.2", "docify", "environmental", @@ -2827,50 +2962,50 @@ dependencies = [ [[package]] name = "frame-support-procedural" version = "23.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "Inflector", "cfg-expr", - "derive-syn-parse", + "derive-syn-parse 0.2.0", "expander", "frame-support-procedural-tools", - "itertools", + "itertools 0.10.5", "macro_magic", "proc-macro-warning", - "proc-macro2 1.0.76", - "quote 1.0.35", - "sp-crypto-hashing", - "syn 2.0.52", + "proc-macro2 1.0.81", + "quote 1.0.36", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", + "syn 2.0.60", ] [[package]] name = "frame-support-procedural-tools" version = "10.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = 
"git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.52", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] name = "frame-support-procedural-tools-derive" version = "11.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.52", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] name = "frame-system" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "docify", "frame-support", "log", @@ -2887,9 +3022,12 @@ dependencies = [ [[package]] name = "fs-err" -version = "2.9.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0845fa252299212f0389d64ba26f34fa32cfe41588355f21ed507c59a0f64541" +checksum = "88a41f105fe1d5b6b34b2055e3dc59bb79b46b48b2040b9e6c7b4b5de097aa41" +dependencies = [ + "autocfg", +] [[package]] name = "funty" @@ -2957,19 +3095,32 @@ dependencies = [ "futures-io", "memchr", "parking", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.14", "waker-fn", ] +[[package]] +name = "futures-lite" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" +dependencies = [ + "fastrand 2.0.2", + "futures-core", + "futures-io", + "parking", + "pin-project-lite 0.2.14", +] + [[package]] name = 
"futures-macro" version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.52", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -2979,7 +3130,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2411eed028cdf8c8034eaf21f9915f956b6c3abec4d4c7949ee67f0721127bd" dependencies = [ "futures-io", - "rustls 0.20.8", + "rustls 0.20.9", "webpki", ] @@ -2997,9 +3148,9 @@ checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-timer" -version = "3.0.2" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" +checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" [[package]] name = "futures-util" @@ -3014,7 +3165,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.14", "pin-utils", "slab", ] @@ -3054,18 +3205,18 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "wasi 0.9.0+wasi-snapshot-preview1", ] [[package]] name = "getrandom" -version = "0.2.10" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" +checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "wasi 0.11.0+wasi-snapshot-preview1", ] @@ -3076,28 +3227,18 @@ version = "0.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"6ea1015b5a70616b688dc230cfe50c8af89d972cb132d5a622814d29773b10b9" dependencies = [ - "rand", + "rand 0.8.5", "rand_core 0.6.4", ] [[package]] name = "ghash" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1583cc1656d7839fd3732b80cf4f38850336cdb9b8ded1cd399ca62958de3c99" -dependencies = [ - "opaque-debug 0.3.0", - "polyval 0.5.3", -] - -[[package]] -name = "ghash" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40" +checksum = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1" dependencies = [ - "opaque-debug 0.3.0", - "polyval 0.6.1", + "opaque-debug 0.3.1", + "polyval", ] [[package]] @@ -3111,6 +3252,12 @@ dependencies = [ "stable_deref_trait", ] +[[package]] +name = "gimli" +version = "0.28.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" + [[package]] name = "gloo-timers" version = "0.2.6" @@ -3131,14 +3278,14 @@ checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ "ff", "rand_core 0.6.4", - "subtle 2.4.1", + "subtle 2.5.0", ] [[package]] name = "h2" -version = "0.3.20" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97ec8491ebaf99c8eaa73058b045fe58073cd6be7f596ac993ced0b0a0c01049" +checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" dependencies = [ "bytes", "fnv", @@ -3146,7 +3293,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap 1.9.3", + "indexmap 2.2.6", "slab", "tokio", "tokio-util", @@ -3174,7 +3321,7 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ - "ahash 0.7.6", + "ahash 0.7.8", ] 
[[package]] @@ -3183,7 +3330,7 @@ version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" dependencies = [ - "ahash 0.8.7", + "ahash 0.8.11", ] [[package]] @@ -3192,7 +3339,7 @@ version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" dependencies = [ - "ahash 0.8.7", + "ahash 0.8.11", "allocator-api2", "serde", ] @@ -3232,9 +3379,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.3.2" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] name = "hex" @@ -3242,6 +3389,12 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = "hex-conservative" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30ed443af458ccb6d81c1e7e661545f94d3176752fb1df2f543b902a1e0f51e2" + [[package]] name = "hex-literal" version = "0.4.1" @@ -3250,9 +3403,9 @@ checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" [[package]] name = "hkdf" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437" +checksum = "7b5f8eb2ad728638ea2c7d47a21db23b7b58a72ed6a38256b8a1849f15fbbdf7" dependencies = [ "hmac 0.12.1", ] @@ -3287,6 +3440,15 @@ dependencies = [ "hmac 0.8.1", ] +[[package]] +name = "home" +version = "0.5.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" 
+dependencies = [ + "windows-sys 0.52.0", +] + [[package]] name = "hostname" version = "0.3.1" @@ -3300,9 +3462,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.9" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" dependencies = [ "bytes", "fnv", @@ -3311,13 +3473,13 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" +checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", "http", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.14", ] [[package]] @@ -3328,9 +3490,9 @@ checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "humantime" @@ -3340,9 +3502,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.27" +version = "0.14.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" +checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" dependencies = [ "bytes", "futures-channel", @@ -3354,8 +3516,8 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project-lite 0.2.12", - "socket2 0.4.9", + "pin-project-lite 0.2.14", + "socket2 0.5.6", "tokio", "tower-service", "tracing", @@ -3364,32 +3526,32 @@ dependencies = [ 
[[package]] name = "hyper-rustls" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d78e1e73ec14cf7375674f74d7dde185c8206fd9dea6fb6295e8a98098aaa97" +checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", "http", "hyper", "log", - "rustls 0.21.5", - "rustls-native-certs", + "rustls 0.21.10", + "rustls-native-certs 0.6.3", "tokio", - "tokio-rustls", + "tokio-rustls 0.24.1", ] [[package]] name = "iana-time-zone" -version = "0.1.57" +version = "0.1.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613" +checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "windows 0.48.0", + "windows-core 0.52.0", ] [[package]] @@ -3428,23 +3590,33 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "idna" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + [[package]] name = "if-addrs" -version = "0.7.0" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbc0fa01ffc752e9dbc72818cdb072cd028b86be5e09dd04c5a643704fe101a9" +checksum = "cabb0019d51a643781ff15c9c8a3e5dedc365c47211270f4e8f82812fedd8f0a" dependencies = [ "libc", - "winapi", + "windows-sys 0.48.0", ] [[package]] name = "if-watch" -version = "3.0.1" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9465340214b296cd17a0009acdb890d6160010b8adf8f78a00d0d7ab270f79f" +checksum = "d6b0422c86d7ce0e97169cc42e04ae643caf278874a7a3c87b8150a220dc7e1e" dependencies = [ - "async-io", + "async-io 
2.3.2", "core-foundation", "fnv", "futures", @@ -3454,7 +3626,7 @@ dependencies = [ "rtnetlink", "system-configuration", "tokio", - "windows 0.34.0", + "windows 0.51.1", ] [[package]] @@ -3490,8 +3662,8 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "syn 1.0.109", ] @@ -3510,8 +3682,8 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b139284b5cf57ecfa712bcc66950bb635b31aff41c188e8a4cfc758eca374a3f" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", ] [[package]] @@ -3527,9 +3699,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.2" +version = "2.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "824b2ae422412366ba479e8111fd301f7b5faece8149317bb81925979a53f520" +checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" dependencies = [ "equivalent", "hashbrown 0.14.3", @@ -3556,7 +3728,7 @@ version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -3568,19 +3740,13 @@ dependencies = [ "num-traits", ] -[[package]] -name = "intx" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f38a50a899dc47a6d0ed5508e7f601a2e34c3a85303514b5d137f3c10a0c75" - [[package]] name = "io-lifetimes" version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ - "hermit-abi 0.3.2", + "hermit-abi 0.3.9", "libc", "windows-sys 0.48.0", ] @@ -3597,7 +3763,7 @@ version = "0.3.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.5", + "socket2 0.5.6", "widestring", "windows-sys 0.48.0", "winreg", @@ -3605,19 +3771,19 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" +checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" [[package]] name = "is-terminal" -version = "0.4.9" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" +checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b" dependencies = [ - "hermit-abi 0.3.2", - "rustix 0.38.31", - "windows-sys 0.48.0", + "hermit-abi 0.3.9", + "libc", + "windows-sys 0.52.0", ] [[package]] @@ -3626,19 +3792,19 @@ version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "334e04b4d781f436dc315cb1e7515bd96826426345d498149e4bde36b67f8ee9" dependencies = [ - "async-channel", + "async-channel 1.9.0", "castaway", "crossbeam-utils", "curl", "curl-sys", "encoding_rs", - "event-listener", - "futures-lite", + "event-listener 2.5.3", + "futures-lite 1.13.0", "http", "log", "mime", "once_cell", - "polling", + "polling 2.8.0", "slab", "sluice", "tracing", @@ -3656,26 +3822,35 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +dependencies = [ + "either", +] + [[package]] name = "itoa" -version = "1.0.9" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "jobserver" -version = "0.1.26" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" +checksum = "685a7d121ee3f65ae4fddd72b25a04bb36b6af81bc0828f7d5434c0fe60fa3a2" dependencies = [ "libc", ] [[package]] name = "js-sys" -version = "0.3.64" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" +checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" dependencies = [ "wasm-bindgen", ] @@ -3693,159 +3868,78 @@ dependencies = [ [[package]] name = "jsonrpsee" -version = "0.17.1" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b971ce0f6cd1521ede485afc564b95b2c8e7079b9da41d4273bd9b55140a55d" +checksum = "c4b0e68d9af1f066c06d6e2397583795b912d78537d7d907c561e82c13d69fa1" dependencies = [ - "jsonrpsee-core 0.17.1", - "jsonrpsee-proc-macros 0.17.1", - "jsonrpsee-types 0.17.1", + "jsonrpsee-client-transport", + "jsonrpsee-core", + "jsonrpsee-http-client", + "jsonrpsee-proc-macros", + "jsonrpsee-server", + "jsonrpsee-types", "jsonrpsee-ws-client", + "tokio", "tracing", ] [[package]] -name = "jsonrpsee" -version = "0.20.1" +name = "jsonrpsee-client-transport" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ad9b31183a8bcbe843e32ca8554ad2936633548d95a7bb6a8e14c767dea6b05" +checksum = "92f254f56af1ae84815b9b1325094743dcf05b92abb5e94da2e81a35cff0cada" dependencies = [ - "jsonrpsee-client-transport 0.20.1", - "jsonrpsee-core 0.20.1", - "jsonrpsee-http-client", - "jsonrpsee-types 0.20.1", -] - -[[package]] -name = "jsonrpsee" -version = "0.22.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f3ae45a64cfc0882934f963be9431b2a165d667f53140358181f262aca0702" -dependencies = [ - "jsonrpsee-core 0.22.2", - "jsonrpsee-proc-macros 0.22.2", - "jsonrpsee-server", - "jsonrpsee-types 0.22.2", - "tokio", - "tracing", -] - -[[package]] -name = "jsonrpsee-client-transport" -version = "0.17.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ca00d975eda834826b04ad57d4e690c67439bb51b02eb0f8b7e4c30fcef8ab9" -dependencies = [ - "futures-util", - "http", - "jsonrpsee-core 0.17.1", - "pin-project", - "rustls-native-certs", - "soketto", - "thiserror", - "tokio", - "tokio-rustls", - "tokio-util", - "tracing", -] - -[[package]] -name = "jsonrpsee-client-transport" -version = "0.20.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97f2743cad51cc86b0dbfe316309eeb87a9d96a3d7f4dd7a99767c4b5f065335" -dependencies = [ - "futures-util", - "http", - "jsonrpsee-core 0.20.1", - "pin-project", - "rustls-native-certs", - "soketto", - "thiserror", - "tokio", - "tokio-rustls", - "tokio-util", - "tracing", - "url", -] - -[[package]] -name = "jsonrpsee-core" -version = "0.17.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b83cca7a5a7899eed8b2935d5f755c8c4052ad66ab5b328bd34ac2b3ffd3515f" -dependencies = [ - "anyhow", - "async-lock", - "async-trait", - "beef", - "futures-timer", - "futures-util", - "jsonrpsee-types 0.17.1", - "rustc-hash", - "serde", - "serde_json", - "thiserror", - "tokio", - "tokio-stream", - "tracing", + "futures-util", + "http", + "jsonrpsee-core", + "pin-project", + "rustls-native-certs 0.7.0", + "rustls-pki-types", + "soketto", + "thiserror", + "tokio", + "tokio-rustls 0.25.0", + "tokio-util", + "tracing", + "url", ] [[package]] name = "jsonrpsee-core" -version = "0.20.1" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"35dc957af59ce98373bcdde0c1698060ca6c2d2e9ae357b459c7158b6df33330" +checksum = "274d68152c24aa78977243bb56f28d7946e6aa309945b37d33174a3f92d89a3a" dependencies = [ "anyhow", - "async-lock", "async-trait", "beef", "futures-timer", "futures-util", "hyper", - "jsonrpsee-types 0.20.1", - "rustc-hash", - "serde", - "serde_json", - "thiserror", - "tokio", - "tracing", -] - -[[package]] -name = "jsonrpsee-core" -version = "0.22.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b75568f4f9696e3a47426e1985b548e1a9fcb13372a5e320372acaf04aca30d1" -dependencies = [ - "anyhow", - "async-trait", - "beef", - "futures-util", - "hyper", - "jsonrpsee-types 0.22.2", + "jsonrpsee-types", "parking_lot 0.12.1", - "rand", + "pin-project", + "rand 0.8.5", "rustc-hash", "serde", "serde_json", "thiserror", "tokio", + "tokio-stream", "tracing", ] [[package]] name = "jsonrpsee-http-client" -version = "0.20.1" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dd865d0072764cb937b0110a92b5f53e995f7101cb346beca03d93a2dea79de" +checksum = "ac13bc1e44cd00448a5ff485824a128629c945f02077804cb659c07a0ba41395" dependencies = [ "async-trait", "hyper", "hyper-rustls", - "jsonrpsee-core 0.20.1", - "jsonrpsee-types 0.20.1", + "jsonrpsee-core", + "jsonrpsee-types", "serde", "serde_json", "thiserror", @@ -3857,41 +3951,28 @@ dependencies = [ [[package]] name = "jsonrpsee-proc-macros" -version = "0.17.1" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d814a21d9a819f8de1a41b819a263ffd68e4bb5f043d936db1c49b54684bde0a" -dependencies = [ - "heck 0.4.1", - "proc-macro-crate 1.3.1", - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 1.0.109", -] - -[[package]] -name = "jsonrpsee-proc-macros" -version = "0.22.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ca066e73dd70294aebc5c2675d8ffae43be944af027c857ce0d4c51785f014" +checksum = 
"2c326f9e95aeff7d707b2ffde72c22a52acc975ba1c48587776c02b90c4747a6" dependencies = [ "heck 0.4.1", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.52", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] name = "jsonrpsee-server" -version = "0.22.2" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e29c1bd1f9bba83c864977c73404e505f74f730fa0db89dd490ec174e36d7f0" +checksum = "3b5bfbda5f8fb63f997102fd18f73e35e34c84c6dcdbdbbe72c6e48f6d2c959b" dependencies = [ "futures-util", "http", "hyper", - "jsonrpsee-core 0.22.2", - "jsonrpsee-types 0.22.2", + "jsonrpsee-core", + "jsonrpsee-types", "pin-project", "route-recognizer", "serde", @@ -3907,37 +3988,9 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.17.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd301ccc3e08718393432d1961539d78c4580dcca86014dfe6769c308b2c08b2" -dependencies = [ - "anyhow", - "beef", - "serde", - "serde_json", - "thiserror", - "tracing", -] - -[[package]] -name = "jsonrpsee-types" -version = "0.20.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa9e25aec855b2a7d3ed90fded6c41e8c3fb72b63f071e1be3f0004eba19b625" -dependencies = [ - "anyhow", - "beef", - "serde", - "serde_json", - "thiserror", - "tracing", -] - -[[package]] -name = "jsonrpsee-types" -version = "0.22.2" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3467fd35feeee179f71ab294516bdf3a81139e7aeebdd860e46897c12e1a3368" +checksum = "3dc828e537868d6b12bbb07ec20324909a22ced6efca0057c825c3e1126b2c6d" dependencies = [ "anyhow", "beef", @@ -3948,14 +4001,15 @@ dependencies = [ [[package]] name = "jsonrpsee-ws-client" -version = "0.17.1" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89a69852133d549b07cb37ff2d0ec540eae0d20abb75ae923f5d39bc7536d987" +checksum = 
"32f00abe918bf34b785f87459b9205790e5361a3f7437adb50e928dc243f27eb" dependencies = [ "http", - "jsonrpsee-client-transport 0.17.1", - "jsonrpsee-core 0.17.1", - "jsonrpsee-types 0.17.1", + "jsonrpsee-client-transport", + "jsonrpsee-core", + "jsonrpsee-types", + "url", ] [[package]] @@ -3964,19 +4018,19 @@ version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "956ff9b67e26e1a6a866cb758f12c6f8746208489e3e4a4b5580802f2f0a587b" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "ecdsa", "elliptic-curve", "once_cell", "serdect", - "sha2 0.10.7", + "sha2 0.10.8", ] [[package]] name = "keccak" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940" +checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" dependencies = [ "cpufeatures", ] @@ -4019,15 +4073,15 @@ checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" [[package]] name = "libm" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" +checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" [[package]] name = "libnghttp2-sys" -version = "0.1.7+1.45.0" +version = "0.1.10+1.61.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57ed28aba195b38d5ff02b9170cbff627e336a20925e43b4945390401c5dc93f" +checksum = "959c25552127d2e1fa72f0e52548ec04fc386e827ba71a7bd01db46a447dc135" dependencies = [ "cc", "libc", @@ -4042,7 +4096,7 @@ dependencies = [ "bytes", "futures", "futures-timer", - "getrandom 0.2.10", + "getrandom 0.2.14", "instant", "libp2p-allow-block-list", "libp2p-connection-limits", @@ -4104,13 +4158,13 @@ dependencies = [ "libp2p-identity", "log", "multiaddr", - "multihash", + "multihash 0.17.0", "multistream-select", "once_cell", 
"parking_lot 0.12.1", "pin-project", "quick-protobuf", - "rand", + "rand 0.8.5", "rw-stream-sink", "smallvec", "thiserror", @@ -4129,7 +4183,7 @@ dependencies = [ "log", "parking_lot 0.12.1", "smallvec", - "trust-dns-resolver", + "trust-dns-resolver 0.22.0", ] [[package]] @@ -4161,13 +4215,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "276bb57e7af15d8f100d3c11cbdd32c6752b7eef4ba7a18ecf464972c07abcce" dependencies = [ "bs58 0.4.0", - "ed25519-dalek", + "ed25519-dalek 2.1.1", "log", "multiaddr", - "multihash", + "multihash 0.17.0", "quick-protobuf", - "rand", - "sha2 0.10.7", + "rand 0.8.5", + "sha2 0.10.8", "thiserror", "zeroize", ] @@ -4191,8 +4245,8 @@ dependencies = [ "libp2p-swarm", "log", "quick-protobuf", - "rand", - "sha2 0.10.7", + "rand 0.8.5", + "sha2 0.10.8", "smallvec", "thiserror", "uint", @@ -4213,11 +4267,11 @@ dependencies = [ "libp2p-identity", "libp2p-swarm", "log", - "rand", + "rand 0.8.5", "smallvec", - "socket2 0.4.9", + "socket2 0.4.10", "tokio", - "trust-dns-proto", + "trust-dns-proto 0.22.0", "void", ] @@ -4249,8 +4303,8 @@ dependencies = [ "log", "once_cell", "quick-protobuf", - "rand", - "sha2 0.10.7", + "rand 0.8.5", + "sha2 0.10.8", "snow", "static_assertions", "thiserror", @@ -4271,7 +4325,7 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log", - "rand", + "rand 0.8.5", "void", ] @@ -4291,8 +4345,8 @@ dependencies = [ "log", "parking_lot 0.12.1", "quinn-proto", - "rand", - "rustls 0.20.8", + "rand 0.8.5", + "rustls 0.20.9", "thiserror", "tokio", ] @@ -4309,7 +4363,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-swarm", - "rand", + "rand 0.8.5", "smallvec", ] @@ -4328,7 +4382,7 @@ dependencies = [ "libp2p-identity", "libp2p-swarm-derive", "log", - "rand", + "rand 0.8.5", "smallvec", "tokio", "void", @@ -4341,7 +4395,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fba456131824ab6acd4c7bf61e9c0f0a3014b5fc9868ccb8e10d344594cdc4f" dependencies = [ "heck 
0.4.1", - "quote 1.0.35", + "quote 1.0.36", "syn 1.0.109", ] @@ -4357,7 +4411,7 @@ dependencies = [ "libc", "libp2p-core", "log", - "socket2 0.4.9", + "socket2 0.4.10", "tokio", ] @@ -4373,10 +4427,10 @@ dependencies = [ "libp2p-identity", "rcgen", "ring 0.16.20", - "rustls 0.20.8", + "rustls 0.20.9", "thiserror", "webpki", - "x509-parser", + "x509-parser 0.14.0", "yasna", ] @@ -4426,6 +4480,16 @@ dependencies = [ "yamux", ] +[[package]] +name = "libredox" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +dependencies = [ + "bitflags 2.5.0", + "libc", +] + [[package]] name = "libsecp256k1" version = "0.7.1" @@ -4439,7 +4503,7 @@ dependencies = [ "libsecp256k1-core", "libsecp256k1-gen-ecmult", "libsecp256k1-gen-genmult", - "rand", + "rand 0.8.5", "serde", "sha2 0.9.9", "typenum", @@ -4453,7 +4517,7 @@ checksum = "5be9b9bb642d8522a44d533eab56c16c738301965504753b03ad1de3425d5451" dependencies = [ "crunchy", "digest 0.9.0", - "subtle 2.4.1", + "subtle 2.5.0", ] [[package]] @@ -4476,9 +4540,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.9" +version = "1.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56ee889ecc9568871456d42f603d6a0ce59ff328d291063a45cbdf0036baf6db" +checksum = "5e143b5e666b2695d28f6bca6497720813f699c9602dd7f5cac91008b8ada7f9" dependencies = [ "cc", "libc", @@ -4503,9 +4567,9 @@ dependencies = [ [[package]] name = "linregress" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4de0b5f52a9f84544d268f5fabb71b38962d6aa3c6600b8bcd27d44ccf9c9c45" +checksum = "4de04dcecc58d366391f9920245b85ffa684558a5ef6e7736e754347c3aea9c2" dependencies = [ "nalgebra", ] @@ -4524,9 +4588,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.12" +version = "0.4.13" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4cd1a83af159aa67994778be9070f0ae1bd732942279cabb14f86f986a21456" +checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" [[package]] name = "lioness" @@ -4540,11 +4604,65 @@ dependencies = [ "keystream", ] +[[package]] +name = "litep2p" +version = "0.3.0" +source = "git+https://github.com/paritytech/litep2p?branch=master#b142c9eb611fb2fe78d2830266a3675b37299ceb" +dependencies = [ + "async-trait", + "bs58 0.4.0", + "bytes", + "cid 0.10.1", + "ed25519-dalek 1.0.1", + "futures", + "futures-timer", + "hex-literal", + "indexmap 2.2.6", + "libc", + "mockall", + "multiaddr", + "multihash 0.17.0", + "network-interface", + "nohash-hasher", + "parking_lot 0.12.1", + "pin-project", + "prost", + "prost-build", + "quinn", + "rand 0.8.5", + "rcgen", + "ring 0.16.20", + "rustls 0.20.9", + "serde", + "sha2 0.10.8", + "simple-dns", + "smallvec", + "snow", + "socket2 0.5.6", + "static_assertions", + "str0m", + "thiserror", + "tokio", + "tokio-stream", + "tokio-tungstenite", + "tokio-util", + "tracing", + "trust-dns-resolver 0.23.2", + "uint", + "unsigned-varint", + "url", + "webpki", + "x25519-dalek 2.0.1", + "x509-parser 0.15.1", + "yasna", + "zeroize", +] + [[package]] name = "lock_api" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" +checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" dependencies = [ "autocfg", "scopeguard", @@ -4577,6 +4695,15 @@ dependencies = [ "hashbrown 0.13.2", ] +[[package]] +name = "lru" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc" +dependencies = [ + "hashbrown 0.14.3", +] + [[package]] name = "lru-cache" version = "0.1.2" @@ -4603,8 +4730,8 @@ checksum = 
"e03844fc635e92f3a0067e25fa4bf3e3dbf3f2927bf3aa01bb7bc8f1c428949d" dependencies = [ "macro_magic_core", "macro_magic_macros", - "quote 1.0.35", - "syn 2.0.52", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -4614,11 +4741,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "468155613a44cfd825f1fb0ffa532b018253920d404e6fca1e8d43155198a46d" dependencies = [ "const-random", - "derive-syn-parse", + "derive-syn-parse 0.1.5", "macro_magic_core_macros", - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.52", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -4627,9 +4754,9 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ea73aa640dc01d62a590d48c0c3521ed739d53b27f919b25c3551e233481654" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.52", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -4639,8 +4766,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef9d79ae96aaba821963320eb2b6e34d17df1e5a83d8a1985c29cc5be59577b3" dependencies = [ "macro_magic_core", - "quote 1.0.35", - "syn 2.0.52", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -4651,9 +4778,9 @@ checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" [[package]] name = "matchers" -version = "0.0.1" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" dependencies = [ "regex-automata 0.1.10", ] @@ -4666,9 +4793,9 @@ checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" [[package]] name = "matrixmultiply" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "090126dc04f95dc0d1c1c91f61bdd474b3930ca064c1edc8a849da2c6cbe1e77" +checksum = 
"7574c1cf36da4798ab73da5b215bbf444f50718207754cb522201d78d1cd0ff2" dependencies = [ "autocfg", "rawpointer", @@ -4676,17 +4803,17 @@ dependencies = [ [[package]] name = "memchr" -version = "2.5.0" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" [[package]] name = "memfd" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffc89ccdc6e10d6907450f753537ebc5c5d3460d2e4e62ea74bd571db62c0f9e" +checksum = "b2cffa4ad52c6f791f4f8b15f0c05f9824b2ced1160e88cc393d64fff9a8ac64" dependencies = [ - "rustix 0.37.27", + "rustix 0.38.32", ] [[package]] @@ -4707,15 +4834,6 @@ dependencies = [ "autocfg", ] -[[package]] -name = "memoffset" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" -dependencies = [ - "autocfg", -] - [[package]] name = "memory-db" version = "0.32.0" @@ -4740,6 +4858,7 @@ dependencies = [ [[package]] name = "messages-relay" version = "0.1.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "async-std", "async-trait", @@ -4769,9 +4888,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" +checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" dependencies = [ "adler", ] @@ -4798,16 +4917,16 @@ dependencies = [ "bitflags 1.3.2", "blake2 0.10.6", "c2-chacha", - "curve25519-dalek 4.1.1", + "curve25519-dalek 4.1.2", "either", "hashlink", "lioness", "log", 
"parking_lot 0.12.1", - "rand", - "rand_chacha", + "rand 0.8.5", + "rand_chacha 0.3.1", "rand_distr", - "subtle 2.4.1", + "subtle 2.5.0", "thiserror", "zeroize", ] @@ -4818,7 +4937,7 @@ version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c84490118f2ee2d74570d114f3d0493cbf02790df303d2707606c3e14e07c96" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "downcast", "fragile", "lazy_static", @@ -4833,9 +4952,9 @@ version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb" dependencies = [ - "cfg-if 1.0.0", - "proc-macro2 1.0.76", - "quote 1.0.35", + "cfg-if", + "proc-macro2 1.0.81", + "quote 1.0.36", "syn 1.0.109", ] @@ -4850,7 +4969,7 @@ dependencies = [ "data-encoding", "log", "multibase", - "multihash", + "multihash 0.17.0", "percent-encoding", "serde", "static_assertions", @@ -4875,21 +4994,44 @@ version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "835d6ff01d610179fbce3de1694d007e500bf33a7f29689838941d6bf783ae40" dependencies = [ + "blake2b_simd", + "blake2s_simd", + "blake3", + "core2", + "digest 0.10.7", + "multihash-derive", + "sha2 0.10.8", + "sha3", + "unsigned-varint", +] + +[[package]] +name = "multihash" +version = "0.18.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfd8a792c1694c6da4f68db0a9d707c72bd260994da179e6030a5dcee00bb815" +dependencies = [ + "blake2b_simd", + "blake2s_simd", + "blake3", "core2", + "digest 0.10.7", "multihash-derive", + "sha2 0.10.8", + "sha3", "unsigned-varint", ] [[package]] name = "multihash-derive" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc076939022111618a5026d3be019fd8b366e76314538ff9a1b59ffbcbf98bcd" +checksum = "1d6d4752e6230d8ef7adf7bd5d8c4b1f6561c1014c5ba9a37445ccefe18aa1db" dependencies = [ - "proc-macro-crate 1.3.1", + 
"proc-macro-crate 1.1.3", "proc-macro-error", - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "syn 1.0.109", "synstructure", ] @@ -4916,9 +5058,9 @@ dependencies = [ [[package]] name = "nalgebra" -version = "0.32.3" +version = "0.32.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "307ed9b18cc2423f29e83f84fd23a8e73628727990181f18641a8b5dc2ab1caa" +checksum = "3ea4908d4f23254adda3daa60ffef0f1ac7b8c3e9a864cf3cc154b251908a2ef" dependencies = [ "approx", "matrixmultiply", @@ -4936,8 +5078,8 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91761aed67d03ad966ef783ae962ef9bbaca728d2dd7ceb7939ec110fffad998" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "syn 1.0.109", ] @@ -4996,9 +5138,9 @@ dependencies = [ [[package]] name = "netlink-sys" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6471bf08e7ac0135876a9581bf3217ef0333c191c128d34878079f42ee150411" +checksum = "416060d346fbaf1f23f9512963e3e878f1a78e707cb699ba9215761754244307" dependencies = [ "bytes", "futures", @@ -5007,6 +5149,18 @@ dependencies = [ "tokio", ] +[[package]] +name = "network-interface" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae72fd9dbd7f55dda80c00d66acc3b2130436fcba9ea89118fc508eaae48dfb0" +dependencies = [ + "cc", + "libc", + "thiserror", + "winapi", +] + [[package]] name = "nix" version = "0.24.3" @@ -5014,7 +5168,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa52e972a9a719cecb6864fb88568781eb706bac2cd1d4f04a648542dbf78069" dependencies = [ "bitflags 1.3.2", - "cfg-if 1.0.0", + "cfg-if", "libc", ] @@ -5061,11 +5215,21 @@ dependencies = [ "winapi", ] +[[package]] +name = "nu-ansi-term" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] + [[package]] name = "num-bigint" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" +checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" dependencies = [ "autocfg", "num-integer", @@ -5074,9 +5238,9 @@ dependencies = [ [[package]] name = "num-complex" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e0d21255c828d6f128a1e41534206671e8c3ea0c62f32291e808dc82cff17d" +checksum = "23c6602fda94a57c990fe0df199a035d83576b496aa29f4e634a8ac6004e68a6" dependencies = [ "num-traits", ] @@ -5099,11 +5263,10 @@ dependencies = [ [[package]] name = "num-integer" -version = "0.1.45" +version = "0.1.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" dependencies = [ - "autocfg", "num-traits", ] @@ -5135,15 +5298,15 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi 0.3.2", + "hermit-abi 0.3.9", "libc", ] [[package]] name = "num_threads" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" +checksum = "5c7398b9c8b70908f6371f47ed36737907c87c52af34c268fed0bf0ceb92ead9" dependencies = [ "libc", ] @@ -5162,9 +5325,9 @@ dependencies = [ [[package]] name = "object" -version = "0.31.1" +version = "0.32.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8bda667d9f2b5051b8833f59f3bf748b28ef54f850f4fcb389a252aa383866d1" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" dependencies = [ "memchr", ] @@ -5192,9 +5355,35 @@ checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" [[package]] name = "opaque-debug" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" + +[[package]] +name = "openssl" +version = "0.10.64" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" +dependencies = [ + "bitflags 2.5.0", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", +] [[package]] name = "openssl-probe" @@ -5202,22 +5391,38 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +[[package]] +name = "openssl-src" +version = "300.2.3+3.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cff92b6f71555b61bb9315f7c64da3ca43d87531622120fea0195fc761b4843" +dependencies = [ + "cc", +] + [[package]] name = "openssl-sys" -version = "0.9.90" +version = "0.9.102" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "374533b0e45f3a7ced10fcaeccca020e66656bc03dac384f852e4e5a7a8104a6" +checksum = "c597637d56fbc83893a35eb0dd04b2b8e7a50c91e64e9493e398b5df4fb45fa2" dependencies = [ "cc", "libc", + "openssl-src", 
"pkg-config", "vcpkg", ] +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + [[package]] name = "pallet-authorship" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "frame-support", "frame-system", @@ -5231,7 +5436,7 @@ dependencies = [ [[package]] name = "pallet-balances" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "docify", "frame-benchmarking", @@ -5244,78 +5449,10 @@ dependencies = [ "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", ] -[[package]] -name = "pallet-beefy" -version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" -dependencies = [ - "frame-support", - "frame-system", - "log", - "pallet-authorship", - "pallet-session", - "parity-scale-codec", - "scale-info", - "serde", - "sp-consensus-beefy", - "sp-runtime", - "sp-session", - "sp-staking", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", -] - -[[package]] -name = "pallet-beefy-mmr" -version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" -dependencies = [ - "array-bytes 6.1.0", - "binary-merkle-tree", - "frame-support", - "frame-system", - "log", - "pallet-beefy", - "pallet-mmr", - "pallet-session", - "parity-scale-codec", - "scale-info", - "serde", - "sp-api", - "sp-consensus-beefy", - "sp-core", - "sp-io", - 
"sp-runtime", - "sp-state-machine", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", -] - -[[package]] -name = "pallet-bridge-beefy" -version = "0.1.0" -dependencies = [ - "bp-beefy", - "bp-runtime", - "bp-test-utils", - "ckb-merkle-mountain-range 0.3.2", - "frame-support", - "frame-system", - "log", - "pallet-beefy-mmr", - "pallet-mmr", - "parity-scale-codec", - "rand", - "scale-info", - "serde", - "sp-consensus-beefy", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", -] - [[package]] name = "pallet-bridge-grandpa" version = "0.7.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "bp-header-chain", "bp-runtime", @@ -5328,8 +5465,6 @@ dependencies = [ "parity-scale-codec", "scale-info", "sp-consensus-grandpa", - "sp-core", - "sp-io", "sp-runtime", "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", "sp-trie", @@ -5338,19 +5473,17 @@ dependencies = [ [[package]] name = "pallet-bridge-messages" version = "0.7.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "bp-messages", "bp-runtime", - "bp-test-utils", "frame-benchmarking", "frame-support", "frame-system", "log", "num-traits", - "pallet-balances", "parity-scale-codec", "scale-info", - "sp-io", "sp-runtime", "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", ] @@ -5358,12 +5491,12 @@ dependencies = [ [[package]] name = "pallet-bridge-parachains" version = "0.7.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "bp-header-chain", "bp-parachains", "bp-polkadot-core", "bp-runtime", - "bp-test-utils", "frame-benchmarking", "frame-support", "frame-system", @@ -5371,8 +5504,6 @@ dependencies = [ 
"pallet-bridge-grandpa", "parity-scale-codec", "scale-info", - "sp-core", - "sp-io", "sp-runtime", "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", "sp-trie", @@ -5381,6 +5512,7 @@ dependencies = [ [[package]] name = "pallet-bridge-relayers" version = "0.7.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "bp-messages", "bp-relayers", @@ -5389,61 +5521,41 @@ dependencies = [ "frame-support", "frame-system", "log", - "pallet-balances", "pallet-bridge-messages", "parity-scale-codec", "scale-info", "sp-arithmetic", - "sp-io", - "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", -] - -[[package]] -name = "pallet-grandpa" -version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" -dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", - "log", - "pallet-authorship", - "pallet-session", - "parity-scale-codec", - "scale-info", - "sp-application-crypto", - "sp-consensus-grandpa", - "sp-core", - "sp-io", - "sp-runtime", - "sp-session", - "sp-staking", + "sp-runtime", "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", ] [[package]] -name = "pallet-mmr" -version = "27.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +name = "pallet-grandpa" +version = "28.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", "log", + "pallet-authorship", + "pallet-session", "parity-scale-codec", "scale-info", + "sp-application-crypto", + "sp-consensus-grandpa", "sp-core", "sp-io", - "sp-mmr-primitives", "sp-runtime", + "sp-session", + "sp-staking", "sp-std 14.0.0 
(git+https://github.com/paritytech/polkadot-sdk?branch=master)", ] [[package]] name = "pallet-session" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "frame-support", "frame-system", @@ -5465,7 +5577,7 @@ dependencies = [ [[package]] name = "pallet-timestamp" version = "27.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "docify", "frame-benchmarking", @@ -5485,9 +5597,8 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ - "frame-benchmarking", "frame-support", "frame-system", "parity-scale-codec", @@ -5502,7 +5613,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "pallet-transaction-payment", "parity-scale-codec", @@ -5514,83 +5625,51 @@ dependencies = [ [[package]] name = "pallet-utility" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" -dependencies = [ - "frame-benchmarking", - "frame-support", - "frame-system", - "parity-scale-codec", - "scale-info", - "sp-core", - "sp-io", - "sp-runtime", - 
"sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", -] - -[[package]] -name = "pallet-xcm-bridge-hub" -version = "0.2.0" -dependencies = [ - "bp-header-chain", - "bp-messages", - "bp-runtime", - "bp-xcm-bridge-hub", - "bridge-runtime-common", - "frame-support", - "frame-system", - "log", - "pallet-balances", - "pallet-bridge-messages", - "parity-scale-codec", - "scale-info", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", - "staging-xcm", - "staging-xcm-builder", - "staging-xcm-executor", -] - -[[package]] -name = "pallet-xcm-bridge-hub-router" -version = "0.5.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ - "bp-xcm-bridge-hub-router", "frame-benchmarking", "frame-support", "frame-system", - "log", "parity-scale-codec", "scale-info", "sp-core", "sp-io", "sp-runtime", "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", - "staging-xcm", - "staging-xcm-builder", ] [[package]] name = "parachains-relay" version = "0.1.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "async-std", "async-trait", "bp-polkadot-core", "futures", "log", - "parity-scale-codec", "relay-substrate-client", "relay-utils", - "sp-core", +] + +[[package]] +name = "parity-bip39" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e69bf016dc406eff7d53a7d3f7cf1c2e72c82b9088aac1118591e36dd2cd3e9" +dependencies = [ + "bitcoin_hashes 0.13.0", + "rand 0.8.5", + "rand_core 0.6.4", + "serde", + "unicode-normalization", ] [[package]] name = "parity-scale-codec" -version = "3.6.4" +version = "3.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8e946cc0cc711189c0b0249fb8b599cbeeab9784d83c415719368bb8d4ac64" +checksum = 
"881331e34fa842a2fb61cc2db9643a8fedc615e47cfcc52597d1af0db9a7e8fe" dependencies = [ "arrayvec 0.7.4", "bitvec", @@ -5603,13 +5682,13 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.6.4" +version = "3.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a296c3079b5fefbc499e1de58dc26c09b1b9a5952d26694ee89f04a43ebbb3e" +checksum = "be30eaf4b0a9fba5336683b38de57bb86d179a35862ba6bfcf57625d006bde5b" dependencies = [ - "proc-macro-crate 1.3.1", - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro-crate 2.0.0", + "proc-macro2 1.0.81", + "quote 1.0.36", "syn 1.0.109", ] @@ -5625,7 +5704,7 @@ version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d32c34f4f5ca7f9196001c0aba5a1f9a5a12382c8944b8b0f90233282d1e8f8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "ethereum-types", "hashbrown 0.12.3", "impl-trait-for-tuples", @@ -5643,7 +5722,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2" dependencies = [ - "proc-macro2 1.0.76", + "proc-macro2 1.0.81", "syn 1.0.109", "synstructure", ] @@ -5656,9 +5735,9 @@ checksum = "e1ad0aff30c1da14b1254fcb2af73e1fa9a28670e584a626f53a369d0e157304" [[package]] name = "parking" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14f2252c834a40ed9bb5422029649578e63aa341ac401f74e719dd1afda8394e" +checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" [[package]] name = "parking_lot" @@ -5678,7 +5757,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.8", + "parking_lot_core 0.9.9", ] [[package]] @@ -5687,7 +5766,7 @@ version = "0.8.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "instant", "libc", "redox_syscall 0.2.16", @@ -5697,15 +5776,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.8" +version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" +checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", - "redox_syscall 0.3.5", + "redox_syscall 0.4.1", "smallvec", - "windows-targets 0.48.1", + "windows-targets 0.48.5", ] [[package]] @@ -5722,7 +5801,7 @@ checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" dependencies = [ "base64ct", "rand_core 0.6.4", - "subtle 2.4.1", + "subtle 2.5.0", ] [[package]] @@ -5752,38 +5831,38 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "petgraph" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dd7d28ee937e54fe3080c91faa1c3a46c06de6252988a7f4592ba2310ef22a4" +checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" dependencies = [ "fixedbitset", - "indexmap 1.9.3", + "indexmap 2.2.6", ] [[package]] name = "pin-project" -version = "1.1.3" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" +checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" dependencies = [ "pin-project-internal", ] 
[[package]] name = "pin-project-internal" -version = "1.1.3" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" +checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.52", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -5794,9 +5873,9 @@ checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" [[package]] name = "pin-project-lite" -version = "0.2.12" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12cc1b0bf1727a77a54b6654e7b5f1af8604923edc8b81885f8ec92f9e3f0a05" +checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" [[package]] name = "pin-utils" @@ -5804,6 +5883,17 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "piper" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "668d31b1c4eba19242f2088b2bf3316b82ca31082a8335764db4e083db7485d4" +dependencies = [ + "atomic-waker", + "fastrand 2.0.2", + "futures-io", +] + [[package]] name = "pkcs8" version = "0.10.2" @@ -5816,20 +5906,20 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.27" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" +checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" [[package]] name = "platforms" -version = "3.0.2" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3d7ddaed09e0eb771a79ab0fd64609ba0afb0a8366421957936ad14cbd13630" +checksum = 
"db23d408679286588f4d4644f965003d056e3dd5abcaaa938116871d7ce2fee7" [[package]] name = "polkadot-core-primitives" version = "7.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "parity-scale-codec", "scale-info", @@ -5841,7 +5931,7 @@ dependencies = [ [[package]] name = "polkadot-parachain-primitives" version = "6.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "bounded-collections", "derive_more", @@ -5858,7 +5948,7 @@ dependencies = [ [[package]] name = "polkadot-primitives" version = "7.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "bitvec", "hex-literal", @@ -5929,9 +6019,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c4fdfc49717fb9a196e74a5d28e0bc764eb394a2c803eb11133a31ac996c60c" dependencies = [ "polkavm-common", - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.52", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -5941,7 +6031,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ba81f7b5faac81e528eb6158a6f3c9e0bb1008e0ffa19653bc8dea925ecb429" dependencies = [ "polkavm-derive-impl", - "syn 2.0.52", + "syn 2.0.60", ] [[package]] @@ -5958,47 +6048,50 @@ checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" dependencies = [ "autocfg", "bitflags 1.3.2", - "cfg-if 1.0.0", + "cfg-if", "concurrent-queue", "libc", "log", - "pin-project-lite 0.2.12", + 
"pin-project-lite 0.2.14", "windows-sys 0.48.0", ] [[package]] -name = "poly1305" -version = "0.7.2" +name = "polling" +version = "3.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "048aeb476be11a4b6ca432ca569e375810de9294ae78f4774e78ea98a9246ede" +checksum = "e0c976a60b2d7e99d6f229e414670a9b85d13ac305cc6d1e9c134de58c5aaaf6" dependencies = [ - "cpufeatures", - "opaque-debug 0.3.0", - "universal-hash 0.4.1", + "cfg-if", + "concurrent-queue", + "hermit-abi 0.3.9", + "pin-project-lite 0.2.14", + "rustix 0.38.32", + "tracing", + "windows-sys 0.52.0", ] [[package]] -name = "polyval" -version = "0.5.3" +name = "poly1305" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8419d2b623c7c0896ff2d5d96e2cb4ede590fed28fcc34934f4c33c036e620a1" +checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" dependencies = [ - "cfg-if 1.0.0", "cpufeatures", - "opaque-debug 0.3.0", - "universal-hash 0.4.1", + "opaque-debug 0.3.1", + "universal-hash", ] [[package]] name = "polyval" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52cff9d1d4dee5fe6d03729099f4a310a41179e0a10dbf542039873f2e826fb" +checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", - "opaque-debug 0.3.0", - "universal-hash 0.5.1", + "opaque-debug 0.3.1", + "universal-hash", ] [[package]] @@ -6021,7 +6114,7 @@ checksum = "59230a63c37f3e18569bdb90e4a89cbf5bf8b06fea0b84e65ea10cc4df47addd" dependencies = [ "difflib", "float-cmp", - "itertools", + "itertools 0.10.5", "normalize-line-endings", "predicates-core", "regex", @@ -6043,21 +6136,31 @@ dependencies = [ "termtree", ] +[[package]] +name = "prettier-please" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22020dfcf177fcc7bf5deaf7440af371400c67c0de14c399938d8ed4fb4645d3" 
+dependencies = [ + "proc-macro2 1.0.81", + "syn 2.0.60", +] + [[package]] name = "prettyplease" -version = "0.1.25" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" +checksum = "f28f53e8b192565862cf99343194579a022eb9c7dd3a8d03134734803c7b3125" dependencies = [ - "proc-macro2 1.0.76", + "proc-macro2 1.0.81", "syn 1.0.109", ] [[package]] name = "primitive-types" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f3486ccba82358b11a77516035647c34ba167dfa53312630de83b12bd4f3d66" +checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" dependencies = [ "fixed-hash", "impl-codec", @@ -6069,12 +6172,21 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "1.3.1" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" +checksum = "e17d47ce914bf4de440332250b0edd23ce48c005f59fab39d3335866b114f11a" dependencies = [ - "once_cell", - "toml_edit 0.19.14", + "thiserror", + "toml 0.5.11", +] + +[[package]] +name = "proc-macro-crate" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8" +dependencies = [ + "toml_edit 0.20.7", ] [[package]] @@ -6083,7 +6195,7 @@ version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d37c51ca738a55da99dc0c4a34860fd675453b8b36209178c2249bb13651284" dependencies = [ - "toml_edit 0.21.0", + "toml_edit 0.21.1", ] [[package]] @@ -6093,8 +6205,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2 
1.0.81", + "quote 1.0.36", "syn 1.0.109", "version_check", ] @@ -6105,26 +6217,20 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "version_check", ] -[[package]] -name = "proc-macro-hack" -version = "0.5.20+deprecated" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc375e1527247fe1a97d8b7156678dfe7c1af2fc075c9a4db3690ecd2a148068" - [[package]] name = "proc-macro-warning" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b698b0b09d40e9b7c1a47b132d66a8b54bcd20583d9b6d06e4535e383b4405c" +checksum = "834da187cfe638ae8abb0203f0b33e5ccdb02a28e7199f2f47b3e2754f50edca" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.52", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -6138,9 +6244,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.76" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95fc56cda0b5c3325f5fbbd7ff9fda9e02bb00bb3dac51252d2f1bfa1cb8cc8c" +checksum = "3d1597b0c024618f09a9c3b8655b7e430397a36d23fdafec26d6965e9eec3eba" dependencies = [ "unicode-ident", ] @@ -6151,7 +6257,7 @@ version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "449811d15fbdf5ceb5c1144416066429cf82316e2ec8ce0c1f6f8a02e7bbcf8c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "fnv", "lazy_static", "memchr", @@ -6173,13 +6279,13 @@ dependencies = [ [[package]] name = "prometheus-client-derive-encode" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b6a5217beb0ad503ee7fa752d451c905113d70721b937126158f3106a48cc1" +checksum = 
"440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 1.0.109", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -6200,7 +6306,7 @@ checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" dependencies = [ "bytes", "heck 0.4.1", - "itertools", + "itertools 0.10.5", "lazy_static", "log", "multimap", @@ -6221,9 +6327,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", - "itertools", - "proc-macro2 1.0.76", - "quote 1.0.35", + "itertools 0.10.5", + "proc-macro2 1.0.81", + "quote 1.0.36", "syn 1.0.109", ] @@ -6284,17 +6390,35 @@ dependencies = [ "pin-project-lite 0.1.12", ] +[[package]] +name = "quinn" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e8b432585672228923edbbf64b8b12c14e1112f62e88737655b4a083dbcd78e" +dependencies = [ + "bytes", + "pin-project-lite 0.2.14", + "quinn-proto", + "quinn-udp", + "rustc-hash", + "rustls 0.20.9", + "thiserror", + "tokio", + "tracing", + "webpki", +] + [[package]] name = "quinn-proto" -version = "0.9.3" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67c10f662eee9c94ddd7135043e544f3c82fa839a1e7b865911331961b53186c" +checksum = "94b0b33c13a79f669c85defaf4c275dc86a0c0372807d0ca3d78e0bb87274863" dependencies = [ "bytes", - "rand", + "rand 0.8.5", "ring 0.16.20", "rustc-hash", - "rustls 0.20.8", + "rustls 0.20.9", "slab", "thiserror", "tinyvec", @@ -6302,6 +6426,19 @@ dependencies = [ "webpki", ] +[[package]] +name = "quinn-udp" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "641538578b21f5e5c8ea733b736895576d0fe329bb883b937db6f4d163dbaaf4" +dependencies = [ + "libc", + "quinn-proto", + "socket2 0.4.10", + "tracing", + "windows-sys 0.42.0", +] 
+ [[package]] name = "quote" version = "0.6.13" @@ -6313,11 +6450,11 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.35" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ - "proc-macro2 1.0.76", + "proc-macro2 1.0.81", ] [[package]] @@ -6326,6 +6463,19 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" +[[package]] +name = "rand" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +dependencies = [ + "getrandom 0.1.16", + "libc", + "rand_chacha 0.2.2", + "rand_core 0.5.1", + "rand_hc", +] + [[package]] name = "rand" version = "0.8.5" @@ -6333,10 +6483,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha", + "rand_chacha 0.3.1", "rand_core 0.6.4", ] +[[package]] +name = "rand_chacha" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" +dependencies = [ + "ppv-lite86", + "rand_core 0.5.1", +] + [[package]] name = "rand_chacha" version = "0.3.1" @@ -6362,7 +6522,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.10", + "getrandom 0.2.14", ] [[package]] @@ -6372,7 +6532,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31" dependencies = [ 
"num-traits", - "rand", + "rand 0.8.5", +] + +[[package]] +name = "rand_hc" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" +dependencies = [ + "rand_core 0.5.1", ] [[package]] @@ -6383,9 +6552,9 @@ checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3" [[package]] name = "rayon" -version = "1.8.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c27db03db7734835b3f53954b534c91069375ce6ccaa2e065441e07d9b6cdb1" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" dependencies = [ "either", "rayon-core", @@ -6393,9 +6562,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.12.0" +version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ce3fb6ad83f861aac485e76e1985cd109d9a3713802152be56c3b1f0e0658ed" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" dependencies = [ "crossbeam-deque", "crossbeam-utils", @@ -6444,42 +6613,42 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.3.5" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" dependencies = [ "bitflags 1.3.2", ] [[package]] name = "redox_users" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" +checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" dependencies = [ - "getrandom 0.2.10", - "redox_syscall 0.2.16", + "getrandom 0.2.14", + "libredox", "thiserror", ] [[package]] name = "ref-cast" -version = "1.0.18" +version = "1.0.22" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1641819477c319ef452a075ac34a4be92eb9ba09f6841f62d594d50fdcf0bf6b" +checksum = "c4846d4c50d1721b1a3bef8af76924eef20d5e723647333798c1b519b3a9473f" dependencies = [ "ref-cast-impl", ] [[package]] name = "ref-cast-impl" -version = "1.0.18" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68bf53dad9b6086826722cdc99140793afd9f62faa14a1ad07eb4f955e7a7216" +checksum = "5fddb4f8d99b0a2ebafc65a87a69a7b9875e4b1ae1f00db265d300ef7f28bccc" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.52", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -6496,14 +6665,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.1" +version = "1.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575" +checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.3.3", - "regex-syntax 0.7.4", + "regex-automata 0.4.6", + "regex-syntax 0.8.3", ] [[package]] @@ -6517,13 +6686,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.3" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39354c10dd07468c2e73926b23bb9c2caca74c5501e38a35da70406f1d923310" +checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.7.4", + "regex-syntax 0.8.3", ] [[package]] @@ -6534,9 +6703,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.4" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" +checksum = 
"adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" [[package]] name = "relay-bridge-hub-kusama-client" @@ -6703,6 +6872,7 @@ dependencies = [ [[package]] name = "relay-substrate-client" version = "0.1.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "async-std", "async-trait", @@ -6714,7 +6884,7 @@ dependencies = [ "frame-support", "frame-system", "futures", - "jsonrpsee 0.17.1", + "jsonrpsee", "log", "num-traits", "pallet-balances", @@ -6723,7 +6893,7 @@ dependencies = [ "pallet-transaction-payment-rpc-runtime-api", "pallet-utility", "parity-scale-codec", - "rand", + "rand 0.8.5", "relay-utils", "sc-chain-spec", "sc-rpc-api", @@ -6744,6 +6914,7 @@ dependencies = [ [[package]] name = "relay-utils" version = "0.1.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "ansi_term", "anyhow", @@ -6802,7 +6973,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" dependencies = [ "hmac 0.12.1", - "subtle 2.4.1", + "subtle 2.5.0", ] [[package]] @@ -6831,11 +7002,26 @@ dependencies = [ "libc", "once_cell", "spin 0.5.2", - "untrusted", + "untrusted 0.7.1", "web-sys", "winapi", ] +[[package]] +name = "ring" +version = "0.17.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" +dependencies = [ + "cc", + "cfg-if", + "getrandom 0.2.14", + "libc", + "spin 0.9.8", + "untrusted 0.9.0", + "windows-sys 0.52.0", +] + [[package]] name = "rlp" version = "0.5.2" @@ -6905,9 +7091,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.36.15" +version = "0.36.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c37f1bd5ef1b5422177b7646cba67430579cfe2ace80f284fee876bca52ad941" +checksum = 
"305efbd14fde4139eb501df5f136994bb520b033fa9fbdce287507dc23b8c7ed" dependencies = [ "bitflags 1.3.2", "errno", @@ -6933,22 +7119,22 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.31" +version = "0.38.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" +checksum = "65e04861e65f21776e67888bfbea442b3642beaa0138fdb1dd7a84a52dffdb89" dependencies = [ - "bitflags 2.4.1", + "bitflags 2.5.0", "errno", "libc", - "linux-raw-sys 0.4.12", + "linux-raw-sys 0.4.13", "windows-sys 0.52.0", ] [[package]] name = "rustls" -version = "0.20.8" +version = "0.20.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" +checksum = "1b80e3dec595989ea8510028f30c408a4630db12c9cbb8de34203b89d6577e99" dependencies = [ "log", "ring 0.16.20", @@ -6958,16 +7144,30 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.5" +version = "0.21.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79ea77c539259495ce8ca47f53e66ae0330a8819f67e23ac96ca02f50e7b7d36" +checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" dependencies = [ "log", - "ring 0.16.20", - "rustls-webpki", + "ring 0.17.8", + "rustls-webpki 0.101.7", "sct", ] +[[package]] +name = "rustls" +version = "0.22.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99008d7ad0bbbea527ec27bddbc0e432c5b87d8175178cee68d2eec9c4a1813c" +dependencies = [ + "log", + "ring 0.17.8", + "rustls-pki-types", + "rustls-webpki 0.102.2", + "subtle 2.5.0", + "zeroize", +] + [[package]] name = "rustls-native-certs" version = "0.6.3" @@ -6975,44 +7175,84 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" dependencies = [ "openssl-probe", - "rustls-pemfile", + "rustls-pemfile 
1.0.4", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-native-certs" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f1fb85efa936c42c6d5fc28d2629bb51e4b2f4b8a5211e297d599cc5a093792" +dependencies = [ + "openssl-probe", + "rustls-pemfile 2.1.2", + "rustls-pki-types", "schannel", "security-framework", ] [[package]] name = "rustls-pemfile" -version = "1.0.3" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +dependencies = [ + "base64 0.21.7", +] + +[[package]] +name = "rustls-pemfile" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" +checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" dependencies = [ - "base64 0.21.2", + "base64 0.22.0", + "rustls-pki-types", ] +[[package]] +name = "rustls-pki-types" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecd36cc4259e3e4514335c4a138c6b43171a8d61d8f5c9348f9fc7529416f247" + [[package]] name = "rustls-webpki" -version = "0.101.4" +version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d93931baf2d282fff8d3a532bbfd7653f734643161b87e3e01e59a04439bf0d" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring 0.16.20", - "untrusted", + "ring 0.17.8", + "untrusted 0.9.0", +] + +[[package]] +name = "rustls-webpki" +version = "0.102.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "faaa0a62740bedb9b2ef5afa303da42764c012f743917351dc9a237ea1663610" +dependencies = [ + "ring 0.17.8", + "rustls-pki-types", + "untrusted 0.9.0", ] [[package]] name = "rustversion" -version = "1.0.14" +version = "1.0.15" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" +checksum = "80af6f9131f277a45a3fba6ce8e2258037bb0477a67e610d3c1fe046ab31de47" [[package]] name = "ruzstd" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3ffab8f9715a0d455df4bbb9d21e91135aab3cd3ca187af0cd0c3c3f868fdc" +checksum = "58c4eb8a81997cf040a091d1f7e1938aeab6749d3a0dfa73af43cdc32393483d" dependencies = [ "byteorder", - "thiserror-core", + "derive_more", "twox-hash", ] @@ -7029,9 +7269,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.15" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" +checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1" [[package]] name = "safe_arch" @@ -7054,7 +7294,7 @@ dependencies = [ [[package]] name = "sc-allocator" version = "23.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "log", "sp-core", @@ -7064,10 +7304,10 @@ dependencies = [ [[package]] name = "sc-chain-spec" -version = "27.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +version = "28.0.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ - "array-bytes 6.1.0", + "array-bytes 6.2.2", "docify", "log", "memmap2", @@ -7081,28 +7321,29 @@ dependencies = [ "serde_json", "sp-blockchain", "sp-core", - "sp-crypto-hashing", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", "sp-genesis-builder", "sp-io", "sp-runtime", "sp-state-machine", + 
"sp-tracing 16.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", ] [[package]] name = "sc-chain-spec-derive" version = "11.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.52", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] name = "sc-client-api" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "fnv", "futures", @@ -7129,16 +7370,16 @@ dependencies = [ [[package]] name = "sc-consensus" version = "0.33.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "async-trait", "futures", "futures-timer", - "libp2p-identity", "log", "mockall", "parking_lot 0.12.1", "sc-client-api", + "sc-network-types", "sc-utils", "serde", "sp-api", @@ -7154,7 +7395,7 @@ dependencies = [ [[package]] name = "sc-executor" version = "0.32.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", @@ -7177,7 +7418,7 @@ dependencies = [ [[package]] name = "sc-executor-common" version = "0.29.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = 
"git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "polkavm", "sc-allocator", @@ -7190,7 +7431,7 @@ dependencies = [ [[package]] name = "sc-executor-polkavm" version = "0.29.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "log", "polkavm", @@ -7201,14 +7442,14 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" version = "0.29.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "anyhow", - "cfg-if 1.0.0", + "cfg-if", "libc", "log", "parking_lot 0.12.1", - "rustix 0.36.15", + "rustix 0.36.17", "sc-allocator", "sc-executor-common", "sp-runtime-interface 24.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", @@ -7219,7 +7460,7 @@ dependencies = [ [[package]] name = "sc-mixnet" version = "0.4.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "array-bytes 4.2.0", "arrayvec 0.7.4", @@ -7227,7 +7468,6 @@ dependencies = [ "bytes", "futures", "futures-timer", - "libp2p-identity", "log", "mixnet", "multiaddr", @@ -7235,6 +7475,7 @@ dependencies = [ "parking_lot 0.12.1", "sc-client-api", "sc-network", + "sc-network-types", "sc-transaction-pool-api", "sp-api", "sp-consensus", @@ -7248,13 +7489,14 @@ dependencies = [ [[package]] name = "sc-network" version = "0.34.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = 
"git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ - "array-bytes 6.1.0", - "async-channel", + "array-bytes 6.2.2", + "async-channel 1.9.0", "async-trait", "asynchronous-codec", "bytes", + "cid 0.9.0", "either", "fnv", "futures", @@ -7262,16 +7504,22 @@ dependencies = [ "ip_network", "libp2p", "linked_hash_set", + "litep2p", "log", "mockall", + "once_cell", "parity-scale-codec", "parking_lot 0.12.1", "partial_sort", "pin-project", - "rand", + "prost", + "prost-build", + "rand 0.8.5", "sc-client-api", "sc-network-common", + "sc-network-types", "sc-utils", + "schnellru", "serde", "serde_json", "smallvec", @@ -7284,6 +7532,7 @@ dependencies = [ "tokio", "tokio-stream", "unsigned-varint", + "void", "wasm-timer", "zeroize", ] @@ -7291,7 +7540,7 @@ dependencies = [ [[package]] name = "sc-network-common" version = "0.33.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "async-trait", "bitflags 1.3.2", @@ -7300,17 +7549,32 @@ dependencies = [ "parity-scale-codec", "prost-build", "sc-consensus", + "sc-network-types", "sp-consensus", "sp-consensus-grandpa", "sp-runtime", ] +[[package]] +name = "sc-network-types" +version = "0.10.0-dev" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" +dependencies = [ + "bs58 0.4.0", + "libp2p-identity", + "litep2p", + "multiaddr", + "multihash 0.17.0", + "rand 0.8.5", + "thiserror", +] + [[package]] name = "sc-rpc-api" version = "0.33.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ - "jsonrpsee 0.22.2", + "jsonrpsee", 
"parity-scale-codec", "sc-chain-spec", "sc-mixnet", @@ -7328,7 +7592,7 @@ dependencies = [ [[package]] name = "sc-telemetry" version = "15.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "chrono", "futures", @@ -7336,7 +7600,8 @@ dependencies = [ "log", "parking_lot 0.12.1", "pin-project", - "rand", + "rand 0.8.5", + "sc-network", "sc-utils", "serde", "serde_json", @@ -7347,7 +7612,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool-api" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "async-trait", "futures", @@ -7363,9 +7628,9 @@ dependencies = [ [[package]] name = "sc-utils" version = "14.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ - "async-channel", + "async-channel 1.9.0", "futures", "futures-timer", "lazy_static", @@ -7377,79 +7642,79 @@ dependencies = [ [[package]] name = "scale-bits" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "036575c29af9b6e4866ffb7fa055dbf623fe7a9cc159b33786de6013a6969d89" +checksum = "662d10dcd57b1c2a3c41c9cf68f71fb09747ada1ea932ad961aca7e2ca28315f" dependencies = [ "parity-scale-codec", "scale-info", + "scale-type-resolver", "serde", ] [[package]] name = "scale-decode" -version = "0.9.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7789f5728e4e954aaa20cadcc370b99096fb8645fca3c9333ace44bb18f30095" +checksum = "afc79ba56a1c742f5aeeed1f1801f3edf51f7e818f0a54582cac6f131364ea7b" dependencies = [ "derive_more", "parity-scale-codec", "primitive-types", "scale-bits", "scale-decode-derive", - "scale-info", + "scale-type-resolver", "smallvec", ] [[package]] name = "scale-decode-derive" -version = "0.9.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27873eb6005868f8cc72dcfe109fae664cf51223d35387bc2f28be4c28d94c47" +checksum = "5398fdb3c7bea3cb419bac4983aadacae93fe1a7b5f693f4ebd98c3821aad7a5" dependencies = [ "darling 0.14.4", - "proc-macro-crate 1.3.1", - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "syn 1.0.109", ] [[package]] name = "scale-encode" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d70cb4b29360105483fac1ed567ff95d65224a14dd275b6303ed0a654c78de5" +checksum = "628800925a33794fb5387781b883b5e14d130fece9af5a63613867b8de07c5c7" dependencies = [ "derive_more", "parity-scale-codec", "primitive-types", "scale-bits", "scale-encode-derive", - "scale-info", + "scale-type-resolver", "smallvec", ] [[package]] name = "scale-encode-derive" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "995491f110efdc6bea96d6a746140e32bfceb4ea47510750a5467295a4707a25" +checksum = "7a304e1af7cdfbe7a24e08b012721456cc8cecdedadc14b3d10513eada63233c" dependencies = [ "darling 0.14.4", - "proc-macro-crate 1.3.1", - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro-crate 1.1.3", + "proc-macro2 1.0.81", + "quote 1.0.36", "syn 1.0.109", ] [[package]] name = "scale-info" -version = "2.10.0" +version = "2.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f7d66a1128282b7ef025a8ead62a4a9fcf017382ec53b8ffbf4d7bf77bd3c60" +checksum = 
"7c453e59a955f81fb62ee5d596b450383d699f152d350e9d23a0db2adb78e4c0" dependencies = [ "bitvec", - "cfg-if 1.0.0", + "cfg-if", "derive_more", "parity-scale-codec", "scale-info-derive", @@ -7458,21 +7723,44 @@ dependencies = [ [[package]] name = "scale-info-derive" -version = "2.10.0" +version = "2.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abf2c68b89cafb3b8d918dd07b42be0da66ff202cf1155c5739a4e0c1ea0dc19" +checksum = "18cf6c6447f813ef19eb450e985bcce6705f9ce7660db221b59093d15c79c4b7" dependencies = [ - "proc-macro-crate 1.3.1", - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro-crate 1.1.3", + "proc-macro2 1.0.81", + "quote 1.0.36", "syn 1.0.109", ] +[[package]] +name = "scale-type-resolver" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10b800069bfd43374e0f96f653e0d46882a2cb16d6d961ac43bea80f26c76843" +dependencies = [ + "scale-info", + "smallvec", +] + +[[package]] +name = "scale-typegen" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d470fa75e71b12b3244a4113adc4bc49891f3daba2054703cacd06256066397e" +dependencies = [ + "proc-macro2 1.0.81", + "quote 1.0.36", + "scale-info", + "syn 2.0.60", + "thiserror", +] + [[package]] name = "scale-value" -version = "0.12.0" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6538d1cc1af9c0baf401c57da8a6d4730ef582db0d330d2efa56ec946b5b0283" +checksum = "c07ccfee963104335c971aaf8b7b0e749be8569116322df23f1f75c4ca9e4a28" dependencies = [ "base58", "blake2 0.10.6", @@ -7484,17 +7772,18 @@ dependencies = [ "scale-decode", "scale-encode", "scale-info", + "scale-type-resolver", "serde", "yap", ] [[package]] name = "schannel" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" +checksum = 
"fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -7503,43 +7792,27 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "772575a524feeb803e5b0fcbc6dd9f367e579488197c94c6e4023aad2305774d" dependencies = [ - "ahash 0.8.7", - "cfg-if 1.0.0", + "ahash 0.8.11", + "cfg-if", "hashbrown 0.13.2", ] -[[package]] -name = "schnorrkel" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "844b7645371e6ecdf61ff246ba1958c29e802881a749ae3fb1993675d210d28d" -dependencies = [ - "arrayref", - "arrayvec 0.7.4", - "curve25519-dalek-ng", - "merlin", - "rand_core 0.6.4", - "sha2 0.9.9", - "subtle-ng", - "zeroize", -] - [[package]] name = "schnorrkel" version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8de18f6d8ba0aad7045f5feae07ec29899c1112584a38509a84ad7b04451eaa0" dependencies = [ - "aead 0.5.2", + "aead", "arrayref", "arrayvec 0.7.4", - "curve25519-dalek 4.1.1", + "curve25519-dalek 4.1.2", "getrandom_or_panic", "merlin", "rand_core 0.6.4", "serde_bytes", - "sha2 0.10.7", - "subtle 2.4.1", + "sha2 0.10.8", + "subtle 2.5.0", "zeroize", ] @@ -7551,12 +7824,27 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "sct" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "ring 0.16.20", - "untrusted", + "ring 0.17.8", + "untrusted 0.9.0", +] + +[[package]] +name = "sctp-proto" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f64cef148d3295c730c3cb340b0b252a4d570b1c7d4bf0808f88540b0a888bc" +dependencies = [ + "bytes", + "crc", + "fxhash", + 
"log", + "rand 0.8.5", + "slab", + "thiserror", ] [[package]] @@ -7570,24 +7858,24 @@ dependencies = [ "generic-array 0.14.7", "pkcs8", "serdect", - "subtle 2.4.1", + "subtle 2.5.0", "zeroize", ] [[package]] name = "secp256k1" -version = "0.28.0" +version = "0.28.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2acea373acb8c21ecb5a23741452acd2593ed44ee3d343e72baaa143bc89d0d5" +checksum = "d24b59d129cdadea20aea4fb2352fa053712e5d713eee47d700cd4b2bc002f10" dependencies = [ "secp256k1-sys", ] [[package]] name = "secp256k1-sys" -version = "0.9.0" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09e67c467c38fd24bd5499dc9a18183b31575c12ee549197e3e20d57aa4fe3b7" +checksum = "e5d1746aae42c19d583c3c1a8c646bfad910498e2051c551a7f2e3c0c9fbb7eb" dependencies = [ "cc", ] @@ -7603,9 +7891,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.9.1" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc758eb7bffce5b308734e9b0c1468893cae9ff70ebf13e7090be8dcbcc83a8" +checksum = "770452e37cad93e0a50d5abc3990d2bc351c36d0328f86cefec2f2fb206eaef6" dependencies = [ "bitflags 1.3.2", "core-foundation", @@ -7616,9 +7904,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.9.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f51d0c0d83bec45f16480d0ce0058397a69e48fcdc52d1dc8855fb68acbd31a7" +checksum = "41f3cc463c0ef97e11c3461a9d3787412d30e8e7eb907c79180c4a57bf7c04ef" dependencies = [ "core-foundation-sys", "libc", @@ -7626,15 +7914,15 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.18" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" +checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" [[package]] name = "serde" 
-version = "1.0.197" +version = "1.0.198" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" +checksum = "9846a40c979031340571da2545a4e5b7c4163bdae79b301d5f86d03979451fcc" dependencies = [ "serde_derive", ] @@ -7650,22 +7938,22 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.197" +version = "1.0.198" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" +checksum = "e88edab869b01783ba905e7d0153f9fc1a6505a96e4ad3018011eedb838566d9" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.52", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] name = "serde_json" -version = "1.0.114" +version = "1.0.116" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" +checksum = "3e17db7126d17feb94eb3fad46bf1a96b034e8aacbc2e775fe81505f8b0b2813" dependencies = [ - "indexmap 2.2.2", + "indexmap 2.2.6", "itoa", "ryu", "serde", @@ -7697,10 +7985,42 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" dependencies = [ "block-buffer 0.9.0", - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.9.0", - "opaque-debug 0.3.0", + "opaque-debug 0.3.1", +] + +[[package]] +name = "sha-1" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.10.7", + "sha1-asm", +] + +[[package]] +name = "sha1" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" +dependencies = [ + "cfg-if", + "cpufeatures", + 
"digest 0.10.7", +] + +[[package]] +name = "sha1-asm" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ba6947745e7f86be3b8af00b7355857085dbdf8901393c89514510eb61f4e21" +dependencies = [ + "cc", ] [[package]] @@ -7710,19 +8030,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" dependencies = [ "block-buffer 0.9.0", - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.9.0", - "opaque-debug 0.3.0", + "opaque-debug 0.3.1", ] [[package]] name = "sha2" -version = "0.10.7" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.10.7", ] @@ -7739,9 +8059,9 @@ dependencies = [ [[package]] name = "sharded-slab" -version = "0.1.4" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" dependencies = [ "lazy_static", ] @@ -7762,8 +8082,8 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c4aa94397e2023af5b7cff5b8d4785e935cfb77f0e4aab0cae3b26258ace556" dependencies = [ - "async-io", - "futures-lite", + "async-io 1.13.0", + "futures-lite 1.13.0", "libc", "signal-hook", ] @@ -7779,9 +8099,15 @@ dependencies = [ [[package]] name = "signature" -version = "2.1.0" +version = "1.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" + +[[package]] +name = "signature" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "5e1788eed21689f9cf370582dfc467ef36ed9c707f073528ddafa8d83e3b8500" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ "digest 0.10.7", "rand_core 0.6.4", @@ -7800,6 +8126,15 @@ dependencies = [ "wide", ] +[[package]] +name = "simple-dns" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cae9a3fcdadafb6d97f4c0e007e4247b114ee0f119f650c3cbf3a8b3a1479694" +dependencies = [ + "bitflags 2.5.0", +] + [[package]] name = "simple-mermaid" version = "0.1.1" @@ -7808,15 +8143,15 @@ checksum = "620a1d43d70e142b1d46a929af51d44f383db9c7a2ec122de2cd992ccfcf3c18" [[package]] name = "siphasher" -version = "0.3.10" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bd3e3206899af3f8b12af284fafc038cc1dc2b41d1b89dd17297221c5d225de" +checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" [[package]] name = "slab" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6528351c9bc8ab22353f9d776db39a20288e8d6c37ef8cfe3317cf875eecfc2d" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" dependencies = [ "autocfg", ] @@ -7833,59 +8168,61 @@ version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d7400c0eff44aa2fcb5e31a5f24ba9716ed90138769e4977a2ba6014ae63eb5" dependencies = [ - "async-channel", + "async-channel 1.9.0", "futures-core", "futures-io", ] [[package]] name = "smallvec" -version = "1.11.0" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "smol" -version = "1.3.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"13f2b548cd8447f8de0fdf1c592929f70f4fc7039a05e47404b0d096ec6987a1" +checksum = "e635339259e51ef85ac7aa29a1cd991b957047507288697a690e80ab97d07cad" dependencies = [ - "async-channel", + "async-channel 2.2.1", "async-executor", "async-fs", - "async-io", - "async-lock", + "async-io 2.3.2", + "async-lock 3.3.0", "async-net", "async-process", "blocking", - "futures-lite", + "futures-lite 2.3.0", ] [[package]] name = "smoldot" -version = "0.8.0" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cce5e2881b30bad7ef89f383a816ad0b22c45915911f28499026de4a76d20ee" +checksum = "e6d1eaa97d77be4d026a1e7ffad1bb3b78448763b357ea6f8188d3e6f736a9b9" dependencies = [ "arrayvec 0.7.4", - "async-lock", - "atomic", - "base64 0.21.2", + "async-lock 3.3.0", + "atomic-take", + "base64 0.21.7", "bip39", "blake2-rfc", - "bs58 0.5.0", + "bs58 0.5.1", + "chacha20", "crossbeam-queue", "derive_more", - "ed25519-zebra", + "ed25519-zebra 4.0.3", "either", - "event-listener", + "event-listener 4.0.3", "fnv", - "futures-channel", + "futures-lite 2.3.0", "futures-util", "hashbrown 0.14.3", "hex", "hmac 0.12.1", - "itertools", + "itertools 0.12.1", + "libm", "libsecp256k1", "merlin", "no-std-net", @@ -7895,75 +8232,83 @@ dependencies = [ "num-traits", "pbkdf2", "pin-project", - "rand", - "rand_chacha", + "poly1305", + "rand 0.8.5", + "rand_chacha 0.3.1", "ruzstd", - "schnorrkel 0.10.2", + "schnorrkel", "serde", "serde_json", - "sha2 0.10.7", + "sha2 0.10.8", + "sha3", "siphasher", "slab", "smallvec", - "smol", - "snow", "soketto", - "tiny-keccak", "twox-hash", "wasmi", + "x25519-dalek 2.0.1", + "zeroize", ] [[package]] name = "smoldot-light" -version = "0.6.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b2f7b4687b83ff244ef6137735ed5716ad37dcdf3ee16c4eb1a32fb9808fa47" +checksum = "5496f2d116b7019a526b1039ec2247dd172b8670633b1a64a614c9ea12c9d8c7" dependencies = [ - "async-lock", + "async-channel 2.2.1", 
+ "async-lock 3.3.0", + "base64 0.21.7", "blake2-rfc", "derive_more", "either", - "event-listener", + "event-listener 4.0.3", "fnv", "futures-channel", + "futures-lite 2.3.0", "futures-util", "hashbrown 0.14.3", "hex", - "itertools", + "itertools 0.12.1", "log", - "lru 0.10.1", + "lru 0.12.3", + "no-std-net", "parking_lot 0.12.1", - "rand", + "pin-project", + "rand 0.8.5", + "rand_chacha 0.3.1", "serde", "serde_json", "siphasher", "slab", "smol", "smoldot", + "zeroize", ] [[package]] name = "snow" -version = "0.9.3" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c9d1425eb528a21de2755c75af4c9b5d57f50a0d4c3b7f1828a4cd03f8ba155" +checksum = "850948bee068e713b8ab860fe1adc4d109676ab4c3b621fd8147f06b261f2f85" dependencies = [ - "aes-gcm 0.9.4", + "aes-gcm", "blake2 0.10.6", "chacha20poly1305", - "curve25519-dalek 4.1.1", + "curve25519-dalek 4.1.2", "rand_core 0.6.4", - "ring 0.16.20", + "ring 0.17.8", "rustc_version", - "sha2 0.10.7", - "subtle 2.4.1", + "sha2 0.10.8", + "subtle 2.5.0", ] [[package]] name = "socket2" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" +checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" dependencies = [ "libc", "winapi", @@ -7971,12 +8316,12 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" +checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" dependencies = [ "libc", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -7992,14 +8337,14 @@ dependencies = [ "http", "httparse", "log", - "rand", - "sha-1", + "rand 0.8.5", + "sha-1 0.9.8", ] [[package]] name = "sp-api" version = "26.0.0" -source = 
"git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "hash-db", "log", @@ -8021,21 +8366,21 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" version = "15.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "Inflector", "blake2 0.10.6", "expander", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.52", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] name = "sp-application-crypto" version = "30.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "parity-scale-codec", "scale-info", @@ -8048,8 +8393,9 @@ dependencies = [ [[package]] name = "sp-arithmetic" version = "23.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ + "docify", "integer-sqrt", "num-traits", "parity-scale-codec", @@ -8080,20 +8426,19 @@ dependencies = [ [[package]] name = "sp-authority-discovery" version = "26.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "parity-scale-codec", "scale-info", "sp-api", "sp-application-crypto", "sp-runtime", - "sp-std 14.0.0 
(git+https://github.com/paritytech/polkadot-sdk?branch=master)", ] [[package]] name = "sp-blockchain" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "futures", "log", @@ -8111,7 +8456,7 @@ dependencies = [ [[package]] name = "sp-consensus" version = "0.32.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "async-trait", "futures", @@ -8123,31 +8468,10 @@ dependencies = [ "thiserror", ] -[[package]] -name = "sp-consensus-beefy" -version = "13.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" -dependencies = [ - "lazy_static", - "parity-scale-codec", - "scale-info", - "serde", - "sp-api", - "sp-application-crypto", - "sp-core", - "sp-crypto-hashing", - "sp-io", - "sp-keystore", - "sp-mmr-primitives", - "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", - "strum 0.24.1", -] - [[package]] name = "sp-consensus-grandpa" version = "13.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "finality-grandpa", "log", @@ -8159,55 +8483,53 @@ dependencies = [ "sp-core", "sp-keystore", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", ] [[package]] name = "sp-consensus-slots" version = "0.32.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = 
"git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "parity-scale-codec", "scale-info", "serde", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", "sp-timestamp", ] [[package]] name = "sp-core" version = "28.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ - "array-bytes 6.1.0", + "array-bytes 6.2.2", "bandersnatch_vrfs", - "bip39", "bitflags 1.3.2", "blake2 0.10.6", "bounded-collections", - "bs58 0.5.0", + "bs58 0.5.1", "dyn-clonable", - "ed25519-zebra", + "ed25519-zebra 3.1.0", "futures", "hash-db", "hash256-std-hasher", "impl-serde", - "itertools", + "itertools 0.10.5", "k256", "libsecp256k1", "log", "merlin", + "parity-bip39", "parity-scale-codec", "parking_lot 0.12.1", "paste", "primitive-types", - "rand", + "rand 0.8.5", "scale-info", - "schnorrkel 0.11.4", + "schnorrkel", "secp256k1", "secrecy", "serde", - "sp-crypto-hashing", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", "sp-debug-derive 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", "sp-runtime-interface 24.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", @@ -8221,25 +8543,10 @@ dependencies = [ "zeroize", ] -[[package]] -name = "sp-core-hashing" -version = "9.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ee599a8399448e65197f9a6cee338ad192e9023e35e31f22382964c3c174c68" -dependencies = [ - "blake2b_simd", - "byteorder", - "digest 0.10.7", - "sha2 0.10.7", - "sha3", - "sp-std 8.0.0", - "twox-hash", -] - [[package]] name = "sp-crypto-ec-utils" version = "0.10.0" -source = 
"git+https://github.com/paritytech/polkadot-sdk#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "ark-bls12-377", "ark-bls12-377-ext", @@ -8254,36 +8561,49 @@ dependencies = [ "ark-ed-on-bls12-381-bandersnatch-ext", "ark-scale", "sp-runtime-interface 24.0.0 (git+https://github.com/paritytech/polkadot-sdk)", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk)", ] [[package]] name = "sp-crypto-hashing" -version = "0.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc9927a7f81334ed5b8a98a4a978c81324d12bd9713ec76b5c68fd410174c5eb" +dependencies = [ + "blake2b_simd", + "byteorder", + "digest 0.10.7", + "sha2 0.10.8", + "sha3", + "twox-hash", +] + +[[package]] +name = "sp-crypto-hashing" +version = "0.1.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "blake2b_simd", "byteorder", "digest 0.10.7", - "sha2 0.10.7", + "sha2 0.10.8", "sha3", "twox-hash", ] [[package]] name = "sp-crypto-hashing-proc-macro" -version = "0.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +version = "0.1.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ - "quote 1.0.35", - "sp-crypto-hashing", - "syn 2.0.52", + "quote 1.0.36", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", + "syn 2.0.60", ] [[package]] name = "sp-database" version = "10.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = 
"git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "kvdb", "parking_lot 0.12.1", @@ -8292,77 +8612,75 @@ dependencies = [ [[package]] name = "sp-debug-derive" version = "14.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.52", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] name = "sp-debug-derive" version = "14.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.52", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] name = "sp-externalities" version = "0.25.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "environmental", "parity-scale-codec", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", "sp-storage 19.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", ] [[package]] name = "sp-externalities" version = "0.25.0" -source = "git+https://github.com/paritytech/polkadot-sdk#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "environmental", "parity-scale-codec", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk)", "sp-storage 19.0.0 (git+https://github.com/paritytech/polkadot-sdk)", ] [[package]] name = 
"sp-genesis-builder" -version = "0.7.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +version = "0.8.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ + "parity-scale-codec", + "scale-info", "serde_json", "sp-api", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", ] [[package]] name = "sp-inherents" version = "26.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "async-trait", "impl-trait-for-tuples", "parity-scale-codec", "scale-info", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", "thiserror", ] [[package]] name = "sp-io" version = "30.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "bytes", - "ed25519-dalek", + "ed25519-dalek 2.1.1", "libsecp256k1", "log", "parity-scale-codec", @@ -8370,7 +8688,7 @@ dependencies = [ "rustversion", "secp256k1", "sp-core", - "sp-crypto-hashing", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", "sp-keystore", "sp-runtime-interface 24.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", @@ -8385,17 +8703,17 @@ dependencies = [ [[package]] name = "sp-keyring" version = "31.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = 
"git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "sp-core", "sp-runtime", - "strum 0.24.1", + "strum", ] [[package]] name = "sp-keystore" version = "0.34.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", @@ -8406,7 +8724,7 @@ dependencies = [ [[package]] name = "sp-maybe-compressed-blob" version = "11.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "thiserror", "zstd 0.12.4", @@ -8415,48 +8733,28 @@ dependencies = [ [[package]] name = "sp-metadata-ir" version = "0.6.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "frame-metadata 16.0.0", "parity-scale-codec", "scale-info", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", ] [[package]] name = "sp-mixnet" version = "0.4.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "parity-scale-codec", "scale-info", "sp-api", "sp-application-crypto", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", -] - -[[package]] -name = "sp-mmr-primitives" -version = "26.0.0" -source = 
"git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" -dependencies = [ - "ckb-merkle-mountain-range 0.5.2", - "log", - "parity-scale-codec", - "scale-info", - "serde", - "sp-api", - "sp-core", - "sp-debug-derive 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", - "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", - "thiserror", ] [[package]] name = "sp-panic-handler" version = "13.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "backtrace", "lazy_static", @@ -8466,7 +8764,7 @@ dependencies = [ [[package]] name = "sp-rpc" version = "26.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "rustc-hash", "serde", @@ -8476,7 +8774,7 @@ dependencies = [ [[package]] name = "sp-runtime" version = "31.0.1" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "docify", "either", @@ -8485,7 +8783,7 @@ dependencies = [ "log", "parity-scale-codec", "paste", - "rand", + "rand 0.8.5", "scale-info", "serde", "simple-mermaid", @@ -8495,13 +8793,12 @@ dependencies = [ "sp-io", "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", "sp-weights", - "tuplex", ] [[package]] name = "sp-runtime-interface" version = "24.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = 
"git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "bytes", "impl-trait-for-tuples", @@ -8520,7 +8817,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "24.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "bytes", "impl-trait-for-tuples", @@ -8539,33 +8836,33 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "17.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "Inflector", "expander", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.52", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] name = "sp-runtime-interface-proc-macro" version = "17.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "Inflector", "expander", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.52", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] name = "sp-session" version = "27.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "parity-scale-codec", "scale-info", @@ -8574,13 +8871,12 @@ dependencies = [ "sp-keystore", "sp-runtime", "sp-staking", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", ] 
[[package]] name = "sp-staking" version = "26.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -8588,24 +8884,22 @@ dependencies = [ "serde", "sp-core", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", ] [[package]] name = "sp-state-machine" version = "0.35.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "hash-db", "log", "parity-scale-codec", "parking_lot 0.12.1", - "rand", + "rand 0.8.5", "smallvec", "sp-core", "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", "sp-panic-handler", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", "sp-trie", "thiserror", "tracing", @@ -8615,90 +8909,79 @@ dependencies = [ [[package]] name = "sp-statement-store" version = "10.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ - "aes-gcm 0.10.2", - "curve25519-dalek 4.1.1", - "ed25519-dalek", + "aes-gcm", + "curve25519-dalek 4.1.2", + "ed25519-dalek 2.1.1", "hkdf", "parity-scale-codec", - "rand", + "rand 0.8.5", "scale-info", - "sha2 0.10.7", + "sha2 0.10.8", "sp-api", "sp-application-crypto", "sp-core", - "sp-crypto-hashing", + "sp-crypto-hashing 0.1.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", "sp-runtime", 
"sp-runtime-interface 24.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", "thiserror", - "x25519-dalek 2.0.0", + "x25519-dalek 2.0.1", ] -[[package]] -name = "sp-std" -version = "8.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53458e3c57df53698b3401ec0934bea8e8cfce034816873c0b0abbd83d7bac0d" - [[package]] name = "sp-std" version = "14.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" [[package]] name = "sp-std" version = "14.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" [[package]] name = "sp-storage" version = "19.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "impl-serde", "parity-scale-codec", "ref-cast", "serde", "sp-debug-derive 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", ] [[package]] name = "sp-storage" version = "19.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "impl-serde", "parity-scale-codec", "ref-cast", "serde", "sp-debug-derive 14.0.0 (git+https://github.com/paritytech/polkadot-sdk)", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk)", ] [[package]] name = "sp-timestamp" version = 
"26.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "async-trait", "parity-scale-codec", "sp-inherents", "sp-runtime", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", "thiserror", ] [[package]] name = "sp-tracing" version = "16.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "parity-scale-codec", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", "tracing", "tracing-core", "tracing-subscriber", @@ -8707,10 +8990,9 @@ dependencies = [ [[package]] name = "sp-tracing" version = "16.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "parity-scale-codec", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk)", "tracing", "tracing-core", "tracing-subscriber", @@ -8719,21 +9001,20 @@ dependencies = [ [[package]] name = "sp-trie" version = "29.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ - "ahash 0.8.7", + "ahash 0.8.11", "hash-db", "lazy_static", "memory-db", "nohash-hasher", "parity-scale-codec", "parking_lot 0.12.1", - "rand", + "rand 0.8.5", "scale-info", "schnellru", "sp-core", "sp-externalities 0.25.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", - "sp-std 14.0.0 
(git+https://github.com/paritytech/polkadot-sdk?branch=master)", "thiserror", "tracing", "trie-db", @@ -8743,7 +9024,7 @@ dependencies = [ [[package]] name = "sp-version" version = "29.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "impl-serde", "parity-scale-codec", @@ -8760,44 +9041,40 @@ dependencies = [ [[package]] name = "sp-version-proc-macro" version = "13.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "parity-scale-codec", - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.52", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] name = "sp-wasm-interface" version = "20.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "anyhow", "impl-trait-for-tuples", "log", "parity-scale-codec", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", "wasmtime", ] [[package]] name = "sp-wasm-interface" version = "20.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ - "anyhow", "impl-trait-for-tuples", "log", "parity-scale-codec", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk)", - "wasmtime", ] [[package]] name = "sp-weights" version = "27.0.0" -source = 
"git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "bounded-collections", "parity-scale-codec", @@ -8806,7 +9083,6 @@ dependencies = [ "smallvec", "sp-arithmetic", "sp-debug-derive 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", - "sp-std 14.0.0 (git+https://github.com/paritytech/polkadot-sdk?branch=master)", ] [[package]] @@ -8823,9 +9099,9 @@ checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" [[package]] name = "spki" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" dependencies = [ "base64ct", "der", @@ -8833,14 +9109,14 @@ dependencies = [ [[package]] name = "ss58-registry" -version = "1.41.0" +version = "1.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfc443bad666016e012538782d9e3006213a7db43e9fb1dda91657dc06a6fa08" +checksum = "4743ce898933fbff7bbf414f497c459a782d496269644b3d650a398ae6a487ba" dependencies = [ "Inflector", "num-format", - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "serde", "serde_json", "unicode-xid 0.2.4", @@ -8855,9 +9131,9 @@ checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" [[package]] name = "staging-xcm" version = "7.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ - "array-bytes 6.1.0", + "array-bytes 6.2.2", "bounded-collections", "derivative", "environmental", @@ -8873,7 +9149,7 @@ dependencies = 
[ [[package]] name = "staging-xcm-builder" version = "7.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "frame-support", "frame-system", @@ -8895,7 +9171,7 @@ dependencies = [ [[package]] name = "staging-xcm-executor" version = "7.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "environmental", "frame-benchmarking", @@ -8919,6 +9195,26 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" +[[package]] +name = "str0m" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee48572247f422dcbe68630c973f8296fbd5157119cd36a3223e48bf83d47727" +dependencies = [ + "combine", + "crc", + "hmac 0.12.1", + "once_cell", + "openssl", + "openssl-sys", + "rand 0.8.5", + "sctp-proto", + "serde", + "sha-1 0.10.1", + "thiserror", + "tracing", +] + [[package]] name = "strsim" version = "0.8.0" @@ -8950,71 +9246,49 @@ checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck 0.3.3", "proc-macro-error", - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "syn 1.0.109", ] [[package]] name = "strum" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" -dependencies = [ - "strum_macros 0.24.3", -] - -[[package]] -name = "strum" -version = "0.26.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29" -dependencies = [ - "strum_macros 0.26.1", -] - -[[package]] -name = "strum_macros" -version = "0.24.3" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" +checksum = "5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29" dependencies = [ - "heck 0.4.1", - "proc-macro2 1.0.76", - "quote 1.0.35", - "rustversion", - "syn 1.0.109", + "strum_macros", ] [[package]] name = "strum_macros" -version = "0.26.1" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a3417fc93d76740d974a01654a09777cb500428cc874ca9f45edfe0c4d4cd18" +checksum = "c6cf59daf282c0a494ba14fd21610a0325f9f90ec9d1231dea26bcb1d696c946" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "rustversion", - "syn 2.0.52", + "syn 2.0.60", ] [[package]] name = "substrate-bip39" version = "0.4.7" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "hmac 0.12.1", "pbkdf2", - "schnorrkel 0.11.4", - "sha2 0.10.7", + "schnorrkel", + "sha2 0.10.8", "zeroize", ] [[package]] name = "substrate-prometheus-endpoint" version = "0.17.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "hyper", "log", @@ -9025,7 +9299,7 @@ dependencies = [ [[package]] name = "substrate-relay" -version = "1.2.0" +version = "1.4.0" dependencies = [ "anyhow", "async-std", @@ -9072,7 +9346,7 @@ dependencies = [ "sp-keyring", "sp-runtime", "structopt", - "strum 0.26.2", + 
"strum", "substrate-relay-helper", "tempfile", ] @@ -9080,6 +9354,7 @@ dependencies = [ [[package]] name = "substrate-relay-helper" version = "0.1.0" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "anyhow", "async-std", @@ -9089,7 +9364,6 @@ dependencies = [ "bp-parachains", "bp-polkadot-core", "bp-relayers", - "bp-rococo", "bp-runtime", "bridge-runtime-common", "equivocation-detector", @@ -9107,17 +9381,16 @@ dependencies = [ "pallet-bridge-messages", "pallet-bridge-parachains", "pallet-grandpa", - "pallet-transaction-payment", "parachains-relay", "parity-scale-codec", - "relay-bridge-hub-rococo-client", - "relay-bridge-hub-westend-client", - "relay-rococo-client", + "rbtag", "relay-substrate-client", "relay-utils", "sp-consensus-grandpa", "sp-core", "sp-runtime", + "structopt", + "strum", "thiserror", ] @@ -9129,21 +9402,15 @@ checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" [[package]] name = "subtle" -version = "2.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" - -[[package]] -name = "subtle-ng" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "734676eb262c623cec13c3155096e08d1f8f29adce39ba17948b18dad1e54142" +checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" [[package]] name = "subxt" -version = "0.32.1" +version = "0.35.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "588b8ce92699eeb06290f4fb02dad4f7e426c4e6db4d53889c6bcbc808cf24ac" +checksum = "bd68bef23f4de5e513ab4c29af69053e232b098f9c87ab552d7ea153b4a1fbc5" dependencies = [ "async-trait", "base58", @@ -9154,7 +9421,8 @@ dependencies = [ "futures", "hex", "impl-serde", - "jsonrpsee 0.20.1", + "instant", + "jsonrpsee", "parity-scale-codec", "primitive-types", "scale-bits", @@ -9164,39 +9432,42 
@@ dependencies = [ "scale-value", "serde", "serde_json", - "sp-core-hashing", + "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "subxt-lightclient", "subxt-macro", "subxt-metadata", "thiserror", + "tokio-util", "tracing", + "url", ] [[package]] name = "subxt-codegen" -version = "0.32.1" +version = "0.35.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98f5a534c8d475919e9c845d51fc2316da4fcadd04fe17552d932d2106de930e" +checksum = "9d9e2b256b71d31a2629e44eb9cbfd944eb7d577c9e0c8e9802cc3c3943af2d9" dependencies = [ "frame-metadata 16.0.0", "heck 0.4.1", "hex", - "jsonrpsee 0.20.1", + "jsonrpsee", "parity-scale-codec", - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "scale-info", + "scale-typegen", "subxt-metadata", - "syn 2.0.52", + "syn 2.0.60", "thiserror", "tokio", ] [[package]] name = "subxt-lightclient" -version = "0.32.1" +version = "0.35.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10fd0ac9b091211f962b6ae19e26cd08e0b86efa064dfb7fac69c8f79f122329" +checksum = "1d51f1ac12e3be7aafea4d037730a57da4f22f2e9c73955666081ffa2697c6f1" dependencies = [ "futures", "futures-util", @@ -9211,27 +9482,31 @@ dependencies = [ [[package]] name = "subxt-macro" -version = "0.32.1" +version = "0.35.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12e8be9ab6fe88b8c13edbe15911e148482cfb905a8b8d5b8d766a64c54be0bd" +checksum = "98dc84d7e6a0abd7ed407cce0bf60d7d58004f699460cffb979640717d1ab506" dependencies = [ - "darling 0.20.3", + "darling 0.20.8", + "parity-scale-codec", "proc-macro-error", + "quote 1.0.36", + "scale-typegen", "subxt-codegen", - "syn 2.0.52", + "syn 2.0.60", ] [[package]] name = "subxt-metadata" -version = "0.32.1" +version = "0.35.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6898275765d36a37e5ef564358e0341cf41b5f3a91683d7d8b859381b65ac8a" +checksum = 
"cc10c54028d079a9f1be65188707cd29e5ffd8d0031a2b1346a0941d57b7ab7e" dependencies = [ + "derive_more", "frame-metadata 16.0.0", + "hashbrown 0.14.3", "parity-scale-codec", "scale-info", - "sp-core-hashing", - "thiserror", + "sp-crypto-hashing 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -9251,19 +9526,19 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.52" +version = "2.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b699d15b36d1f02c3e7c69f8ffef53de37aefae075d8488d4ba1a7788d574a07" +checksum = "909518bc7b1c9b779f1bbf07f2929d35af9f0f37e47c6e9ef7f9dddc1e1821f3" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "unicode-ident", ] @@ -9273,19 +9548,19 @@ version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", + "proc-macro2 1.0.81", + "quote 1.0.36", "syn 1.0.109", "unicode-xid 0.2.4", ] [[package]] name = "sysinfo" -version = "0.30.7" +version = "0.30.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c385888ef380a852a16209afc8cfad22795dd8873d69c9a14d2e2088f118d18" +checksum = "87341a165d73787554941cd5ef55ad728011566fe714e987d1b976c15dbc3a83" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "core-foundation-sys", "libc", "ntapi", @@ -9323,9 +9598,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "target-lexicon" -version = "0.12.10" +version = "0.12.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1d2faeef5759ab89935255b1a4cd98e0baf99d1085e37d36599c625dac49ae8e" +checksum = "e1fc403891a21bcfb7c37834ba66a547a8f402146eba7265b5a6d88059c9ff2f" [[package]] name = "tempfile" @@ -9333,17 +9608,17 @@ version = "3.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ - "cfg-if 1.0.0", - "fastrand 2.0.1", - "rustix 0.38.31", + "cfg-if", + "fastrand 2.0.2", + "rustix 0.38.32", "windows-sys 0.52.0", ] [[package]] name = "termcolor" -version = "1.2.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" dependencies = [ "winapi-util", ] @@ -9365,59 +9640,39 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.57" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e45bcbe8ed29775f228095caf2cd67af7a4ccf756ebff23a306bf3e8b47b24b" +checksum = "f0126ad08bff79f29fc3ae6a55cc72352056dfff61e3ff8bb7129476d44b23aa" dependencies = [ "thiserror-impl", ] -[[package]] -name = "thiserror-core" -version = "1.0.38" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d97345f6437bb2004cd58819d8a9ef8e36cdd7661c2abc4bbde0a7c40d9f497" -dependencies = [ - "thiserror-core-impl", -] - -[[package]] -name = "thiserror-core-impl" -version = "1.0.38" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10ac1c5050e43014d16b2f94d0d2ce79e65ffdd8b38d8048f9c8f6a8a6da62ac" -dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 1.0.109", -] - [[package]] name = "thiserror-impl" -version = "1.0.57" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" +checksum = 
"d1cd413b5d558b4c5bf3680e324a6fa5014e7b7c067a51e69dbdf47eb7148b66" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.52", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] name = "thread_local" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "once_cell", ] [[package]] name = "time" -version = "0.3.34" +version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ "deranged", "itoa", @@ -9438,9 +9693,9 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.17" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ba3a3ef41e6672a2f0f001392bb5dcd3ff0a9992d618ca761a11c3121547774" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" dependencies = [ "num-conv", "time-core", @@ -9472,9 +9727,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.36.0" +version = "1.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" +checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787" dependencies = [ "backtrace", "bytes", @@ -9482,8 +9737,8 @@ dependencies = [ "mio", "num_cpus", "parking_lot 0.12.1", - "pin-project-lite 0.2.12", - "socket2 0.5.5", + "pin-project-lite 0.2.14", + "socket2 0.5.6", "tokio-macros", "windows-sys 0.48.0", ] @@ -9494,9 +9749,9 @@ version = 
"2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.52", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -9505,33 +9760,59 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls 0.21.5", + "rustls 0.21.10", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" +dependencies = [ + "rustls 0.22.3", + "rustls-pki-types", "tokio", ] [[package]] name = "tokio-stream" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" +checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" dependencies = [ "futures-core", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.14", "tokio", "tokio-util", ] +[[package]] +name = "tokio-tungstenite" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "212d5dcb2a1ce06d81107c3d0ffa3121fe974b73f068c8282cb1c32328113b6c" +dependencies = [ + "futures-util", + "log", + "rustls 0.21.10", + "rustls-native-certs 0.6.3", + "tokio", + "tokio-rustls 0.24.1", + "tungstenite", +] + [[package]] name = "tokio-util" -version = "0.7.8" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" +checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" dependencies = [ "bytes", "futures-core", "futures-io", "futures-sink", - "pin-project-lite 0.2.12", + 
"pin-project-lite 0.2.14", "tokio", "tracing", ] @@ -9547,14 +9828,14 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.11" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af06656561d28735e9c1cd63dfd57132c8155426aa6af24f36a00a351f88c48e" +checksum = "e9dd1545e8208b4a5af1aa9bbd0b4cf7e9ea08fabc5d0a5c67fcaafa17433aa3" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.7", + "toml_edit 0.22.11", ] [[package]] @@ -9568,37 +9849,37 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.19.14" +version = "0.20.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8123f27e969974a3dfba720fdb560be359f57b44302d280ba72e76a74480e8a" +checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" dependencies = [ - "indexmap 2.2.2", + "indexmap 2.2.6", "toml_datetime", - "winnow 0.5.0", + "winnow 0.5.40", ] [[package]] name = "toml_edit" -version = "0.21.0" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d34d383cd00a163b4a5b85053df514d45bc330f6de7737edfe0a93311d1eaa03" +checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ - "indexmap 2.2.2", + "indexmap 2.2.6", "toml_datetime", - "winnow 0.5.0", + "winnow 0.5.40", ] [[package]] name = "toml_edit" -version = "0.22.7" +version = "0.22.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18769cd1cec395d70860ceb4d932812a0b4d06b1a4bb336745a4d21b9496e992" +checksum = "fb686a972ccef8537b39eead3968b0e8616cb5040dbb9bba93007c8e07c9215f" dependencies = [ - "indexmap 2.2.2", + "indexmap 2.2.6", "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.5", + "winnow 0.6.6", ] [[package]] @@ -9610,7 +9891,7 @@ dependencies = [ "futures-core", "futures-util", "pin-project", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.14", "tower-layer", "tower-service", "tracing", @@ -9630,26 
+9911,25 @@ checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.37" +version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ - "cfg-if 1.0.0", "log", - "pin-project-lite 0.2.12", + "pin-project-lite 0.2.14", "tracing-attributes", "tracing-core", ] [[package]] name = "tracing-attributes" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" +checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.52", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -9674,55 +9954,40 @@ dependencies = [ [[package]] name = "tracing-log" -version = "0.1.3" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" dependencies = [ - "lazy_static", "log", - "tracing-core", -] - -[[package]] -name = "tracing-serde" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" -dependencies = [ - "serde", + "once_cell", "tracing-core", ] [[package]] name = "tracing-subscriber" -version = "0.2.25" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e0d2eaa99c3c2e41547cfa109e910a68ea03823cccad4a0525dcbc9b01e8c71" +checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" dependencies = [ - "ansi_term", - "chrono", - "lazy_static", 
"matchers", + "nu-ansi-term", + "once_cell", "regex", - "serde", - "serde_json", "sharded-slab", "smallvec", "thread_local", "tracing", "tracing-core", "tracing-log", - "tracing-serde", ] [[package]] name = "trie-db" -version = "0.28.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff28e0f815c2fea41ebddf148e008b077d2faddb026c9555b29696114d602642" +checksum = "65ed83be775d85ebb0e272914fff6462c39b3ddd6dc67b5c1c41271aad280c69" dependencies = [ "hash-db", - "hashbrown 0.13.2", "log", "rustc-hex", "smallvec", @@ -9744,18 +10009,43 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4f7f83d1e4a0e4358ac54c5c3681e5d7da5efc5a7a632c90bb6d6669ddd9bc26" dependencies = [ "async-trait", - "cfg-if 1.0.0", + "cfg-if", "data-encoding", - "enum-as-inner", + "enum-as-inner 0.5.1", "futures-channel", "futures-io", "futures-util", "idna 0.2.3", "ipnet", "lazy_static", - "rand", + "rand 0.8.5", + "smallvec", + "socket2 0.4.10", + "thiserror", + "tinyvec", + "tokio", + "tracing", + "url", +] + +[[package]] +name = "trust-dns-proto" +version = "0.23.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3119112651c157f4488931a01e586aa459736e9d6046d3bd9105ffb69352d374" +dependencies = [ + "async-trait", + "cfg-if", + "data-encoding", + "enum-as-inner 0.6.0", + "futures-channel", + "futures-io", + "futures-util", + "idna 0.4.0", + "ipnet", + "once_cell", + "rand 0.8.5", "smallvec", - "socket2 0.4.9", "thiserror", "tinyvec", "tokio", @@ -9769,7 +10059,7 @@ version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aff21aa4dcefb0a1afbfac26deb0adc93888c7d295fb63ab273ef276ba2b7cfe" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "futures-util", "ipconfig", "lazy_static", @@ -9780,14 +10070,35 @@ dependencies = [ "thiserror", "tokio", "tracing", - "trust-dns-proto", + "trust-dns-proto 0.22.0", +] + +[[package]] +name = "trust-dns-resolver" +version = 
"0.23.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10a3e6c3aff1718b3c73e395d1f35202ba2ffa847c6a62eea0db8fb4cfe30be6" +dependencies = [ + "cfg-if", + "futures-util", + "ipconfig", + "lru-cache", + "once_cell", + "parking_lot 0.12.1", + "rand 0.8.5", + "resolv-conf", + "smallvec", + "thiserror", + "tokio", + "tracing", + "trust-dns-proto 0.23.2", ] [[package]] name = "try-lock" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "tt-call" @@ -9796,10 +10107,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f4f195fd851901624eee5a58c4bb2b4f06399148fcd0ed336e6f1cb60a9881df" [[package]] -name = "tuplex" -version = "0.1.2" +name = "tungstenite" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "676ac81d5454c4dcf37955d34fa8626ede3490f744b86ca14a7b90168d2a08aa" +checksum = "9e3dac10fd62eaf6617d3a904ae222845979aec67c615d1c842b4002c7666fb9" +dependencies = [ + "byteorder", + "bytes", + "data-encoding", + "http", + "httparse", + "log", + "rand 0.8.5", + "rustls 0.21.10", + "sha1", + "thiserror", + "url", + "utf-8", +] [[package]] name = "twox-hash" @@ -9807,17 +10132,17 @@ version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ - "cfg-if 0.1.10", + "cfg-if", "digest 0.10.7", - "rand", + "rand 0.8.5", "static_assertions", ] [[package]] name = "typenum" -version = "1.16.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" 
[[package]] name = "uint" @@ -9833,15 +10158,15 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.13" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" +checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" [[package]] name = "unicode-ident" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "unicode-normalization" @@ -9854,15 +10179,15 @@ dependencies = [ [[package]] name = "unicode-segmentation" -version = "1.10.1" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" +checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" [[package]] name = "unicode-width" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" +checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" [[package]] name = "unicode-xid" @@ -9876,16 +10201,6 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" -[[package]] -name = "universal-hash" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f214e8f697e925001e66ec2c6e37a4ef93f0f78c2eed7814394e10c62025b05" -dependencies = [ - "generic-array 0.14.7", - "subtle 2.4.1", -] - [[package]] name = "universal-hash" version = "0.5.1" @@ -9893,19 +10208,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea" dependencies = [ "crypto-common", - "subtle 2.4.1", + "subtle 2.5.0", ] [[package]] name = "unsigned-varint" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d86a8dc7f45e4c1b0d30e43038c38f274e77af056aa5f74b93c2cf9eb3c1c836" +checksum = "6889a77d49f1f013504cec6bf97a2c730394adedaeb1deb5ea08949a50541105" dependencies = [ "asynchronous-codec", "bytes", "futures-io", "futures-util", + "tokio-util", ] [[package]] @@ -9914,17 +10230,29 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + [[package]] name = "url" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" +checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" dependencies = [ "form_urlencoded", - "idna 0.4.0", + "idna 0.5.0", "percent-encoding", ] +[[package]] +name = "utf-8" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" + [[package]] name = "utf8parse" version = "0.2.1" @@ -9939,9 +10267,9 @@ checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" [[package]] name = "value-bag" -version = "1.7.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "126e423afe2dd9ac52142e7e9d5ce4135d7e13776c529d27fd6bc49f19e3280b" +checksum = "74797339c3b98616c009c7c3eb53a0ce41e85c8ec66bd3db96ed132d20cfdee8" [[package]] name = "vcpkg" @@ -9982,10 +10310,10 @@ 
dependencies = [ "arrayref", "constcat", "digest 0.10.7", - "rand", - "rand_chacha", + "rand 0.8.5", + "rand_chacha 0.3.1", "rand_core 0.6.4", - "sha2 0.10.7", + "sha2 0.10.8", "sha3", "thiserror", "zeroize", @@ -9993,15 +10321,15 @@ dependencies = [ [[package]] name = "waker-fn" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" +checksum = "f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690" [[package]] name = "walkdir" -version = "2.3.3" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" dependencies = [ "same-file", "winapi-util", @@ -10030,36 +10358,36 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.87" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.87" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.52", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.37" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" +checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "wasm-bindgen", "web-sys", @@ -10067,32 +10395,32 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.87" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" dependencies = [ - "quote 1.0.35", + "quote 1.0.36", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.87" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.52", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.87" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" [[package]] name = "wasm-instrument" @@ -10120,11 +10448,10 @@ dependencies = [ [[package]] name = "wasmi" -version = "0.30.0" +version = "0.31.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51fb5c61993e71158abf5bb863df2674ca3ec39ed6471c64f07aeaf751d67b4" +checksum = "77a8281d1d660cdf54c76a3efa9ddd0c270cada1383a995db3ccb43d166456c7" dependencies = [ - "intx", "smallvec", "spin 0.9.8", "wasmi_arena", @@ -10134,15 +10461,15 @@ dependencies = [ [[package]] name = "wasmi_arena" -version = "0.4.0" 
+version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "401c1f35e413fac1846d4843745589d9ec678977ab35a384db8ae7830525d468" +checksum = "104a7f73be44570cac297b3035d76b169d6599637631cf37a1703326a0727073" [[package]] name = "wasmi_core" -version = "0.12.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624e6333e861ef49095d2d678b76ebf30b06bf37effca845be7e5b87c90071b7" +checksum = "dcf1a7db34bff95b85c261002720c00c3a6168256dcb93041d3fa2054d19856a" dependencies = [ "downcast-rs", "libm", @@ -10177,7 +10504,7 @@ checksum = "f907fdead3153cb9bfb7a93bbd5b62629472dc06dee83605358c64c52ed3dda9" dependencies = [ "anyhow", "bincode", - "cfg-if 1.0.0", + "cfg-if", "indexmap 1.9.3", "libc", "log", @@ -10203,7 +10530,7 @@ version = "8.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3b9daa7c14cd4fa3edbf69de994408d5f4b7b0959ac13fa69d465f6597f810d" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -10213,14 +10540,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c86437fa68626fe896e5afc69234bb2b5894949083586535f200385adfd71213" dependencies = [ "anyhow", - "base64 0.21.2", + "base64 0.21.7", "bincode", "directories-next", "file-per-thread-logger", "log", - "rustix 0.36.15", + "rustix 0.36.17", "serde", - "sha2 0.10.7", + "sha2 0.10.8", "toml 0.5.11", "windows-sys 0.45.0", "zstd 0.11.2+zstd.1.5.2", @@ -10238,7 +10565,7 @@ dependencies = [ "cranelift-frontend", "cranelift-native", "cranelift-wasm", - "gimli", + "gimli 0.27.3", "log", "object 0.30.4", "target-lexicon", @@ -10257,7 +10584,7 @@ dependencies = [ "anyhow", "cranelift-codegen", "cranelift-native", - "gimli", + "gimli 0.27.3", "object 0.30.4", "target-lexicon", "wasmtime-environ", @@ -10271,7 +10598,7 @@ checksum = "a990198cee4197423045235bf89d3359e69bd2ea031005f4c2d901125955c949" dependencies = [ "anyhow", "cranelift-entity", - "gimli", + "gimli 0.27.3", 
"indexmap 1.9.3", "log", "object 0.30.4", @@ -10291,9 +10618,9 @@ dependencies = [ "addr2line 0.19.0", "anyhow", "bincode", - "cfg-if 1.0.0", + "cfg-if", "cpp_demangle", - "gimli", + "gimli 0.27.3", "log", "object 0.30.4", "rustc-demangle", @@ -10314,7 +10641,7 @@ checksum = "6e0554b84c15a27d76281d06838aed94e13a77d7bf604bbbaf548aa20eb93846" dependencies = [ "object 0.30.4", "once_cell", - "rustix 0.36.15", + "rustix 0.36.17", ] [[package]] @@ -10323,7 +10650,7 @@ version = "8.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aecae978b13f7f67efb23bd827373ace4578f2137ec110bbf6a4a7cde4121bbd" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "windows-sys 0.45.0", ] @@ -10336,16 +10663,16 @@ checksum = "658cf6f325232b6760e202e5255d823da5e348fdea827eff0a2a22319000b441" dependencies = [ "anyhow", "cc", - "cfg-if 1.0.0", + "cfg-if", "indexmap 1.9.3", "libc", "log", "mach", "memfd", - "memoffset 0.8.0", + "memoffset", "paste", - "rand", - "rustix 0.36.15", + "rand 0.8.5", + "rustix 0.36.17", "wasmtime-asm-macros", "wasmtime-environ", "wasmtime-jit-debug", @@ -10366,9 +10693,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.64" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" +checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" dependencies = [ "js-sys", "wasm-bindgen", @@ -10376,12 +10703,12 @@ dependencies = [ [[package]] name = "webpki" -version = "0.22.2" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07ecc0cd7cac091bf682ec5efa18b1cff79d617b84181f38b3951dbe135f607f" +checksum = "ed63aea5ce73d0ff405984102c42de94fc55a6b75765d621c65262469b3c9b53" dependencies = [ - "ring 0.16.20", - "untrusted", + "ring 0.17.8", + "untrusted 0.9.0", ] [[package]] @@ -10395,20 +10722,21 @@ dependencies = [ [[package]] name = "which" -version = 
"4.4.0" +version = "4.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2441c784c52b289a054b7201fc93253e288f094e2f4be9058343127c4226a269" +checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" dependencies = [ "either", - "libc", + "home", "once_cell", + "rustix 0.38.32", ] [[package]] name = "wide" -version = "0.7.11" +version = "0.7.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa469ffa65ef7e0ba0f164183697b89b854253fd31aeb92358b7b6155177d62f" +checksum = "81a1851a719f11d1d2fea40e15c72f6c00de8c142d7ac47c1441cc7e4d0d5bc6" dependencies = [ "bytemuck", "safe_arch", @@ -10416,9 +10744,9 @@ dependencies = [ [[package]] name = "widestring" -version = "1.0.2" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "653f141f39ec16bba3c5abe400a0c60da7468261cc2cbf36805022876bc721a8" +checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" [[package]] name = "winapi" @@ -10438,9 +10766,9 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" dependencies = [ "winapi", ] @@ -10453,34 +10781,31 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows" -version = "0.34.0" +version = "0.51.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45296b64204227616fdbf2614cefa4c236b98ee64dfaaaa435207ed99fe7829f" +checksum = "ca229916c5ee38c2f2bc1e9d8f04df975b4bd93f9955dc69fabb5d91270045c9" dependencies = [ - "windows_aarch64_msvc 0.34.0", - "windows_i686_gnu 0.34.0", - "windows_i686_msvc 0.34.0", - "windows_x86_64_gnu 0.34.0", - 
"windows_x86_64_msvc 0.34.0", + "windows-core 0.51.1", + "windows-targets 0.48.5", ] [[package]] name = "windows" -version = "0.48.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" +checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" dependencies = [ - "windows-targets 0.48.1", + "windows-core 0.52.0", + "windows-targets 0.52.5", ] [[package]] -name = "windows" -version = "0.52.0" +name = "windows-core" +version = "0.51.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" +checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64" dependencies = [ - "windows-core", - "windows-targets 0.52.0", + "windows-targets 0.48.5", ] [[package]] @@ -10489,7 +10814,22 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.0", + "windows-targets 0.52.5", +] + +[[package]] +name = "windows-sys" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", ] [[package]] @@ -10507,7 +10847,7 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets 0.48.1", + "windows-targets 0.48.5", ] [[package]] @@ -10516,7 +10856,7 @@ version = "0.52.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.0", + "windows-targets 0.52.5", ] [[package]] @@ -10536,32 +10876,33 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.48.1" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" dependencies = [ - "windows_aarch64_gnullvm 0.48.0", - "windows_aarch64_msvc 0.48.0", - "windows_i686_gnu 0.48.0", - "windows_i686_msvc 0.48.0", - "windows_x86_64_gnu 0.48.0", - "windows_x86_64_gnullvm 0.48.0", - "windows_x86_64_msvc 0.48.0", + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", ] [[package]] name = "windows-targets" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" dependencies = [ - "windows_aarch64_gnullvm 0.52.0", - "windows_aarch64_msvc 0.52.0", - "windows_i686_gnu 0.52.0", - "windows_i686_msvc 0.52.0", - "windows_x86_64_gnu 0.52.0", - "windows_x86_64_gnullvm 0.52.0", - "windows_x86_64_msvc 0.52.0", + "windows_aarch64_gnullvm 0.52.5", + "windows_aarch64_msvc 0.52.5", + "windows_i686_gnu 0.52.5", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.5", + "windows_x86_64_gnu 0.52.5", + "windows_x86_64_gnullvm 0.52.5", + "windows_x86_64_msvc 0.52.5", ] [[package]] @@ -10572,21 +10913,15 @@ checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" [[package]] name = "windows_aarch64_gnullvm" 
-version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.34.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17cffbe740121affb56fad0fc0e421804adf0ae00891205213b5cecd30db881d" +checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" [[package]] name = "windows_aarch64_msvc" @@ -10596,21 +10931,15 @@ checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_aarch64_msvc" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" - -[[package]] -name = "windows_i686_gnu" -version = "0.34.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2564fde759adb79129d9b4f54be42b32c89970c18ebf93124ca8870a498688ed" +checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" [[package]] name = "windows_i686_gnu" @@ -10620,21 +10949,21 @@ checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_gnu" -version = "0.48.0" +version = "0.48.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" +checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" [[package]] -name = "windows_i686_msvc" -version = "0.34.0" +name = "windows_i686_gnullvm" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cd9d32ba70453522332c14d38814bceeb747d80b3958676007acadd7e166956" +checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" [[package]] name = "windows_i686_msvc" @@ -10644,21 +10973,15 @@ checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_i686_msvc" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.34.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfce6deae227ee8d356d19effc141a509cc503dfd1f850622ec4b0f84428e1f4" +checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" [[package]] name = "windows_x86_64_gnu" @@ -10668,15 +10991,15 @@ checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnu" -version = 
"0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca2b8a661f7628cbd23440e50b05d705db3686f894fc9580820623656af974b1" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" +checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" [[package]] name = "windows_x86_64_gnullvm" @@ -10686,21 +11009,15 @@ checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = "windows_x86_64_gnullvm" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.34.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d19538ccc21819d01deaf88d6a17eae6596a12e9aafdbb97916fb49896d89de9" +checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" [[package]] name = "windows_x86_64_msvc" @@ -10710,30 +11027,30 @@ checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "windows_x86_64_msvc" -version = "0.48.0" +version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = 
"windows_x86_64_msvc" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" +checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" [[package]] name = "winnow" -version = "0.5.0" +version = "0.5.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81fac9742fd1ad1bd9643b991319f72dd031016d44b77039a26977eb667141e7" +checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" dependencies = [ "memchr", ] [[package]] name = "winnow" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dffa400e67ed5a4dd237983829e66475f0a4a26938c4b04c21baede6262215b8" +checksum = "f0c976aaaa0e1f90dbb21e9587cdaf1d9679a1cde8875c0d6bd83ab96a208352" dependencies = [ "memchr", ] @@ -10744,7 +11061,7 @@ version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "windows-sys 0.48.0", ] @@ -10770,11 +11087,11 @@ dependencies = [ [[package]] name = "x25519-dalek" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb66477291e7e8d2b0ff1bcb900bf29489a9692816d79874bea351e7a8b6de96" +checksum = "c7e468321c81fb07fa7f4c636c3972b9100f0346e5b6a9f2bd0603a52f7ed277" dependencies = [ - "curve25519-dalek 4.1.1", + "curve25519-dalek 4.1.2", "rand_core 0.6.4", "serde", "zeroize", @@ -10798,15 +11115,32 @@ dependencies = [ "time", ] +[[package]] +name = "x509-parser" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7069fba5b66b9193bd2c5d3d4ff12b839118f6bcbef5328efafafb5395cf63da" +dependencies = [ + "asn1-rs", + "data-encoding", + "der-parser", + "lazy_static", + "nom", + "oid-registry", + 
"rusticata-macros", + "thiserror", + "time", +] + [[package]] name = "xcm-procedural" version = "7.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#1ead59773e2dab336d2b54295419bbc3fe7c687f" +source = "git+https://github.com/paritytech/polkadot-sdk?branch=master#4eabe5e0dddc4cd31ad9dab5645350360d4d36a5" dependencies = [ "Inflector", - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.52", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -10819,7 +11153,7 @@ dependencies = [ "log", "nohash-hasher", "parking_lot 0.12.1", - "rand", + "rand 0.8.5", "static_assertions", ] @@ -10853,9 +11187,9 @@ version = "0.7.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.52", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -10873,9 +11207,9 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ - "proc-macro2 1.0.76", - "quote 1.0.35", - "syn 2.0.52", + "proc-macro2 1.0.81", + "quote 1.0.36", + "syn 2.0.60", ] [[package]] @@ -10893,7 +11227,7 @@ version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a27595e173641171fc74a1232b7b1c7a7cb6e18222c11e9dfb9888fa424c53c" dependencies = [ - "zstd-safe 6.0.5+zstd.1.5.4", + "zstd-safe 6.0.6", ] [[package]] @@ -10908,9 +11242,9 @@ dependencies = [ [[package]] name = "zstd-safe" -version = "6.0.5+zstd.1.5.4" +version = "6.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d56d9e60b4b1758206c238a10165fbcae3ca37b01744e394c463463f6529d23b" +checksum = "ee98ffd0b48ee95e6c5168188e44a54550b1564d9d530ee21d5f0eaed1069581" dependencies = [ "libc", "zstd-sys", @@ -10918,11 +11252,10 @@ dependencies = [ [[package]] 
name = "zstd-sys" -version = "2.0.8+zstd.1.5.5" +version = "2.0.10+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5556e6ee25d32df2586c098bbfa278803692a20d0ab9565e049480d52707ec8c" +checksum = "c253a4914af5bafc8fa8c86ee400827e83cf6ec01195ec1f1ed8441bf00d65aa" dependencies = [ "cc", - "libc", "pkg-config", ] diff --git a/Cargo.toml b/Cargo.toml index 78953c9ae3725028e286accc5d3e84ff80bb7839..2fe9952f99f3a41dc381b4152ddc3c72de992f7d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,54 +6,17 @@ license = "GPL-3.0-only" [workspace] resolver = "2" - members = [ - "bin/runtime-common", - "modules/beefy", - "modules/grandpa", - "modules/messages", - "modules/parachains", - "modules/relayers", - "modules/xcm-bridge-hub", - "modules/xcm-bridge-hub-router", - "primitives/beefy", - "primitives/chain-asset-hub-rococo", - "primitives/chain-asset-hub-westend", - "primitives/chain-bridge-hub-cumulus", - "primitives/chain-bridge-hub-kusama", - "primitives/chain-bridge-hub-polkadot", - "primitives/chain-bridge-hub-rococo", - "primitives/chain-bridge-hub-westend", - "primitives/chain-kusama", - "primitives/chain-polkadot", - "primitives/chain-polkadot-bulletin", - "primitives/chain-rococo", - "primitives/chain-westend", - "primitives/header-chain", - "primitives/messages", - "primitives/parachains", - "primitives/polkadot-core", - "primitives/relayers", - "primitives/runtime", - "primitives/test-utils", - "primitives/xcm-bridge-hub-router", - "relays/bin-substrate", - "relays/client-bridge-hub-kusama", - "relays/client-bridge-hub-polkadot", - "relays/client-bridge-hub-rococo", - "relays/client-bridge-hub-westend", - "relays/client-kusama", - "relays/client-polkadot", - "relays/client-polkadot-bulletin", - "relays/client-rococo", - "relays/client-substrate", - "relays/client-westend", - "relays/equivocation", - "relays/finality", - "relays/lib-substrate-relay", - "relays/messages", - "relays/parachains", - "relays/utils", + 
"relay-clients/client-bridge-hub-kusama", + "relay-clients/client-bridge-hub-polkadot", + "relay-clients/client-bridge-hub-rococo", + "relay-clients/client-bridge-hub-westend", + "relay-clients/client-kusama", + "relay-clients/client-polkadot", + "relay-clients/client-polkadot-bulletin", + "relay-clients/client-rococo", + "relay-clients/client-westend", + "substrate-relay", ] # Setup clippy lints as `polkadot-sdk`, @@ -89,7 +52,7 @@ complexity = { level = "deny", priority = 1 } [workspace.dependencies] log = { version = "0.4.20", default-features = false } -quote = { version = "1.0.33" } +quote = { version = "1.0.36" } serde = { version = "1.0.197", default-features = false } -serde_json = { version = "1.0.114", default-features = false } -thiserror = { version = "1.0.48" } +serde_json = { version = "1.0.115", default-features = false } +thiserror = { version = "1.0.59" } diff --git a/Dockerfile b/Dockerfile index 99831af410d6b1dab4dcd4d217b8df29cd92ed4d..cb5be719580b4990c26d20f9047053b6cb276f07 100644 --- a/Dockerfile +++ b/Dockerfile @@ -8,7 +8,7 @@ # # See the `deployments/README.md` for all the available `PROJECT` values. 
-FROM docker.io/paritytech/bridges-ci:production as builder +FROM docker.io/paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408 as builder USER root WORKDIR /parity-bridges-common diff --git a/README.md b/README.md index a2ce213d2541c346361eb28125a06e3079e1c269..466ac3e7de5bd841d97f68e0cae6086e6a32468b 100644 --- a/README.md +++ b/README.md @@ -38,10 +38,10 @@ cargo test --all ``` Also you can build the repo with [Parity CI Docker -image](https://github.com/paritytech/scripts/tree/master/dockerfiles/bridges-ci): +image](https://github.com/paritytech/scripts/tree/master/dockerfiles/ci-unified): ```bash -docker pull paritytech/bridges-ci:production +docker pull paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408 mkdir ~/cache chown 1000:1000 ~/cache #processes in the container runs as "nonroot" user with UID 1000 docker run --rm -it -w /shellhere/parity-bridges-common \ @@ -49,7 +49,7 @@ docker run --rm -it -w /shellhere/parity-bridges-common \ -v "$(pwd)":/shellhere/parity-bridges-common \ -e CARGO_HOME=/cache/cargo/ \ -e SCCACHE_DIR=/cache/sccache/ \ - -e CARGO_TARGET_DIR=/cache/target/ paritytech/bridges-ci:production cargo build --all + -e CARGO_TARGET_DIR=/cache/target/ paritytech/ci-unified:bullseye-1.77.0-2024-04-10-v20240408 cargo build --all #artifacts can be found in ~/cache/target ``` diff --git a/RELEASE.md b/RELEASE.md index e45bedddeaa0399ad367e95c9ce96918580aa2bf..cb00ceb0d7954f167265a16ce20939a4a7ceda2c 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -6,16 +6,16 @@ come first and details come in the last sections. ### Making a Release All releases are supposed to be done from the -[`polkadot-staging` branch](https://github.com/paritytech/parity-bridges-common/tree/polkadot-staging). +[`master` branch](https://github.com/paritytech/parity-bridges-common/tree/master). This branch is assumed to contain changes, that are reviewed and audited. To prepare a release: 1. 
Make sure all required changes are merged to the - [`polkadot-staging` branch](https://github.com/paritytech/parity-bridges-common/tree/polkadot-staging); + [`master` branch](https://github.com/paritytech/parity-bridges-common/tree/master); 2. Select release version: go to the `Cargo.toml` of `substrate-relay` crate - ([here](https://github.com/paritytech/parity-bridges-common/blob/polkadot-staging/relays/bin-substrate/Cargo.toml#L3)) + ([here](https://github.com/paritytech/parity-bridges-common/blob/master/relays/bin-substrate/Cargo.toml#L3)) to look for the latest version. Then increment the minor or major version. **NOTE**: we are not going to properly support [semver](https://semver.org) @@ -28,11 +28,11 @@ To prepare a release: It could be combined with the (1) if changes are not large. Make sure to add the [`A-release`](https://github.com/paritytech/parity-bridges-common/labels/A-release) label to your PR - in the future we'll add workflow to make pre-releases - when such PR is merged to the `polkadot-staging` branch; + when such PR is merged to the `master` branch; 4. Wait for approvals and merge PR, mentioned in (3); -5. Checkout updated `polkadot-staging` branch and do `git pull`; +5. Checkout updated `master` branch and do `git pull`; 6. Make a new git tag with the `substrate-relay` version: ```sh @@ -123,15 +123,15 @@ support it. Normally it means: 1. 
Bumping bundled chain versions in following places: -- for `Rococo` and `RBH`: [here](https://github.com/paritytech/parity-bridges-common/blob/polkadot-staging/relays/bin-substrate/src/chains/rococo.rs); +- for `Rococo` and `RBH`: [here](https://github.com/paritytech/parity-bridges-common/blob/master/relays/bin-substrate/src/chains/rococo.rs); -- for `Westend` and `WBH`: [here](https://github.com/paritytech/parity-bridges-common/blob/polkadot-staging/relays/bin-substrate/src/chains/westend.rs); +- for `Westend` and `WBH`: [here](https://github.com/paritytech/parity-bridges-common/blob/master/relays/bin-substrate/src/chains/westend.rs); -- for `Kusama` and `KBH`: [here](https://github.com/paritytech/parity-bridges-common/blob/polkadot-staging/relays/bin-substrate/src/chains/polkadot.rs) +- for `Kusama` and `KBH`: [here](https://github.com/paritytech/parity-bridges-common/blob/master/relays/bin-substrate/src/chains/polkadot.rs) -- for `Polkadot` and `PBH`: [here](https://github.com/paritytech/parity-bridges-common/blob/polkadot-staging/relays/bin-substrate/src/chains/polkadot.rs); +- for `Polkadot` and `PBH`: [here](https://github.com/paritytech/parity-bridges-common/blob/master/relays/bin-substrate/src/chains/polkadot.rs); -- for `PBC`: [here](https://github.com/paritytech/parity-bridges-common/blob/polkadot-staging/relays/bin-substrate/src/chains/polkadot_bulletin.rs). +- for `PBC`: [here](https://github.com/paritytech/parity-bridges-common/blob/master/relays/bin-substrate/src/chains/polkadot_bulletin.rs). 2. 
Regenerating bundled runtime wrapper code using `runtime-codegen` binary: diff --git a/bin/.keep b/bin/.keep deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/bin/runtime-common/Cargo.toml b/bin/runtime-common/Cargo.toml deleted file mode 100644 index 4d1872d8cd5a22a153bed0660a3d83aecd2476ac..0000000000000000000000000000000000000000 --- a/bin/runtime-common/Cargo.toml +++ /dev/null @@ -1,103 +0,0 @@ -[package] -name = "bridge-runtime-common" -version = "0.7.0" -description = "Common types and functions that may be used by substrate-based runtimes of all bridged chains" -authors.workspace = true -edition.workspace = true -repository.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive"] } -hash-db = { version = "0.16.0", default-features = false } -log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -static_assertions = { version = "1.1", optional = true } -tuplex = { version = "0.1", default-features = false } - -# Bridge dependencies - -bp-header-chain = { path = "../../primitives/header-chain", default-features = false } -bp-messages = { path = "../../primitives/messages", default-features = false } -bp-parachains = { path = "../../primitives/parachains", default-features = false } -bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } -bp-relayers = { path = "../../primitives/relayers", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } -bp-xcm-bridge-hub = { path = "../../primitives/xcm-bridge-hub", default-features = false } -bp-xcm-bridge-hub-router = { path = "../../primitives/xcm-bridge-hub-router", default-features = false } -pallet-bridge-grandpa = { path = 
"../../modules/grandpa", default-features = false } -pallet-bridge-messages = { path = "../../modules/messages", default-features = false } -pallet-bridge-parachains = { path = "../../modules/parachains", default-features = false } -pallet-bridge-relayers = { path = "../../modules/relayers", default-features = false } - -# Substrate dependencies - -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -frame-system = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -pallet-transaction-payment = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -pallet-utility = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-io = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-trie = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } - -# Polkadot dependencies -xcm = { package = "staging-xcm", git = "https://github.com/paritytech/polkadot-sdk", default-features = false , branch = "master" } -xcm-builder = { package = "staging-xcm-builder", git = "https://github.com/paritytech/polkadot-sdk", default-features = false , branch = "master" } - -[dev-dependencies] -bp-test-utils = { path = "../../primitives/test-utils" } -pallet-balances = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } - -[features] 
-default = ["std"] -std = [ - "bp-header-chain/std", - "bp-messages/std", - "bp-parachains/std", - "bp-polkadot-core/std", - "bp-relayers/std", - "bp-runtime/std", - "bp-xcm-bridge-hub-router/std", - "bp-xcm-bridge-hub/std", - "codec/std", - "frame-support/std", - "frame-system/std", - "hash-db/std", - "log/std", - "pallet-bridge-grandpa/std", - "pallet-bridge-messages/std", - "pallet-bridge-parachains/std", - "pallet-bridge-relayers/std", - "pallet-transaction-payment/std", - "pallet-utility/std", - "scale-info/std", - "sp-api/std", - "sp-core/std", - "sp-io/std", - "sp-runtime/std", - "sp-std/std", - "sp-trie/std", - "tuplex/std", - "xcm-builder/std", - "xcm/std", -] -runtime-benchmarks = [ - "frame-support/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "pallet-balances/runtime-benchmarks", - "pallet-bridge-grandpa/runtime-benchmarks", - "pallet-bridge-messages/runtime-benchmarks", - "pallet-bridge-parachains/runtime-benchmarks", - "pallet-bridge-relayers/runtime-benchmarks", - "pallet-transaction-payment/runtime-benchmarks", - "pallet-utility/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", - "xcm-builder/runtime-benchmarks", -] -integrity-test = ["static_assertions"] diff --git a/bin/runtime-common/src/extensions/priority_calculator.rs b/bin/runtime-common/src/extensions/priority_calculator.rs deleted file mode 100644 index 0c53018330ea0ebc2fbacb32808e01a9ec88960f..0000000000000000000000000000000000000000 --- a/bin/runtime-common/src/extensions/priority_calculator.rs +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Bridge transaction priority calculator. -//! -//! We want to prioritize message delivery transactions with more messages over -//! transactions with less messages. That's because we reject delivery transactions -//! if it contains already delivered message. And if some transaction delivers -//! single message with nonce `N`, then the transaction with nonces `N..=N+100` will -//! be rejected. This can lower bridge throughput down to one message per block. - -use bp_messages::MessageNonce; -use frame_support::traits::Get; -use sp_runtime::transaction_validity::TransactionPriority; - -// reexport everything from `integrity_tests` module -#[allow(unused_imports)] -pub use integrity_tests::*; - -/// Compute priority boost for message delivery transaction that delivers -/// given number of messages. 
-pub fn compute_priority_boost( - messages: MessageNonce, -) -> TransactionPriority -where - PriorityBoostPerMessage: Get, -{ - // we don't want any boost for transaction with single message => minus one - PriorityBoostPerMessage::get().saturating_mul(messages.saturating_sub(1)) -} - -#[cfg(not(feature = "integrity-test"))] -mod integrity_tests {} - -#[cfg(feature = "integrity-test")] -mod integrity_tests { - use super::compute_priority_boost; - - use bp_messages::MessageNonce; - use bp_runtime::PreComputedSize; - use frame_support::{ - dispatch::{DispatchClass, DispatchInfo, Pays, PostDispatchInfo}, - traits::Get, - }; - use pallet_bridge_messages::WeightInfoExt; - use pallet_transaction_payment::OnChargeTransaction; - use sp_runtime::{ - traits::{Dispatchable, UniqueSaturatedInto, Zero}, - transaction_validity::TransactionPriority, - FixedPointOperand, SaturatedConversion, Saturating, - }; - - type BalanceOf = - <::OnChargeTransaction as OnChargeTransaction< - T, - >>::Balance; - - /// Ensures that the value of `PriorityBoostPerMessage` matches the value of - /// `tip_boost_per_message`. - /// - /// We want two transactions, `TX1` with `N` messages and `TX2` with `N+1` messages, have almost - /// the same priority if we'll add `tip_boost_per_message` tip to the `TX1`. We want to be sure - /// that if we add plain `PriorityBoostPerMessage` priority to `TX1`, the priority will be close - /// to `TX2` as well. 
- pub fn ensure_priority_boost_is_sane( - tip_boost_per_message: BalanceOf, - ) where - Runtime: - pallet_transaction_payment::Config + pallet_bridge_messages::Config, - MessagesInstance: 'static, - PriorityBoostPerMessage: Get, - Runtime::RuntimeCall: Dispatchable, - BalanceOf: Send + Sync + FixedPointOperand, - { - let priority_boost_per_message = PriorityBoostPerMessage::get(); - let maximal_messages_in_delivery_transaction = - Runtime::MaxUnconfirmedMessagesAtInboundLane::get(); - for messages in 1..=maximal_messages_in_delivery_transaction { - let base_priority = estimate_message_delivery_transaction_priority::< - Runtime, - MessagesInstance, - >(messages, Zero::zero()); - let priority_boost = compute_priority_boost::(messages); - let priority_with_boost = base_priority + priority_boost; - - let tip = tip_boost_per_message.saturating_mul((messages - 1).unique_saturated_into()); - let priority_with_tip = - estimate_message_delivery_transaction_priority::(1, tip); - - const ERROR_MARGIN: TransactionPriority = 5; // 5% - if priority_with_boost.abs_diff(priority_with_tip).saturating_mul(100) / - priority_with_tip > - ERROR_MARGIN - { - panic!( - "The PriorityBoostPerMessage value ({}) must be fixed to: {}", - priority_boost_per_message, - compute_priority_boost_per_message::( - tip_boost_per_message - ), - ); - } - } - } - - /// Compute priority boost that we give to message delivery transaction for additional message. 
- #[cfg(feature = "integrity-test")] - fn compute_priority_boost_per_message( - tip_boost_per_message: BalanceOf, - ) -> TransactionPriority - where - Runtime: - pallet_transaction_payment::Config + pallet_bridge_messages::Config, - MessagesInstance: 'static, - Runtime::RuntimeCall: Dispatchable, - BalanceOf: Send + Sync + FixedPointOperand, - { - // esimate priority of transaction that delivers one message and has large tip - let maximal_messages_in_delivery_transaction = - Runtime::MaxUnconfirmedMessagesAtInboundLane::get(); - let small_with_tip_priority = - estimate_message_delivery_transaction_priority::( - 1, - tip_boost_per_message - .saturating_mul(maximal_messages_in_delivery_transaction.saturated_into()), - ); - // estimate priority of transaction that delivers maximal number of messages, but has no tip - let large_without_tip_priority = estimate_message_delivery_transaction_priority::< - Runtime, - MessagesInstance, - >(maximal_messages_in_delivery_transaction, Zero::zero()); - - small_with_tip_priority - .saturating_sub(large_without_tip_priority) - .saturating_div(maximal_messages_in_delivery_transaction - 1) - } - - /// Estimate message delivery transaction priority. 
- #[cfg(feature = "integrity-test")] - fn estimate_message_delivery_transaction_priority( - messages: MessageNonce, - tip: BalanceOf, - ) -> TransactionPriority - where - Runtime: - pallet_transaction_payment::Config + pallet_bridge_messages::Config, - MessagesInstance: 'static, - Runtime::RuntimeCall: Dispatchable, - BalanceOf: Send + Sync + FixedPointOperand, - { - // just an estimation of extra transaction bytes that are added to every transaction - // (including signature, signed extensions extra and etc + in our case it includes - // all call arguments extept the proof itself) - let base_tx_size = 512; - // let's say we are relaying similar small messages and for every message we add more trie - // nodes to the proof (x0.5 because we expect some nodes to be reused) - let estimated_message_size = 512; - // let's say all our messages have the same dispatch weight - let estimated_message_dispatch_weight = >::WeightInfo::message_dispatch_weight( - estimated_message_size - ); - // messages proof argument size is (for every message) messages size + some additional - // trie nodes. 
Some of them are reused by different messages, so let's take 2/3 of default - // "overhead" constant - let messages_proof_size = >::WeightInfo::expected_extra_storage_proof_size() - .saturating_mul(2) - .saturating_div(3) - .saturating_add(estimated_message_size) - .saturating_mul(messages as _); - - // finally we are able to estimate transaction size and weight - let transaction_size = base_tx_size.saturating_add(messages_proof_size); - let transaction_weight = >::WeightInfo::receive_messages_proof_weight( - &PreComputedSize(transaction_size as _), - messages as _, - estimated_message_dispatch_weight.saturating_mul(messages), - ); - - pallet_transaction_payment::ChargeTransactionPayment::::get_priority( - &DispatchInfo { - weight: transaction_weight, - class: DispatchClass::Normal, - pays_fee: Pays::Yes, - }, - transaction_size as _, - tip, - Zero::zero(), - ) - } -} diff --git a/bin/runtime-common/src/extensions/refund_relayer_extension.rs b/bin/runtime-common/src/extensions/refund_relayer_extension.rs deleted file mode 100644 index a60c31af3f92cef7ea5769df7c36049960b98aa9..0000000000000000000000000000000000000000 --- a/bin/runtime-common/src/extensions/refund_relayer_extension.rs +++ /dev/null @@ -1,2937 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! 
Transaction extension that refunds relayer if he has delivered some new messages. -//! It also refunds transaction cost if the transaction is an `utility.batchAll()` -//! with calls that are: delivering new messsage and all necessary underlying headers -//! (parachain or relay chain). - -use crate::{ - messages_call_ext::{ - CallHelper as MessagesCallHelper, CallInfo as MessagesCallInfo, MessagesCallSubType, - }, - RefundableParachainId, -}; -use bp_messages::{LaneId, MessageNonce}; -use bp_relayers::{ExplicitOrAccountParams, RewardsAccountOwner, RewardsAccountParams}; -use bp_runtime::{RangeInclusiveExt, StaticStrProvider}; -use codec::{Codec, Decode, Encode}; -use frame_support::{ - dispatch::{CallableCallFor, DispatchInfo, PostDispatchInfo}, - traits::IsSubType, - weights::Weight, - CloneNoBound, DefaultNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, -}; -use pallet_bridge_grandpa::{ - CallSubType as GrandpaCallSubType, SubmitFinalityProofHelper, SubmitFinalityProofInfo, -}; -use pallet_bridge_messages::Config as MessagesConfig; -use pallet_bridge_parachains::{ - BoundedBridgeGrandpaConfig, CallSubType as ParachainsCallSubType, Config as ParachainsConfig, - RelayBlockNumber, SubmitParachainHeadsHelper, SubmitParachainHeadsInfo, -}; -use pallet_bridge_relayers::{ - Config as RelayersConfig, Pallet as RelayersPallet, WeightInfoExt as _, -}; -use pallet_transaction_payment::{Config as TransactionPaymentConfig, OnChargeTransaction}; -use pallet_utility::{Call as UtilityCall, Config as UtilityConfig, Pallet as UtilityPallet}; -use scale_info::TypeInfo; -use sp_runtime::{ - traits::{ - AsSystemOriginSigner, DispatchInfoOf, Dispatchable, Get, PostDispatchInfoOf, - TransactionExtension, TransactionExtensionBase, ValidateResult, Zero, - }, - transaction_validity::{ - InvalidTransaction, TransactionPriority, TransactionValidityError, ValidTransactionBuilder, - }, - DispatchResult, FixedPointOperand, RuntimeDebug, -}; -use sp_std::{marker::PhantomData, vec, 
vec::Vec}; - -type AccountIdOf = ::AccountId; -// without this typedef rustfmt fails with internal err -type BalanceOf = - <::OnChargeTransaction as OnChargeTransaction>::Balance; -type CallOf = ::RuntimeCall; - -/// Trait identifying a bridged messages lane. A relayer might be refunded for delivering messages -/// coming from this lane. -pub trait RefundableMessagesLaneId { - /// The instance of the bridge messages pallet. - type Instance: 'static; - /// The messages lane id. - type Id: Get; -} - -/// Default implementation of `RefundableMessagesLaneId`. -pub struct RefundableMessagesLane(PhantomData<(Instance, Id)>); - -impl RefundableMessagesLaneId for RefundableMessagesLane -where - Instance: 'static, - Id: Get, -{ - type Instance = Instance; - type Id = Id; -} - -/// Refund calculator. -pub trait RefundCalculator { - /// The underlying integer type in which the refund is calculated. - type Balance; - - /// Compute refund for given transaction. - fn compute_refund( - info: &DispatchInfo, - post_info: &PostDispatchInfo, - len: usize, - tip: Self::Balance, - ) -> Self::Balance; -} - -/// `RefundCalculator` implementation which refunds the actual transaction fee. -pub struct ActualFeeRefund(PhantomData); - -impl RefundCalculator for ActualFeeRefund -where - R: TransactionPaymentConfig, - CallOf: Dispatchable, - BalanceOf: FixedPointOperand, -{ - type Balance = BalanceOf; - - fn compute_refund( - info: &DispatchInfo, - post_info: &PostDispatchInfo, - len: usize, - tip: BalanceOf, - ) -> BalanceOf { - pallet_transaction_payment::Pallet::::compute_actual_fee(len as _, info, post_info, tip) - } -} - -/// Data that is crafted in `pre_dispatch` method and used at `post_dispatch`. -#[cfg_attr(test, derive(Debug, PartialEq))] -pub struct PreDispatchData { - /// Transaction submitter (relayer) account. - relayer: AccountId, - /// Type of the call. - call_info: CallInfo, -} - -/// Type of the call that the extension recognizes. 
-#[derive(RuntimeDebugNoBound, PartialEq)] -pub enum CallInfo { - /// Relay chain finality + parachain finality + message delivery/confirmation calls. - AllFinalityAndMsgs( - SubmitFinalityProofInfo, - SubmitParachainHeadsInfo, - MessagesCallInfo, - ), - /// Relay chain finality + message delivery/confirmation calls. - RelayFinalityAndMsgs(SubmitFinalityProofInfo, MessagesCallInfo), - /// Parachain finality + message delivery/confirmation calls. - /// - /// This variant is used only when bridging with parachain. - ParachainFinalityAndMsgs(SubmitParachainHeadsInfo, MessagesCallInfo), - /// Standalone message delivery/confirmation call. - Msgs(MessagesCallInfo), -} - -impl CallInfo { - /// Returns true if call is a message delivery call (with optional finality calls). - fn is_receive_messages_proof_call(&self) -> bool { - match self.messages_call_info() { - MessagesCallInfo::ReceiveMessagesProof(_) => true, - MessagesCallInfo::ReceiveMessagesDeliveryProof(_) => false, - } - } - - /// Returns the pre-dispatch `finality_target` sent to the `SubmitFinalityProof` call. - fn submit_finality_proof_info(&self) -> Option> { - match *self { - Self::AllFinalityAndMsgs(info, _, _) => Some(info), - Self::RelayFinalityAndMsgs(info, _) => Some(info), - _ => None, - } - } - - /// Returns mutable reference to pre-dispatch `finality_target` sent to the - /// `SubmitFinalityProof` call. - #[cfg(test)] - fn submit_finality_proof_info_mut( - &mut self, - ) -> Option<&mut SubmitFinalityProofInfo> { - match *self { - Self::AllFinalityAndMsgs(ref mut info, _, _) => Some(info), - Self::RelayFinalityAndMsgs(ref mut info, _) => Some(info), - _ => None, - } - } - - /// Returns the pre-dispatch `SubmitParachainHeadsInfo`. 
- fn submit_parachain_heads_info(&self) -> Option<&SubmitParachainHeadsInfo> { - match self { - Self::AllFinalityAndMsgs(_, info, _) => Some(info), - Self::ParachainFinalityAndMsgs(info, _) => Some(info), - _ => None, - } - } - - /// Returns the pre-dispatch `ReceiveMessagesProofInfo`. - fn messages_call_info(&self) -> &MessagesCallInfo { - match self { - Self::AllFinalityAndMsgs(_, _, info) => info, - Self::RelayFinalityAndMsgs(_, info) => info, - Self::ParachainFinalityAndMsgs(_, info) => info, - Self::Msgs(info) => info, - } - } -} - -/// The actions on relayer account that need to be performed because of his actions. -#[derive(RuntimeDebug, PartialEq)] -pub enum RelayerAccountAction { - /// Do nothing with relayer account. - None, - /// Reward the relayer. - Reward(AccountId, RewardsAccountParams, Reward), - /// Slash the relayer. - Slash(AccountId, RewardsAccountParams), -} - -/// Everything common among our refund transaction extensions. -pub trait RefundTransactionExtension: - 'static + Clone + Codec + sp_std::fmt::Debug + Default + Eq + PartialEq + Send + Sync + TypeInfo -{ - /// This chain runtime. - type Runtime: MessagesConfig<::Instance> - + RelayersConfig; - /// Messages pallet and lane reference. - type Msgs: RefundableMessagesLaneId; - /// Refund amount calculator. - type Refund: RefundCalculator::Reward>; - /// Priority boost calculator. - type Priority: Get; - /// Signed extension unique identifier. - type Id: StaticStrProvider; - - /// Unpack batch runtime call. - fn expand_call(call: &CallOf) -> Vec<&CallOf>; - - /// Given runtime call, check if it has supported format. Additionally, check if any of - /// (optionally batched) calls are obsolete and we shall reject the transaction. - fn parse_and_check_for_obsolete_call( - call: &CallOf, - ) -> Result, TransactionValidityError>; - - /// Check if parsed call is already obsolete. 
- fn check_obsolete_parsed_call( - call: &CallOf, - ) -> Result<&CallOf, TransactionValidityError>; - - /// Called from post-dispatch and shall perform additional checks (apart from messages - /// transaction success) of given call result. - fn additional_call_result_check( - relayer: &AccountIdOf, - call_info: &CallInfo, - extra_weight: &mut Weight, - extra_size: &mut u32, - ) -> bool; - - /// Given post-dispatch information, analyze the outcome of relayer call and return - /// actions that need to be performed on relayer account. - fn analyze_call_result( - pre: Option>>>, - info: &DispatchInfo, - post_info: &PostDispatchInfo, - len: usize, - result: &DispatchResult, - ) -> RelayerAccountAction, ::Reward> - { - let mut extra_weight = Weight::zero(); - let mut extra_size = 0; - - // We don't refund anything for transactions that we don't support. - let (relayer, call_info) = match pre { - Some(Some(pre)) => (pre.relayer, pre.call_info), - _ => return RelayerAccountAction::None, - }; - - // now we know that the relayer either needs to be rewarded, or slashed - // => let's prepare the correspondent account that pays reward/receives slashed amount - let reward_account_params = - RewardsAccountParams::new( - ::Id::get(), - ::Instance, - >>::BridgedChainId::get(), - if call_info.is_receive_messages_proof_call() { - RewardsAccountOwner::ThisChain - } else { - RewardsAccountOwner::BridgedChain - }, - ); - - // prepare return value for the case if the call has failed or it has not caused - // expected side effects (e.g. 
not all messages have been accepted) - // - // we are not checking if relayer is registered here - it happens during the slash attempt - // - // there are couple of edge cases here: - // - // - when the relayer becomes registered during message dispatch: this is unlikely + relayer - // should be ready for slashing after registration; - // - // - when relayer is registered after `validate` is called and priority is not boosted: - // relayer should be ready for slashing after registration. - let may_slash_relayer = - Self::bundled_messages_for_priority_boost(Some(&call_info)).is_some(); - let slash_relayer_if_delivery_result = may_slash_relayer - .then(|| RelayerAccountAction::Slash(relayer.clone(), reward_account_params)) - .unwrap_or(RelayerAccountAction::None); - - // We don't refund anything if the transaction has failed. - if let Err(e) = result { - log::trace!( - target: "runtime::bridge", - "{} via {:?}: relayer {:?} has submitted invalid messages transaction: {:?}", - Self::Id::STR, - ::Id::get(), - relayer, - e, - ); - return slash_relayer_if_delivery_result - } - - // Check if the `ReceiveMessagesProof` call delivered at least some of the messages that - // it contained. If this happens, we consider the transaction "helpful" and refund it. - let msgs_call_info = call_info.messages_call_info(); - if !MessagesCallHelper::::Instance>::was_successful(msgs_call_info) { - log::trace!( - target: "runtime::bridge", - "{} via {:?}: relayer {:?} has submitted invalid messages call", - Self::Id::STR, - ::Id::get(), - relayer, - ); - return slash_relayer_if_delivery_result - } - - // do additional checks - if !Self::additional_call_result_check( - &relayer, - &call_info, - &mut extra_weight, - &mut extra_size, - ) { - return slash_relayer_if_delivery_result - } - - // regarding the tip - refund that happens here (at this side of the bridge) isn't the whole - // relayer compensation. He'll receive some amount at the other side of the bridge. 
It shall - // (in theory) cover the tip there. Otherwise, if we'll be compensating tip here, some - // malicious relayer may use huge tips, effectively depleting account that pay rewards. The - // cost of this attack is nothing. Hence we use zero as tip here. - let tip = Zero::zero(); - - // decrease post-dispatch weight/size using extra weight/size that we know now - let post_info_len = len.saturating_sub(extra_size as usize); - let mut post_info_weight = - post_info.actual_weight.unwrap_or(info.weight).saturating_sub(extra_weight); - - // let's also replace the weight of slashing relayer with the weight of rewarding relayer - if call_info.is_receive_messages_proof_call() { - post_info_weight = post_info_weight.saturating_sub( - ::WeightInfo::extra_weight_of_successful_receive_messages_proof_call(), - ); - } - - // compute the relayer refund - let mut post_info = *post_info; - post_info.actual_weight = Some(post_info_weight); - let refund = Self::Refund::compute_refund(info, &post_info, post_info_len, tip); - - // we can finally reward relayer - RelayerAccountAction::Reward(relayer, reward_account_params, refund) - } - - /// Returns number of bundled messages `Some(_)`, if the given call info is a: - /// - /// - message delivery transaction; - /// - /// - with reasonable bundled messages that may be accepted by the messages pallet. - /// - /// This function is used to check whether the transaction priority should be - /// virtually boosted. The relayer registration (we only boost priority for registered - /// relayer transactions) must be checked outside. 
- fn bundled_messages_for_priority_boost(call_info: Option<&CallInfo>) -> Option { - // we only boost priority of message delivery transactions - let parsed_call = match call_info { - Some(parsed_call) if parsed_call.is_receive_messages_proof_call() => parsed_call, - _ => return None, - }; - - // compute total number of messages in transaction - let bundled_messages = parsed_call.messages_call_info().bundled_messages().saturating_len(); - - // a quick check to avoid invalid high-priority transactions - let max_unconfirmed_messages_in_confirmation_tx = ::Instance, - >>::MaxUnconfirmedMessagesAtInboundLane::get( - ); - if bundled_messages > max_unconfirmed_messages_in_confirmation_tx { - return None - } - - Some(bundled_messages) - } -} - -/// Adapter that allow implementing `sp_runtime::traits::TransactionExtension` for any -/// `RefundTransactionExtension`. -#[derive( - DefaultNoBound, - CloneNoBound, - Decode, - Encode, - EqNoBound, - PartialEqNoBound, - RuntimeDebugNoBound, - TypeInfo, -)] -pub struct RefundTransactionExtensionAdapter(T); - -impl TransactionExtensionBase - for RefundTransactionExtensionAdapter -where - CallOf: Dispatchable - + MessagesCallSubType::Instance>, -{ - const IDENTIFIER: &'static str = T::Id::STR; - type Implicit = (); -} - -impl TransactionExtension, Context> - for RefundTransactionExtensionAdapter -where - CallOf: Dispatchable - + MessagesCallSubType::Instance>, - as Dispatchable>::RuntimeOrigin: - AsSystemOriginSigner> + Clone, -{ - type Pre = Option>>; - type Val = Option; - - fn validate( - &self, - origin: as Dispatchable>::RuntimeOrigin, - call: &CallOf, - _info: &DispatchInfoOf>, - _len: usize, - _context: &mut Context, - _self_implicit: Self::Implicit, - _inherited_implication: &impl Encode, - ) -> ValidateResult> { - let who = origin.as_system_origin_signer().ok_or(InvalidTransaction::BadSigner)?; - // this is the only relevant line of code for the `pre_dispatch` - // - // we're not calling `validate` from `pre_dispatch` 
directly because of performance - // reasons, so if you're adding some code that may fail here, please check if it needs - // to be added to the `pre_dispatch` as well - let parsed_call = T::parse_and_check_for_obsolete_call(call)?; - - // the following code just plays with transaction priority and never returns an error - - // we only boost priority of presumably correct message delivery transactions - let bundled_messages = match T::bundled_messages_for_priority_boost(parsed_call.as_ref()) { - Some(bundled_messages) => bundled_messages, - None => return Ok((Default::default(), parsed_call, origin)), - }; - - // we only boost priority if relayer has staked required balance - if !RelayersPallet::::is_registration_active(who) { - return Ok((Default::default(), parsed_call, origin)) - } - - // compute priority boost - let priority_boost = crate::extensions::priority_calculator::compute_priority_boost::< - T::Priority, - >(bundled_messages); - let valid_transaction = ValidTransactionBuilder::default().priority(priority_boost); - - log::trace!( - target: "runtime::bridge", - "{} via {:?} has boosted priority of message delivery transaction \ - of relayer {:?}: {} messages -> {} priority", - Self::IDENTIFIER, - ::Id::get(), - who, - bundled_messages, - priority_boost, - ); - - let validity = valid_transaction.build()?; - Ok((validity, parsed_call, origin)) - } - - fn prepare( - self, - val: Self::Val, - origin: & as Dispatchable>::RuntimeOrigin, - _call: &CallOf, - _info: &DispatchInfoOf>, - _len: usize, - _context: &Context, - ) -> Result { - let who = origin.as_system_origin_signer().ok_or(InvalidTransaction::BadSigner)?; - Ok(val.map(|call_info| { - log::trace!( - target: "runtime::bridge", - "{} via {:?} parsed bridge transaction in pre-dispatch: {:?}", - Self::IDENTIFIER, - ::Id::get(), - call_info, - ); - PreDispatchData { relayer: who.clone(), call_info } - })) - } - - fn post_dispatch( - pre: Self::Pre, - info: &DispatchInfoOf>, - post_info: 
&PostDispatchInfoOf>, - len: usize, - result: &DispatchResult, - _context: &Context, - ) -> Result<(), TransactionValidityError> { - let call_result = T::analyze_call_result(Some(pre), info, post_info, len, result); - - match call_result { - RelayerAccountAction::None => (), - RelayerAccountAction::Reward(relayer, reward_account, reward) => { - RelayersPallet::::register_relayer_reward( - reward_account, - &relayer, - reward, - ); - - log::trace!( - target: "runtime::bridge", - "{} via {:?} has registered reward: {:?} for {:?}", - Self::IDENTIFIER, - ::Id::get(), - reward, - relayer, - ); - }, - RelayerAccountAction::Slash(relayer, slash_account) => - RelayersPallet::::slash_and_deregister( - &relayer, - ExplicitOrAccountParams::Params(slash_account), - ), - } - - Ok(()) - } -} - -/// Transaction extension that refunds a relayer for new messages coming from a parachain. -/// -/// Also refunds relayer for successful finality delivery if it comes in batch (`utility.batchAll`) -/// with message delivery transaction. Batch may deliver either both relay chain header and -/// parachain head, or just parachain head. Corresponding headers must be used in messages -/// proof verification. -/// -/// Extension does not refund transaction tip due to security reasons. 
-#[derive( - DefaultNoBound, - CloneNoBound, - Decode, - Encode, - EqNoBound, - PartialEqNoBound, - RuntimeDebugNoBound, - TypeInfo, -)] -#[scale_info(skip_type_params(Runtime, Para, Msgs, Refund, Priority, Id))] -pub struct RefundBridgedParachainMessages( - PhantomData<( - // runtime with `frame-utility`, `pallet-bridge-grandpa`, `pallet-bridge-parachains`, - // `pallet-bridge-messages` and `pallet-bridge-relayers` pallets deployed - Runtime, - // implementation of `RefundableParachainId` trait, which specifies the instance of - // the used `pallet-bridge-parachains` pallet and the bridged parachain id - Para, - // implementation of `RefundableMessagesLaneId` trait, which specifies the instance of - // the used `pallet-bridge-messages` pallet and the lane within this pallet - Msgs, - // implementation of the `RefundCalculator` trait, that is used to compute refund that - // we give to relayer for his transaction - Refund, - // getter for per-message `TransactionPriority` boost that we give to message - // delivery transactions - Priority, - // the runtime-unique identifier of this signed extension - Id, - )>, -); - -impl RefundTransactionExtension - for RefundBridgedParachainMessages -where - Self: 'static + Send + Sync, - RefundBridgedGrandpaMessages< - Runtime, - Runtime::BridgesGrandpaPalletInstance, - Msgs, - Refund, - Priority, - Id, - >: 'static + Send + Sync, - Runtime: UtilityConfig> - + BoundedBridgeGrandpaConfig - + ParachainsConfig - + MessagesConfig - + RelayersConfig, - Para: RefundableParachainId, - Msgs: RefundableMessagesLaneId, - Refund: RefundCalculator, - Priority: Get, - Id: StaticStrProvider, - CallOf: Dispatchable - + IsSubType, Runtime>> - + GrandpaCallSubType - + ParachainsCallSubType - + MessagesCallSubType, -{ - type Runtime = Runtime; - type Msgs = Msgs; - type Refund = Refund; - type Priority = Priority; - type Id = Id; - - fn expand_call(call: &CallOf) -> Vec<&CallOf> { - match call.is_sub_type() { - Some(UtilityCall::::batch_all { ref 
calls }) if calls.len() <= 3 => - calls.iter().collect(), - Some(_) => vec![], - None => vec![call], - } - } - - fn parse_and_check_for_obsolete_call( - call: &CallOf, - ) -> Result, TransactionValidityError> { - let calls = Self::expand_call(call); - let total_calls = calls.len(); - let mut calls = calls.into_iter().map(Self::check_obsolete_parsed_call).rev(); - - let msgs_call = calls.next().transpose()?.and_then(|c| c.call_info_for(Msgs::Id::get())); - let para_finality_call = calls - .next() - .transpose()? - .and_then(|c| c.submit_parachain_heads_info_for(Para::Id::get())); - let relay_finality_call = - calls.next().transpose()?.and_then(|c| c.submit_finality_proof_info()); - - Ok(match (total_calls, relay_finality_call, para_finality_call, msgs_call) { - (3, Some(relay_finality_call), Some(para_finality_call), Some(msgs_call)) => Some( - CallInfo::AllFinalityAndMsgs(relay_finality_call, para_finality_call, msgs_call), - ), - (2, None, Some(para_finality_call), Some(msgs_call)) => - Some(CallInfo::ParachainFinalityAndMsgs(para_finality_call, msgs_call)), - (1, None, None, Some(msgs_call)) => Some(CallInfo::Msgs(msgs_call)), - _ => None, - }) - } - - fn check_obsolete_parsed_call( - call: &CallOf, - ) -> Result<&CallOf, TransactionValidityError> { - call.check_obsolete_submit_finality_proof()?; - call.check_obsolete_submit_parachain_heads()?; - call.check_obsolete_call()?; - Ok(call) - } - - fn additional_call_result_check( - relayer: &Runtime::AccountId, - call_info: &CallInfo, - extra_weight: &mut Weight, - extra_size: &mut u32, - ) -> bool { - // check if relay chain state has been updated - let is_granda_call_succeeded = - RefundBridgedGrandpaMessages::< - Runtime, - Runtime::BridgesGrandpaPalletInstance, - Msgs, - Refund, - Priority, - Id, - >::additional_call_result_check(relayer, call_info, extra_weight, extra_size); - if !is_granda_call_succeeded { - return false - } - - // check if parachain state has been updated - if let Some(para_proof_info) = 
call_info.submit_parachain_heads_info() { - if !SubmitParachainHeadsHelper::::was_successful( - para_proof_info, - ) { - // we only refund relayer if all calls have updated chain state - log::trace!( - target: "runtime::bridge", - "{} from parachain {} via {:?}: relayer {:?} has submitted invalid parachain finality proof", - Id::STR, - Para::Id::get(), - Msgs::Id::get(), - relayer, - ); - return false - } - } - - true - } -} - -/// Transaction extension that refunds a relayer for new messages coming from a standalone (GRANDPA) -/// chain. -/// -/// Also refunds relayer for successful finality delivery if it comes in batch (`utility.batchAll`) -/// with message delivery transaction. Batch may deliver either both relay chain header and -/// parachain head, or just parachain head. Corresponding headers must be used in messages proof -/// verification. -/// -/// Extension does not refund transaction tip due to security reasons. -#[derive( - DefaultNoBound, - CloneNoBound, - Decode, - Encode, - EqNoBound, - PartialEqNoBound, - RuntimeDebugNoBound, - TypeInfo, -)] -#[scale_info(skip_type_params(Runtime, GrandpaInstance, Msgs, Refund, Priority, Id))] -pub struct RefundBridgedGrandpaMessages( - PhantomData<( - // runtime with `frame-utility`, `pallet-bridge-grandpa`, - // `pallet-bridge-messages` and `pallet-bridge-relayers` pallets deployed - Runtime, - // bridge GRANDPA pallet instance, used to track bridged chain state - GrandpaInstance, - // implementation of `RefundableMessagesLaneId` trait, which specifies the instance of - // the used `pallet-bridge-messages` pallet and the lane within this pallet - Msgs, - // implementation of the `RefundCalculator` trait, that is used to compute refund that - // we give to relayer for his transaction - Refund, - // getter for per-message `TransactionPriority` boost that we give to message - // delivery transactions - Priority, - // the runtime-unique identifier of this signed extension - Id, - )>, -); - -impl 
RefundTransactionExtension - for RefundBridgedGrandpaMessages -where - Self: 'static + Send + Sync, - Runtime: UtilityConfig> - + BoundedBridgeGrandpaConfig - + MessagesConfig - + RelayersConfig, - GrandpaInstance: 'static, - Msgs: RefundableMessagesLaneId, - Refund: RefundCalculator, - Priority: Get, - Id: StaticStrProvider, - CallOf: Dispatchable - + IsSubType, Runtime>> - + GrandpaCallSubType - + MessagesCallSubType, -{ - type Runtime = Runtime; - type Msgs = Msgs; - type Refund = Refund; - type Priority = Priority; - type Id = Id; - - fn expand_call(call: &CallOf) -> Vec<&CallOf> { - match call.is_sub_type() { - Some(UtilityCall::::batch_all { ref calls }) if calls.len() <= 2 => - calls.iter().collect(), - Some(_) => vec![], - None => vec![call], - } - } - - fn parse_and_check_for_obsolete_call( - call: &CallOf, - ) -> Result, TransactionValidityError> { - let calls = Self::expand_call(call); - let total_calls = calls.len(); - let mut calls = calls.into_iter().map(Self::check_obsolete_parsed_call).rev(); - - let msgs_call = calls.next().transpose()?.and_then(|c| c.call_info_for(Msgs::Id::get())); - let relay_finality_call = - calls.next().transpose()?.and_then(|c| c.submit_finality_proof_info()); - - Ok(match (total_calls, relay_finality_call, msgs_call) { - (2, Some(relay_finality_call), Some(msgs_call)) => - Some(CallInfo::RelayFinalityAndMsgs(relay_finality_call, msgs_call)), - (1, None, Some(msgs_call)) => Some(CallInfo::Msgs(msgs_call)), - _ => None, - }) - } - - fn check_obsolete_parsed_call( - call: &CallOf, - ) -> Result<&CallOf, TransactionValidityError> { - call.check_obsolete_submit_finality_proof()?; - call.check_obsolete_call()?; - Ok(call) - } - - fn additional_call_result_check( - relayer: &Runtime::AccountId, - call_info: &CallInfo, - extra_weight: &mut Weight, - extra_size: &mut u32, - ) -> bool { - // check if relay chain state has been updated - if let Some(finality_proof_info) = call_info.submit_finality_proof_info() { - if 
!SubmitFinalityProofHelper::::was_successful( - finality_proof_info.block_number, - ) { - // we only refund relayer if all calls have updated chain state - log::trace!( - target: "runtime::bridge", - "{} via {:?}: relayer {:?} has submitted invalid relay chain finality proof", - Self::Id::STR, - ::Id::get(), - relayer, - ); - return false - } - - // there's a conflict between how bridge GRANDPA pallet works and a `utility.batchAll` - // transaction. If relay chain header is mandatory, the GRANDPA pallet returns - // `Pays::No`, because such transaction is mandatory for operating the bridge. But - // `utility.batchAll` transaction always requires payment. But in both cases we'll - // refund relayer - either explicitly here, or using `Pays::No` if he's choosing - // to submit dedicated transaction. - - // submitter has means to include extra weight/bytes in the `submit_finality_proof` - // call, so let's subtract extra weight/size to avoid refunding for this extra stuff - *extra_weight = (*extra_weight).saturating_add(finality_proof_info.extra_weight); - *extra_size = (*extra_size).saturating_add(finality_proof_info.extra_size); - } - - true - } -} - -/// Transaction extension that refunds a relayer for standalone messages delivery and confirmation -/// transactions. Finality transactions are not refunded. 
-#[derive( - DefaultNoBound, - CloneNoBound, - Decode, - Encode, - EqNoBound, - PartialEqNoBound, - RuntimeDebugNoBound, - TypeInfo, -)] -#[scale_info(skip_type_params(Runtime, GrandpaInstance, Msgs, Refund, Priority, Id))] -pub struct RefundBridgedMessages( - PhantomData<( - // runtime with `pallet-bridge-messages` and `pallet-bridge-relayers` pallets deployed - Runtime, - // implementation of `RefundableMessagesLaneId` trait, which specifies the instance of - // the used `pallet-bridge-messages` pallet and the lane within this pallet - Msgs, - // implementation of the `RefundCalculator` trait, that is used to compute refund that - // we give to relayer for his transaction - Refund, - // getter for per-message `TransactionPriority` boost that we give to message - // delivery transactions - Priority, - // the runtime-unique identifier of this signed extension - Id, - )>, -); - -impl RefundTransactionExtension - for RefundBridgedMessages -where - Self: 'static + Send + Sync, - Runtime: MessagesConfig + RelayersConfig, - Msgs: RefundableMessagesLaneId, - Refund: RefundCalculator, - Priority: Get, - Id: StaticStrProvider, - CallOf: Dispatchable - + MessagesCallSubType, -{ - type Runtime = Runtime; - type Msgs = Msgs; - type Refund = Refund; - type Priority = Priority; - type Id = Id; - - fn expand_call(call: &CallOf) -> Vec<&CallOf> { - vec![call] - } - - fn parse_and_check_for_obsolete_call( - call: &CallOf, - ) -> Result, TransactionValidityError> { - let call = Self::check_obsolete_parsed_call(call)?; - Ok(call.call_info_for(Msgs::Id::get()).map(CallInfo::Msgs)) - } - - fn check_obsolete_parsed_call( - call: &CallOf, - ) -> Result<&CallOf, TransactionValidityError> { - call.check_obsolete_call()?; - Ok(call) - } - - fn additional_call_result_check( - _relayer: &Runtime::AccountId, - _call_info: &CallInfo, - _extra_weight: &mut Weight, - _extra_size: &mut u32, - ) -> bool { - // everything is checked by the `RefundTransactionExtension` - true - } -} - -#[cfg(test)] 
-pub(crate) mod tests { - use super::*; - use crate::{ - messages::{ - source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, - }, - messages_call_ext::{ - BaseMessagesProofInfo, ReceiveMessagesDeliveryProofInfo, ReceiveMessagesProofInfo, - UnrewardedRelayerOccupation, - }, - mock::*, - DefaultRefundableParachainId, - }; - use bp_header_chain::StoredHeaderDataBuilder; - use bp_messages::{ - DeliveredMessages, InboundLaneData, MessageNonce, MessagesOperatingMode, OutboundLaneData, - UnrewardedRelayer, UnrewardedRelayersState, - }; - use bp_parachains::{BestParaHeadHash, ParaInfo}; - use bp_polkadot_core::parachains::{ParaHeadsProof, ParaId}; - use bp_runtime::{BasicOperatingMode, HeaderId}; - use bp_test_utils::{make_default_justification, test_keyring, TEST_GRANDPA_SET_ID}; - use frame_support::{ - assert_storage_noop, parameter_types, - traits::{fungible::Mutate, ReservableCurrency}, - weights::Weight, - }; - use pallet_bridge_grandpa::{Call as GrandpaCall, Pallet as GrandpaPallet, StoredAuthoritySet}; - use pallet_bridge_messages::{Call as MessagesCall, Pallet as MessagesPallet}; - use pallet_bridge_parachains::{ - Call as ParachainsCall, Pallet as ParachainsPallet, RelayBlockHash, - }; - use sp_runtime::{ - traits::{ConstU64, DispatchTransaction, Header as HeaderT}, - transaction_validity::{InvalidTransaction, TransactionValidity, ValidTransaction}, - DispatchError, - }; - - parameter_types! 
{ - pub TestParachain: u32 = 1000; - pub TestLaneId: LaneId = TEST_LANE_ID; - pub MsgProofsRewardsAccount: RewardsAccountParams = RewardsAccountParams::new( - TEST_LANE_ID, - TEST_BRIDGED_CHAIN_ID, - RewardsAccountOwner::ThisChain, - ); - pub MsgDeliveryProofsRewardsAccount: RewardsAccountParams = RewardsAccountParams::new( - TEST_LANE_ID, - TEST_BRIDGED_CHAIN_ID, - RewardsAccountOwner::BridgedChain, - ); - } - - bp_runtime::generate_static_str_provider!(TestExtension); - - type TestMessagesExtensionProvider = RefundBridgedMessages< - TestRuntime, - RefundableMessagesLane<(), TestLaneId>, - ActualFeeRefund, - ConstU64<1>, - StrTestExtension, - >; - type TestMessagesExtension = RefundTransactionExtensionAdapter; - type TestGrandpaExtensionProvider = RefundBridgedGrandpaMessages< - TestRuntime, - (), - RefundableMessagesLane<(), TestLaneId>, - ActualFeeRefund, - ConstU64<1>, - StrTestExtension, - >; - type TestGrandpaExtension = RefundTransactionExtensionAdapter; - type TestExtensionProvider = RefundBridgedParachainMessages< - TestRuntime, - DefaultRefundableParachainId<(), TestParachain>, - RefundableMessagesLane<(), TestLaneId>, - ActualFeeRefund, - ConstU64<1>, - StrTestExtension, - >; - type TestExtension = RefundTransactionExtensionAdapter; - - fn initial_balance_of_relayer_account_at_this_chain() -> ThisChainBalance { - let test_stake: ThisChainBalance = TestStake::get(); - ExistentialDeposit::get().saturating_add(test_stake * 100) - } - - // in tests, the following accounts are equal (because of how `into_sub_account_truncating` - // works) - - fn delivery_rewards_account() -> ThisChainAccountId { - TestPaymentProcedure::rewards_account(MsgProofsRewardsAccount::get()) - } - - fn confirmation_rewards_account() -> ThisChainAccountId { - TestPaymentProcedure::rewards_account(MsgDeliveryProofsRewardsAccount::get()) - } - - pub fn relayer_account_at_this_chain() -> ThisChainAccountId { - 0 - } - - fn relayer_account_at_bridged_chain() -> BridgedChainAccountId { - 0 
- } - - pub fn initialize_environment( - best_relay_header_number: RelayBlockNumber, - parachain_head_at_relay_header_number: RelayBlockNumber, - best_message: MessageNonce, - ) { - let authorities = test_keyring().into_iter().map(|(a, w)| (a.into(), w)).collect(); - let best_relay_header = HeaderId(best_relay_header_number, RelayBlockHash::default()); - pallet_bridge_grandpa::CurrentAuthoritySet::::put( - StoredAuthoritySet::try_new(authorities, TEST_GRANDPA_SET_ID).unwrap(), - ); - pallet_bridge_grandpa::BestFinalized::::put(best_relay_header); - pallet_bridge_grandpa::ImportedHeaders::::insert( - best_relay_header.hash(), - bp_test_utils::test_header::(0).build(), - ); - - let para_id = ParaId(TestParachain::get()); - let para_info = ParaInfo { - best_head_hash: BestParaHeadHash { - at_relay_block_number: parachain_head_at_relay_header_number, - head_hash: [parachain_head_at_relay_header_number as u8; 32].into(), - }, - next_imported_hash_position: 0, - }; - pallet_bridge_parachains::ParasInfo::::insert(para_id, para_info); - - let lane_id = TestLaneId::get(); - let in_lane_data = - InboundLaneData { last_confirmed_nonce: best_message, ..Default::default() }; - pallet_bridge_messages::InboundLanes::::insert(lane_id, in_lane_data); - - let out_lane_data = - OutboundLaneData { latest_received_nonce: best_message, ..Default::default() }; - pallet_bridge_messages::OutboundLanes::::insert(lane_id, out_lane_data); - - Balances::mint_into(&delivery_rewards_account(), ExistentialDeposit::get()).unwrap(); - Balances::mint_into(&confirmation_rewards_account(), ExistentialDeposit::get()).unwrap(); - Balances::mint_into( - &relayer_account_at_this_chain(), - initial_balance_of_relayer_account_at_this_chain(), - ) - .unwrap(); - } - - fn submit_relay_header_call(relay_header_number: RelayBlockNumber) -> RuntimeCall { - let relay_header = BridgedChainHeader::new( - relay_header_number, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - 
); - let relay_justification = make_default_justification(&relay_header); - - RuntimeCall::BridgeGrandpa(GrandpaCall::submit_finality_proof { - finality_target: Box::new(relay_header), - justification: relay_justification, - }) - } - - pub fn submit_relay_header_call_ex(relay_header_number: RelayBlockNumber) -> RuntimeCall { - let relay_header = BridgedChainHeader::new( - relay_header_number, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - ); - let relay_justification = make_default_justification(&relay_header); - - RuntimeCall::BridgeGrandpa(GrandpaCall::submit_finality_proof_ex { - finality_target: Box::new(relay_header), - justification: relay_justification, - current_set_id: TEST_GRANDPA_SET_ID, - is_free_execution_expected: false, - }) - } - - fn submit_parachain_head_call( - parachain_head_at_relay_header_number: RelayBlockNumber, - ) -> RuntimeCall { - RuntimeCall::BridgeParachains(ParachainsCall::submit_parachain_heads { - at_relay_block: (parachain_head_at_relay_header_number, RelayBlockHash::default()), - parachains: vec![( - ParaId(TestParachain::get()), - [parachain_head_at_relay_header_number as u8; 32].into(), - )], - parachain_heads_proof: ParaHeadsProof { storage_proof: vec![] }, - }) - } - - pub fn submit_parachain_head_call_ex( - parachain_head_at_relay_header_number: RelayBlockNumber, - ) -> RuntimeCall { - RuntimeCall::BridgeParachains(ParachainsCall::submit_parachain_heads_ex { - at_relay_block: (parachain_head_at_relay_header_number, RelayBlockHash::default()), - parachains: vec![( - ParaId(TestParachain::get()), - [parachain_head_at_relay_header_number as u8; 32].into(), - )], - parachain_heads_proof: ParaHeadsProof { storage_proof: vec![] }, - is_free_execution_expected: false, - }) - } - - fn message_delivery_call(best_message: MessageNonce) -> RuntimeCall { - RuntimeCall::BridgeMessages(MessagesCall::receive_messages_proof { - relayer_id_at_bridged_chain: relayer_account_at_bridged_chain(), - 
proof: FromBridgedChainMessagesProof { - bridged_header_hash: Default::default(), - storage_proof: vec![], - lane: TestLaneId::get(), - nonces_start: pallet_bridge_messages::InboundLanes::::get( - TEST_LANE_ID, - ) - .last_delivered_nonce() + - 1, - nonces_end: best_message, - }, - messages_count: 1, - dispatch_weight: Weight::zero(), - }) - } - - fn message_confirmation_call(best_message: MessageNonce) -> RuntimeCall { - RuntimeCall::BridgeMessages(MessagesCall::receive_messages_delivery_proof { - proof: FromBridgedChainMessagesDeliveryProof { - bridged_header_hash: Default::default(), - storage_proof: vec![], - lane: TestLaneId::get(), - }, - relayers_state: UnrewardedRelayersState { - last_delivered_nonce: best_message, - ..Default::default() - }, - }) - } - - fn parachain_finality_and_delivery_batch_call( - parachain_head_at_relay_header_number: RelayBlockNumber, - best_message: MessageNonce, - ) -> RuntimeCall { - RuntimeCall::Utility(UtilityCall::batch_all { - calls: vec![ - submit_parachain_head_call(parachain_head_at_relay_header_number), - message_delivery_call(best_message), - ], - }) - } - - fn parachain_finality_and_confirmation_batch_call( - parachain_head_at_relay_header_number: RelayBlockNumber, - best_message: MessageNonce, - ) -> RuntimeCall { - RuntimeCall::Utility(UtilityCall::batch_all { - calls: vec![ - submit_parachain_head_call(parachain_head_at_relay_header_number), - message_confirmation_call(best_message), - ], - }) - } - - fn relay_finality_and_delivery_batch_call( - relay_header_number: RelayBlockNumber, - best_message: MessageNonce, - ) -> RuntimeCall { - RuntimeCall::Utility(UtilityCall::batch_all { - calls: vec![ - submit_relay_header_call(relay_header_number), - message_delivery_call(best_message), - ], - }) - } - - fn relay_finality_and_delivery_batch_call_ex( - relay_header_number: RelayBlockNumber, - best_message: MessageNonce, - ) -> RuntimeCall { - RuntimeCall::Utility(UtilityCall::batch_all { - calls: vec![ - 
submit_relay_header_call_ex(relay_header_number), - message_delivery_call(best_message), - ], - }) - } - - fn relay_finality_and_confirmation_batch_call( - relay_header_number: RelayBlockNumber, - best_message: MessageNonce, - ) -> RuntimeCall { - RuntimeCall::Utility(UtilityCall::batch_all { - calls: vec![ - submit_relay_header_call(relay_header_number), - message_confirmation_call(best_message), - ], - }) - } - - fn relay_finality_and_confirmation_batch_call_ex( - relay_header_number: RelayBlockNumber, - best_message: MessageNonce, - ) -> RuntimeCall { - RuntimeCall::Utility(UtilityCall::batch_all { - calls: vec![ - submit_relay_header_call_ex(relay_header_number), - message_confirmation_call(best_message), - ], - }) - } - - fn all_finality_and_delivery_batch_call( - relay_header_number: RelayBlockNumber, - parachain_head_at_relay_header_number: RelayBlockNumber, - best_message: MessageNonce, - ) -> RuntimeCall { - RuntimeCall::Utility(UtilityCall::batch_all { - calls: vec![ - submit_relay_header_call(relay_header_number), - submit_parachain_head_call(parachain_head_at_relay_header_number), - message_delivery_call(best_message), - ], - }) - } - - fn all_finality_and_delivery_batch_call_ex( - relay_header_number: RelayBlockNumber, - parachain_head_at_relay_header_number: RelayBlockNumber, - best_message: MessageNonce, - ) -> RuntimeCall { - RuntimeCall::Utility(UtilityCall::batch_all { - calls: vec![ - submit_relay_header_call_ex(relay_header_number), - submit_parachain_head_call_ex(parachain_head_at_relay_header_number), - message_delivery_call(best_message), - ], - }) - } - - fn all_finality_and_confirmation_batch_call( - relay_header_number: RelayBlockNumber, - parachain_head_at_relay_header_number: RelayBlockNumber, - best_message: MessageNonce, - ) -> RuntimeCall { - RuntimeCall::Utility(UtilityCall::batch_all { - calls: vec![ - submit_relay_header_call(relay_header_number), - submit_parachain_head_call(parachain_head_at_relay_header_number), - 
message_confirmation_call(best_message), - ], - }) - } - - fn all_finality_and_confirmation_batch_call_ex( - relay_header_number: RelayBlockNumber, - parachain_head_at_relay_header_number: RelayBlockNumber, - best_message: MessageNonce, - ) -> RuntimeCall { - RuntimeCall::Utility(UtilityCall::batch_all { - calls: vec![ - submit_relay_header_call_ex(relay_header_number), - submit_parachain_head_call_ex(parachain_head_at_relay_header_number), - message_confirmation_call(best_message), - ], - }) - } - - fn all_finality_pre_dispatch_data() -> PreDispatchData { - PreDispatchData { - relayer: relayer_account_at_this_chain(), - call_info: CallInfo::AllFinalityAndMsgs( - SubmitFinalityProofInfo { - block_number: 200, - current_set_id: None, - extra_weight: Weight::zero(), - extra_size: 0, - is_mandatory: false, - is_free_execution_expected: false, - }, - SubmitParachainHeadsInfo { - at_relay_block: (200, [0u8; 32].into()), - para_id: ParaId(TestParachain::get()), - para_head_hash: [200u8; 32].into(), - is_free_execution_expected: false, - }, - MessagesCallInfo::ReceiveMessagesProof(ReceiveMessagesProofInfo { - base: BaseMessagesProofInfo { - lane_id: TEST_LANE_ID, - bundled_range: 101..=200, - best_stored_nonce: 100, - }, - unrewarded_relayers: UnrewardedRelayerOccupation { - free_relayer_slots: MaxUnrewardedRelayerEntriesAtInboundLane::get(), - free_message_slots: MaxUnconfirmedMessagesAtInboundLane::get(), - }, - }), - ), - } - } - - fn all_finality_pre_dispatch_data_ex() -> PreDispatchData { - let mut data = all_finality_pre_dispatch_data(); - data.call_info.submit_finality_proof_info_mut().unwrap().current_set_id = - Some(TEST_GRANDPA_SET_ID); - data - } - - fn all_finality_confirmation_pre_dispatch_data() -> PreDispatchData { - PreDispatchData { - relayer: relayer_account_at_this_chain(), - call_info: CallInfo::AllFinalityAndMsgs( - SubmitFinalityProofInfo { - block_number: 200, - current_set_id: None, - extra_weight: Weight::zero(), - extra_size: 0, - is_mandatory: 
false, - is_free_execution_expected: false, - }, - SubmitParachainHeadsInfo { - at_relay_block: (200, [0u8; 32].into()), - para_id: ParaId(TestParachain::get()), - para_head_hash: [200u8; 32].into(), - is_free_execution_expected: false, - }, - MessagesCallInfo::ReceiveMessagesDeliveryProof(ReceiveMessagesDeliveryProofInfo( - BaseMessagesProofInfo { - lane_id: TEST_LANE_ID, - bundled_range: 101..=200, - best_stored_nonce: 100, - }, - )), - ), - } - } - - fn all_finality_confirmation_pre_dispatch_data_ex() -> PreDispatchData { - let mut data = all_finality_confirmation_pre_dispatch_data(); - data.call_info.submit_finality_proof_info_mut().unwrap().current_set_id = - Some(TEST_GRANDPA_SET_ID); - data - } - - fn relay_finality_pre_dispatch_data() -> PreDispatchData { - PreDispatchData { - relayer: relayer_account_at_this_chain(), - call_info: CallInfo::RelayFinalityAndMsgs( - SubmitFinalityProofInfo { - block_number: 200, - current_set_id: None, - extra_weight: Weight::zero(), - extra_size: 0, - is_mandatory: false, - is_free_execution_expected: false, - }, - MessagesCallInfo::ReceiveMessagesProof(ReceiveMessagesProofInfo { - base: BaseMessagesProofInfo { - lane_id: TEST_LANE_ID, - bundled_range: 101..=200, - best_stored_nonce: 100, - }, - unrewarded_relayers: UnrewardedRelayerOccupation { - free_relayer_slots: MaxUnrewardedRelayerEntriesAtInboundLane::get(), - free_message_slots: MaxUnconfirmedMessagesAtInboundLane::get(), - }, - }), - ), - } - } - - fn relay_finality_pre_dispatch_data_ex() -> PreDispatchData { - let mut data = relay_finality_pre_dispatch_data(); - data.call_info.submit_finality_proof_info_mut().unwrap().current_set_id = - Some(TEST_GRANDPA_SET_ID); - data - } - - fn relay_finality_confirmation_pre_dispatch_data() -> PreDispatchData { - PreDispatchData { - relayer: relayer_account_at_this_chain(), - call_info: CallInfo::RelayFinalityAndMsgs( - SubmitFinalityProofInfo { - block_number: 200, - current_set_id: None, - extra_weight: Weight::zero(), - 
extra_size: 0, - is_mandatory: false, - is_free_execution_expected: false, - }, - MessagesCallInfo::ReceiveMessagesDeliveryProof(ReceiveMessagesDeliveryProofInfo( - BaseMessagesProofInfo { - lane_id: TEST_LANE_ID, - bundled_range: 101..=200, - best_stored_nonce: 100, - }, - )), - ), - } - } - - fn relay_finality_confirmation_pre_dispatch_data_ex() -> PreDispatchData { - let mut data = relay_finality_confirmation_pre_dispatch_data(); - data.call_info.submit_finality_proof_info_mut().unwrap().current_set_id = - Some(TEST_GRANDPA_SET_ID); - data - } - - fn parachain_finality_pre_dispatch_data() -> PreDispatchData { - PreDispatchData { - relayer: relayer_account_at_this_chain(), - call_info: CallInfo::ParachainFinalityAndMsgs( - SubmitParachainHeadsInfo { - at_relay_block: (200, [0u8; 32].into()), - para_id: ParaId(TestParachain::get()), - para_head_hash: [200u8; 32].into(), - is_free_execution_expected: false, - }, - MessagesCallInfo::ReceiveMessagesProof(ReceiveMessagesProofInfo { - base: BaseMessagesProofInfo { - lane_id: TEST_LANE_ID, - bundled_range: 101..=200, - best_stored_nonce: 100, - }, - unrewarded_relayers: UnrewardedRelayerOccupation { - free_relayer_slots: MaxUnrewardedRelayerEntriesAtInboundLane::get(), - free_message_slots: MaxUnconfirmedMessagesAtInboundLane::get(), - }, - }), - ), - } - } - - fn parachain_finality_confirmation_pre_dispatch_data() -> PreDispatchData { - PreDispatchData { - relayer: relayer_account_at_this_chain(), - call_info: CallInfo::ParachainFinalityAndMsgs( - SubmitParachainHeadsInfo { - at_relay_block: (200, [0u8; 32].into()), - para_id: ParaId(TestParachain::get()), - para_head_hash: [200u8; 32].into(), - is_free_execution_expected: false, - }, - MessagesCallInfo::ReceiveMessagesDeliveryProof(ReceiveMessagesDeliveryProofInfo( - BaseMessagesProofInfo { - lane_id: TEST_LANE_ID, - bundled_range: 101..=200, - best_stored_nonce: 100, - }, - )), - ), - } - } - - fn delivery_pre_dispatch_data() -> PreDispatchData { - PreDispatchData { 
- relayer: relayer_account_at_this_chain(), - call_info: CallInfo::Msgs(MessagesCallInfo::ReceiveMessagesProof( - ReceiveMessagesProofInfo { - base: BaseMessagesProofInfo { - lane_id: TEST_LANE_ID, - bundled_range: 101..=200, - best_stored_nonce: 100, - }, - unrewarded_relayers: UnrewardedRelayerOccupation { - free_relayer_slots: MaxUnrewardedRelayerEntriesAtInboundLane::get(), - free_message_slots: MaxUnconfirmedMessagesAtInboundLane::get(), - }, - }, - )), - } - } - - fn confirmation_pre_dispatch_data() -> PreDispatchData { - PreDispatchData { - relayer: relayer_account_at_this_chain(), - call_info: CallInfo::Msgs(MessagesCallInfo::ReceiveMessagesDeliveryProof( - ReceiveMessagesDeliveryProofInfo(BaseMessagesProofInfo { - lane_id: TEST_LANE_ID, - bundled_range: 101..=200, - best_stored_nonce: 100, - }), - )), - } - } - - fn set_bundled_range_end( - mut pre_dispatch_data: PreDispatchData, - end: MessageNonce, - ) -> PreDispatchData { - let msg_info = match pre_dispatch_data.call_info { - CallInfo::AllFinalityAndMsgs(_, _, ref mut info) => info, - CallInfo::RelayFinalityAndMsgs(_, ref mut info) => info, - CallInfo::ParachainFinalityAndMsgs(_, ref mut info) => info, - CallInfo::Msgs(ref mut info) => info, - }; - - if let MessagesCallInfo::ReceiveMessagesProof(ref mut msg_info) = msg_info { - msg_info.base.bundled_range = *msg_info.base.bundled_range.start()..=end - } - - pre_dispatch_data - } - - fn run_validate(call: RuntimeCall) -> TransactionValidity { - let extension: TestExtension = - RefundTransactionExtensionAdapter(RefundBridgedParachainMessages(PhantomData)); - extension - .validate_only( - Some(relayer_account_at_this_chain()).into(), - &call, - &DispatchInfo::default(), - 0, - ) - .map(|res| res.0) - } - - fn run_grandpa_validate(call: RuntimeCall) -> TransactionValidity { - let extension: TestGrandpaExtension = - RefundTransactionExtensionAdapter(RefundBridgedGrandpaMessages(PhantomData)); - extension - .validate_only( - 
Some(relayer_account_at_this_chain()).into(), - &call, - &DispatchInfo::default(), - 0, - ) - .map(|res| res.0) - } - - fn run_messages_validate(call: RuntimeCall) -> TransactionValidity { - let extension: TestMessagesExtension = - RefundTransactionExtensionAdapter(RefundBridgedMessages(PhantomData)); - extension - .validate_only( - Some(relayer_account_at_this_chain()).into(), - &call, - &DispatchInfo::default(), - 0, - ) - .map(|res| res.0) - } - - fn ignore_priority(tx: TransactionValidity) -> TransactionValidity { - tx.map(|mut tx| { - tx.priority = 0; - tx - }) - } - - fn run_pre_dispatch( - call: RuntimeCall, - ) -> Result>, TransactionValidityError> { - let extension: TestExtension = - RefundTransactionExtensionAdapter(RefundBridgedParachainMessages(PhantomData)); - extension - .validate_and_prepare( - Some(relayer_account_at_this_chain()).into(), - &call, - &DispatchInfo::default(), - 0, - ) - .map(|(pre, _)| pre) - } - - fn run_grandpa_pre_dispatch( - call: RuntimeCall, - ) -> Result>, TransactionValidityError> { - let extension: TestGrandpaExtension = - RefundTransactionExtensionAdapter(RefundBridgedGrandpaMessages(PhantomData)); - extension - .validate_and_prepare( - Some(relayer_account_at_this_chain()).into(), - &call, - &DispatchInfo::default(), - 0, - ) - .map(|(pre, _)| pre) - } - - fn run_messages_pre_dispatch( - call: RuntimeCall, - ) -> Result>, TransactionValidityError> { - let extension: TestMessagesExtension = - RefundTransactionExtensionAdapter(RefundBridgedMessages(PhantomData)); - extension - .validate_and_prepare( - Some(relayer_account_at_this_chain()).into(), - &call, - &DispatchInfo::default(), - 0, - ) - .map(|(pre, _)| pre) - } - - fn dispatch_info() -> DispatchInfo { - DispatchInfo { - weight: Weight::from_parts( - frame_support::weights::constants::WEIGHT_REF_TIME_PER_SECOND, - 0, - ), - class: frame_support::dispatch::DispatchClass::Normal, - pays_fee: frame_support::dispatch::Pays::Yes, - } - } - - fn post_dispatch_info() -> 
PostDispatchInfo { - PostDispatchInfo { actual_weight: None, pays_fee: frame_support::dispatch::Pays::Yes } - } - - fn run_post_dispatch( - pre_dispatch_data: Option>, - dispatch_result: DispatchResult, - ) { - let post_dispatch_result = TestExtension::post_dispatch( - pre_dispatch_data, - &dispatch_info(), - &post_dispatch_info(), - 1024, - &dispatch_result, - &(), - ); - assert_eq!(post_dispatch_result, Ok(())); - } - - fn expected_delivery_reward() -> ThisChainBalance { - let mut post_dispatch_info = post_dispatch_info(); - let extra_weight = ::WeightInfo::extra_weight_of_successful_receive_messages_proof_call(); - post_dispatch_info.actual_weight = - Some(dispatch_info().weight.saturating_sub(extra_weight)); - pallet_transaction_payment::Pallet::::compute_actual_fee( - 1024, - &dispatch_info(), - &post_dispatch_info, - Zero::zero(), - ) - } - - fn expected_confirmation_reward() -> ThisChainBalance { - pallet_transaction_payment::Pallet::::compute_actual_fee( - 1024, - &dispatch_info(), - &post_dispatch_info(), - Zero::zero(), - ) - } - - #[test] - fn validate_doesnt_boost_transaction_priority_if_relayer_is_not_registered() { - run_test(|| { - initialize_environment(100, 100, 100); - Balances::set_balance(&relayer_account_at_this_chain(), ExistentialDeposit::get()); - - // message delivery is failing - let fns = [run_validate, run_grandpa_validate, run_messages_validate]; - for f in fns { - assert_eq!(f(message_delivery_call(200)), Ok(Default::default()),); - assert_eq!( - f(parachain_finality_and_delivery_batch_call(200, 200)), - Ok(Default::default()), - ); - assert_eq!( - f(all_finality_and_delivery_batch_call(200, 200, 200)), - Ok(Default::default()), - ); - assert_eq!( - f(all_finality_and_delivery_batch_call_ex(200, 200, 200)), - Ok(Default::default()), - ); - } - - // message confirmation validation is passing - assert_eq!( - ignore_priority(run_validate(message_confirmation_call(200))), - Ok(Default::default()), - ); - assert_eq!( - 
ignore_priority(run_messages_validate(message_confirmation_call(200))), - Ok(Default::default()), - ); - assert_eq!( - ignore_priority(run_validate(parachain_finality_and_confirmation_batch_call( - 200, 200 - ))), - Ok(Default::default()), - ); - assert_eq!( - ignore_priority(run_validate(all_finality_and_confirmation_batch_call( - 200, 200, 200 - ))), - Ok(Default::default()), - ); - assert_eq!( - ignore_priority(run_validate(all_finality_and_confirmation_batch_call_ex( - 200, 200, 200 - ))), - Ok(Default::default()), - ); - }); - } - - #[test] - fn validate_boosts_priority_of_message_delivery_transactons() { - run_test(|| { - initialize_environment(100, 100, 100); - - BridgeRelayers::register(RuntimeOrigin::signed(relayer_account_at_this_chain()), 1000) - .unwrap(); - - let fns = [run_validate, run_grandpa_validate, run_messages_validate]; - for f in fns { - let priority_of_100_messages_delivery = - f(message_delivery_call(200)).unwrap().priority; - let priority_of_200_messages_delivery = - f(message_delivery_call(300)).unwrap().priority; - assert!( - priority_of_200_messages_delivery > priority_of_100_messages_delivery, - "Invalid priorities: {} for 200 messages vs {} for 100 messages", - priority_of_200_messages_delivery, - priority_of_100_messages_delivery, - ); - - let priority_of_100_messages_confirmation = - f(message_confirmation_call(200)).unwrap().priority; - let priority_of_200_messages_confirmation = - f(message_confirmation_call(300)).unwrap().priority; - assert_eq!( - priority_of_100_messages_confirmation, - priority_of_200_messages_confirmation - ); - } - }); - } - - #[test] - fn validate_does_not_boost_priority_of_message_delivery_transactons_with_too_many_messages() { - run_test(|| { - initialize_environment(100, 100, 100); - - BridgeRelayers::register(RuntimeOrigin::signed(relayer_account_at_this_chain()), 1000) - .unwrap(); - - let fns = [run_validate, run_grandpa_validate, run_messages_validate]; - for f in fns { - let 
priority_of_max_messages_delivery = - f(message_delivery_call(100 + MaxUnconfirmedMessagesAtInboundLane::get())) - .unwrap() - .priority; - let priority_of_more_than_max_messages_delivery = - f(message_delivery_call(100 + MaxUnconfirmedMessagesAtInboundLane::get() + 1)) - .unwrap() - .priority; - - assert!( - priority_of_max_messages_delivery > priority_of_more_than_max_messages_delivery, - "Invalid priorities: {} for MAX messages vs {} for MAX+1 messages", - priority_of_max_messages_delivery, - priority_of_more_than_max_messages_delivery, - ); - } - }); - } - - #[test] - fn validate_allows_non_obsolete_transactions() { - run_test(|| { - initialize_environment(100, 100, 100); - - assert_eq!( - ignore_priority(run_validate(message_delivery_call(200))), - Ok(ValidTransaction::default()), - ); - assert_eq!( - ignore_priority(run_validate(message_confirmation_call(200))), - Ok(ValidTransaction::default()), - ); - - assert_eq!( - ignore_priority(run_messages_validate(message_delivery_call(200))), - Ok(ValidTransaction::default()), - ); - assert_eq!( - ignore_priority(run_messages_validate(message_confirmation_call(200))), - Ok(ValidTransaction::default()), - ); - - assert_eq!( - ignore_priority(run_validate(parachain_finality_and_delivery_batch_call(200, 200))), - Ok(ValidTransaction::default()), - ); - assert_eq!( - ignore_priority(run_validate(parachain_finality_and_confirmation_batch_call( - 200, 200 - ))), - Ok(ValidTransaction::default()), - ); - - assert_eq!( - ignore_priority(run_validate(all_finality_and_delivery_batch_call(200, 200, 200))), - Ok(ValidTransaction::default()), - ); - assert_eq!( - ignore_priority(run_validate(all_finality_and_delivery_batch_call_ex( - 200, 200, 200 - ))), - Ok(ValidTransaction::default()), - ); - assert_eq!( - ignore_priority(run_validate(all_finality_and_confirmation_batch_call( - 200, 200, 200 - ))), - Ok(ValidTransaction::default()), - ); - assert_eq!( - ignore_priority(run_validate(all_finality_and_confirmation_batch_call_ex( 
- 200, 200, 200 - ))), - Ok(ValidTransaction::default()), - ); - }); - } - - #[test] - fn ext_rejects_batch_with_obsolete_relay_chain_header() { - run_test(|| { - initialize_environment(100, 100, 100); - - assert_eq!( - run_pre_dispatch(all_finality_and_delivery_batch_call(100, 200, 200)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - assert_eq!( - run_pre_dispatch(all_finality_and_delivery_batch_call_ex(100, 200, 200)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - - assert_eq!( - run_validate(all_finality_and_delivery_batch_call(100, 200, 200)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - assert_eq!( - run_validate(all_finality_and_delivery_batch_call_ex(100, 200, 200)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - }); - } - - #[test] - fn ext_rejects_batch_with_obsolete_parachain_head() { - run_test(|| { - initialize_environment(100, 100, 100); - - assert_eq!( - run_pre_dispatch(all_finality_and_delivery_batch_call(101, 100, 200)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - assert_eq!( - run_pre_dispatch(all_finality_and_delivery_batch_call_ex(101, 100, 200)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - assert_eq!( - run_validate(all_finality_and_delivery_batch_call(101, 100, 200)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - assert_eq!( - run_validate(all_finality_and_delivery_batch_call_ex(101, 100, 200)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - - assert_eq!( - run_pre_dispatch(parachain_finality_and_delivery_batch_call(100, 200)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - assert_eq!( - run_validate(parachain_finality_and_delivery_batch_call(100, 200)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - }); - } - - #[test] - fn 
ext_rejects_batch_with_obsolete_messages() { - run_test(|| { - initialize_environment(100, 100, 100); - - assert_eq!( - run_pre_dispatch(all_finality_and_delivery_batch_call(200, 200, 100)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - assert_eq!( - run_pre_dispatch(all_finality_and_delivery_batch_call_ex(200, 200, 100)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - assert_eq!( - run_pre_dispatch(all_finality_and_confirmation_batch_call(200, 200, 100)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - assert_eq!( - run_pre_dispatch(all_finality_and_confirmation_batch_call_ex(200, 200, 100)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - - assert_eq!( - run_validate(all_finality_and_delivery_batch_call(200, 200, 100)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - assert_eq!( - run_validate(all_finality_and_delivery_batch_call_ex(200, 200, 100)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - assert_eq!( - run_validate(all_finality_and_confirmation_batch_call(200, 200, 100)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - assert_eq!( - run_validate(all_finality_and_confirmation_batch_call_ex(200, 200, 100)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - - assert_eq!( - run_pre_dispatch(parachain_finality_and_delivery_batch_call(200, 100)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - assert_eq!( - run_pre_dispatch(parachain_finality_and_confirmation_batch_call(200, 100)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - - assert_eq!( - run_validate(parachain_finality_and_delivery_batch_call(200, 100)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - assert_eq!( - run_validate(parachain_finality_and_confirmation_batch_call(200, 100)), - 
Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - }); - } - - #[test] - fn ext_rejects_batch_with_grandpa_finality_proof_when_grandpa_pallet_is_halted() { - run_test(|| { - initialize_environment(100, 100, 100); - - GrandpaPallet::::set_operating_mode( - RuntimeOrigin::root(), - BasicOperatingMode::Halted, - ) - .unwrap(); - - assert_eq!( - run_pre_dispatch(all_finality_and_delivery_batch_call(200, 200, 200)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Call)), - ); - assert_eq!( - run_pre_dispatch(all_finality_and_delivery_batch_call_ex(200, 200, 200)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Call)), - ); - assert_eq!( - run_pre_dispatch(all_finality_and_confirmation_batch_call(200, 200, 200)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Call)), - ); - assert_eq!( - run_pre_dispatch(all_finality_and_confirmation_batch_call_ex(200, 200, 200)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Call)), - ); - }); - } - - #[test] - fn ext_rejects_batch_with_parachain_finality_proof_when_parachains_pallet_is_halted() { - run_test(|| { - initialize_environment(100, 100, 100); - - ParachainsPallet::::set_operating_mode( - RuntimeOrigin::root(), - BasicOperatingMode::Halted, - ) - .unwrap(); - - assert_eq!( - run_pre_dispatch(all_finality_and_delivery_batch_call(200, 200, 200)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Call)), - ); - assert_eq!( - run_pre_dispatch(all_finality_and_delivery_batch_call_ex(200, 200, 200)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Call)), - ); - assert_eq!( - run_pre_dispatch(all_finality_and_confirmation_batch_call(200, 200, 200)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Call)), - ); - assert_eq!( - run_pre_dispatch(all_finality_and_confirmation_batch_call_ex(200, 200, 200)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Call)), - ); - - assert_eq!( - 
run_pre_dispatch(parachain_finality_and_delivery_batch_call(200, 200)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Call)), - ); - assert_eq!( - run_pre_dispatch(parachain_finality_and_confirmation_batch_call(200, 200)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Call)), - ); - }); - } - - #[test] - fn ext_rejects_transaction_when_messages_pallet_is_halted() { - run_test(|| { - initialize_environment(100, 100, 100); - - MessagesPallet::::set_operating_mode( - RuntimeOrigin::root(), - MessagesOperatingMode::Basic(BasicOperatingMode::Halted), - ) - .unwrap(); - - assert_eq!( - run_pre_dispatch(all_finality_and_delivery_batch_call(200, 200, 200)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Call)), - ); - assert_eq!( - run_pre_dispatch(all_finality_and_delivery_batch_call_ex(200, 200, 200)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Call)), - ); - assert_eq!( - run_pre_dispatch(all_finality_and_confirmation_batch_call(200, 200, 200)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Call)), - ); - assert_eq!( - run_pre_dispatch(all_finality_and_confirmation_batch_call_ex(200, 200, 200)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Call)), - ); - - assert_eq!( - run_pre_dispatch(parachain_finality_and_delivery_batch_call(200, 200)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Call)), - ); - assert_eq!( - run_pre_dispatch(parachain_finality_and_confirmation_batch_call(200, 200)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Call)), - ); - - assert_eq!( - run_pre_dispatch(message_delivery_call(200)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Call)), - ); - assert_eq!( - run_pre_dispatch(message_confirmation_call(200)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Call)), - ); - }); - } - - #[test] - fn pre_dispatch_parses_batch_with_relay_chain_and_parachain_headers() { - run_test(|| { - initialize_environment(100, 
100, 100); - - assert_eq!( - run_pre_dispatch(all_finality_and_delivery_batch_call(200, 200, 200)), - Ok(Some(all_finality_pre_dispatch_data())), - ); - assert_eq!( - run_pre_dispatch(all_finality_and_delivery_batch_call_ex(200, 200, 200)), - Ok(Some(all_finality_pre_dispatch_data_ex())), - ); - assert_eq!( - run_pre_dispatch(all_finality_and_confirmation_batch_call(200, 200, 200)), - Ok(Some(all_finality_confirmation_pre_dispatch_data())), - ); - assert_eq!( - run_pre_dispatch(all_finality_and_confirmation_batch_call_ex(200, 200, 200)), - Ok(Some(all_finality_confirmation_pre_dispatch_data_ex())), - ); - }); - } - - #[test] - fn pre_dispatch_parses_batch_with_parachain_header() { - run_test(|| { - initialize_environment(100, 100, 100); - - assert_eq!( - run_pre_dispatch(parachain_finality_and_delivery_batch_call(200, 200)), - Ok(Some(parachain_finality_pre_dispatch_data())), - ); - assert_eq!( - run_pre_dispatch(parachain_finality_and_confirmation_batch_call(200, 200)), - Ok(Some(parachain_finality_confirmation_pre_dispatch_data())), - ); - }); - } - - #[test] - fn pre_dispatch_fails_to_parse_batch_with_multiple_parachain_headers() { - run_test(|| { - initialize_environment(100, 100, 100); - - let call = RuntimeCall::Utility(UtilityCall::batch_all { - calls: vec![ - RuntimeCall::BridgeParachains(ParachainsCall::submit_parachain_heads { - at_relay_block: (100, RelayBlockHash::default()), - parachains: vec![ - (ParaId(TestParachain::get()), [1u8; 32].into()), - (ParaId(TestParachain::get() + 1), [1u8; 32].into()), - ], - parachain_heads_proof: ParaHeadsProof { storage_proof: vec![] }, - }), - message_delivery_call(200), - ], - }); - - assert_eq!(run_pre_dispatch(call), Ok(None),); - }); - } - - #[test] - fn pre_dispatch_parses_message_transaction() { - run_test(|| { - initialize_environment(100, 100, 100); - - assert_eq!( - run_pre_dispatch(message_delivery_call(200)), - Ok(Some(delivery_pre_dispatch_data())), - ); - assert_eq!( - 
run_pre_dispatch(message_confirmation_call(200)), - Ok(Some(confirmation_pre_dispatch_data())), - ); - }); - } - - #[test] - fn post_dispatch_ignores_unknown_transaction() { - run_test(|| { - assert_storage_noop!(run_post_dispatch(None, Ok(()))); - }); - } - - #[test] - fn post_dispatch_ignores_failed_transaction() { - run_test(|| { - assert_storage_noop!(run_post_dispatch( - Some(all_finality_pre_dispatch_data()), - Err(DispatchError::BadOrigin) - )); - }); - } - - #[test] - fn post_dispatch_ignores_transaction_that_has_not_updated_relay_chain_state() { - run_test(|| { - initialize_environment(100, 200, 200); - - assert_storage_noop!(run_post_dispatch(Some(all_finality_pre_dispatch_data()), Ok(()))); - }); - } - - #[test] - fn post_dispatch_ignores_transaction_that_has_not_updated_parachain_state() { - run_test(|| { - initialize_environment(200, 100, 200); - - assert_storage_noop!(run_post_dispatch(Some(all_finality_pre_dispatch_data()), Ok(()))); - assert_storage_noop!(run_post_dispatch( - Some(parachain_finality_pre_dispatch_data()), - Ok(()) - )); - }); - } - - #[test] - fn post_dispatch_ignores_transaction_that_has_not_delivered_any_messages() { - run_test(|| { - initialize_environment(200, 200, 100); - - assert_storage_noop!(run_post_dispatch(Some(all_finality_pre_dispatch_data()), Ok(()))); - assert_storage_noop!(run_post_dispatch( - Some(parachain_finality_pre_dispatch_data()), - Ok(()) - )); - assert_storage_noop!(run_post_dispatch(Some(delivery_pre_dispatch_data()), Ok(()))); - - assert_storage_noop!(run_post_dispatch( - Some(all_finality_confirmation_pre_dispatch_data()), - Ok(()) - )); - assert_storage_noop!(run_post_dispatch( - Some(parachain_finality_confirmation_pre_dispatch_data()), - Ok(()) - )); - assert_storage_noop!(run_post_dispatch(Some(confirmation_pre_dispatch_data()), Ok(()))); - }); - } - - #[test] - fn post_dispatch_ignores_transaction_that_has_not_delivered_all_messages() { - run_test(|| { - initialize_environment(200, 200, 150); - - 
assert_storage_noop!(run_post_dispatch(Some(all_finality_pre_dispatch_data()), Ok(()))); - assert_storage_noop!(run_post_dispatch( - Some(parachain_finality_pre_dispatch_data()), - Ok(()) - )); - assert_storage_noop!(run_post_dispatch(Some(delivery_pre_dispatch_data()), Ok(()))); - - assert_storage_noop!(run_post_dispatch( - Some(all_finality_confirmation_pre_dispatch_data()), - Ok(()) - )); - assert_storage_noop!(run_post_dispatch( - Some(parachain_finality_confirmation_pre_dispatch_data()), - Ok(()) - )); - assert_storage_noop!(run_post_dispatch(Some(confirmation_pre_dispatch_data()), Ok(()))); - }); - } - - #[test] - fn post_dispatch_refunds_relayer_in_all_finality_batch_with_extra_weight() { - run_test(|| { - initialize_environment(200, 200, 200); - - let mut dispatch_info = dispatch_info(); - dispatch_info.weight = Weight::from_parts( - frame_support::weights::constants::WEIGHT_REF_TIME_PER_SECOND * 2, - 0, - ); - - // without any size/weight refund: we expect regular reward - let pre_dispatch_data = all_finality_pre_dispatch_data(); - let regular_reward = expected_delivery_reward(); - run_post_dispatch(Some(pre_dispatch_data), Ok(())); - assert_eq!( - RelayersPallet::::relayer_reward( - relayer_account_at_this_chain(), - MsgProofsRewardsAccount::get() - ), - Some(regular_reward), - ); - - // now repeat the same with size+weight refund: we expect smaller reward - let mut pre_dispatch_data = all_finality_pre_dispatch_data(); - match pre_dispatch_data.call_info { - CallInfo::AllFinalityAndMsgs(ref mut info, ..) 
=> { - info.extra_weight.set_ref_time( - frame_support::weights::constants::WEIGHT_REF_TIME_PER_SECOND, - ); - info.extra_size = 32; - }, - _ => unreachable!(), - } - run_post_dispatch(Some(pre_dispatch_data), Ok(())); - let reward_after_two_calls = RelayersPallet::::relayer_reward( - relayer_account_at_this_chain(), - MsgProofsRewardsAccount::get(), - ) - .unwrap(); - assert!( - reward_after_two_calls < 2 * regular_reward, - "{} must be < 2 * {}", - reward_after_two_calls, - 2 * regular_reward, - ); - }); - } - - #[test] - fn post_dispatch_refunds_relayer_in_all_finality_batch() { - run_test(|| { - initialize_environment(200, 200, 200); - - run_post_dispatch(Some(all_finality_pre_dispatch_data()), Ok(())); - assert_eq!( - RelayersPallet::::relayer_reward( - relayer_account_at_this_chain(), - MsgProofsRewardsAccount::get() - ), - Some(expected_delivery_reward()), - ); - - run_post_dispatch(Some(all_finality_confirmation_pre_dispatch_data()), Ok(())); - assert_eq!( - RelayersPallet::::relayer_reward( - relayer_account_at_this_chain(), - MsgDeliveryProofsRewardsAccount::get() - ), - Some(expected_confirmation_reward()), - ); - }); - } - - #[test] - fn post_dispatch_refunds_relayer_in_parachain_finality_batch() { - run_test(|| { - initialize_environment(200, 200, 200); - - run_post_dispatch(Some(parachain_finality_pre_dispatch_data()), Ok(())); - assert_eq!( - RelayersPallet::::relayer_reward( - relayer_account_at_this_chain(), - MsgProofsRewardsAccount::get() - ), - Some(expected_delivery_reward()), - ); - - run_post_dispatch(Some(parachain_finality_confirmation_pre_dispatch_data()), Ok(())); - assert_eq!( - RelayersPallet::::relayer_reward( - relayer_account_at_this_chain(), - MsgDeliveryProofsRewardsAccount::get() - ), - Some(expected_confirmation_reward()), - ); - }); - } - - #[test] - fn post_dispatch_refunds_relayer_in_message_transaction() { - run_test(|| { - initialize_environment(200, 200, 200); - - run_post_dispatch(Some(delivery_pre_dispatch_data()), 
Ok(())); - assert_eq!( - RelayersPallet::::relayer_reward( - relayer_account_at_this_chain(), - MsgProofsRewardsAccount::get() - ), - Some(expected_delivery_reward()), - ); - - run_post_dispatch(Some(confirmation_pre_dispatch_data()), Ok(())); - assert_eq!( - RelayersPallet::::relayer_reward( - relayer_account_at_this_chain(), - MsgDeliveryProofsRewardsAccount::get() - ), - Some(expected_confirmation_reward()), - ); - }); - } - - #[test] - fn post_dispatch_slashing_relayer_stake() { - run_test(|| { - initialize_environment(200, 200, 100); - - let delivery_rewards_account_balance = - Balances::free_balance(delivery_rewards_account()); - - let test_stake: ThisChainBalance = TestStake::get(); - Balances::set_balance( - &relayer_account_at_this_chain(), - ExistentialDeposit::get() + test_stake * 10, - ); - - // slashing works for message delivery calls - BridgeRelayers::register(RuntimeOrigin::signed(relayer_account_at_this_chain()), 1000) - .unwrap(); - assert_eq!(Balances::reserved_balance(relayer_account_at_this_chain()), test_stake); - run_post_dispatch(Some(delivery_pre_dispatch_data()), Ok(())); - assert_eq!(Balances::reserved_balance(relayer_account_at_this_chain()), 0); - assert_eq!( - delivery_rewards_account_balance + test_stake, - Balances::free_balance(delivery_rewards_account()) - ); - - BridgeRelayers::register(RuntimeOrigin::signed(relayer_account_at_this_chain()), 1000) - .unwrap(); - assert_eq!(Balances::reserved_balance(relayer_account_at_this_chain()), test_stake); - run_post_dispatch(Some(parachain_finality_pre_dispatch_data()), Ok(())); - assert_eq!(Balances::reserved_balance(relayer_account_at_this_chain()), 0); - assert_eq!( - delivery_rewards_account_balance + test_stake * 2, - Balances::free_balance(delivery_rewards_account()) - ); - - BridgeRelayers::register(RuntimeOrigin::signed(relayer_account_at_this_chain()), 1000) - .unwrap(); - assert_eq!(Balances::reserved_balance(relayer_account_at_this_chain()), test_stake); - 
run_post_dispatch(Some(all_finality_pre_dispatch_data()), Ok(())); - assert_eq!(Balances::reserved_balance(relayer_account_at_this_chain()), 0); - assert_eq!( - delivery_rewards_account_balance + test_stake * 3, - Balances::free_balance(delivery_rewards_account()) - ); - - // reserve doesn't work for message confirmation calls - let confirmation_rewards_account_balance = - Balances::free_balance(confirmation_rewards_account()); - - Balances::reserve(&relayer_account_at_this_chain(), test_stake).unwrap(); - assert_eq!(Balances::reserved_balance(relayer_account_at_this_chain()), test_stake); - - assert_eq!( - confirmation_rewards_account_balance, - Balances::free_balance(confirmation_rewards_account()) - ); - run_post_dispatch(Some(confirmation_pre_dispatch_data()), Ok(())); - assert_eq!(Balances::reserved_balance(relayer_account_at_this_chain()), test_stake); - - run_post_dispatch(Some(parachain_finality_confirmation_pre_dispatch_data()), Ok(())); - assert_eq!(Balances::reserved_balance(relayer_account_at_this_chain()), test_stake); - - run_post_dispatch(Some(all_finality_confirmation_pre_dispatch_data()), Ok(())); - assert_eq!(Balances::reserved_balance(relayer_account_at_this_chain()), test_stake); - - // check that unreserve has happened, not slashing - assert_eq!( - delivery_rewards_account_balance + test_stake * 3, - Balances::free_balance(delivery_rewards_account()) - ); - assert_eq!( - confirmation_rewards_account_balance, - Balances::free_balance(confirmation_rewards_account()) - ); - }); - } - - fn run_analyze_call_result( - pre_dispatch_data: PreDispatchData, - dispatch_result: DispatchResult, - ) -> RelayerAccountAction { - TestExtensionProvider::analyze_call_result( - Some(Some(pre_dispatch_data)), - &dispatch_info(), - &post_dispatch_info(), - 1024, - &dispatch_result, - ) - } - - #[test] - fn analyze_call_result_shall_not_slash_for_transactions_with_too_many_messages() { - run_test(|| { - initialize_environment(100, 100, 100); - - // the 
`analyze_call_result` should return slash if number of bundled messages is - // within reasonable limits - assert_eq!( - run_analyze_call_result(all_finality_pre_dispatch_data(), Ok(())), - RelayerAccountAction::Slash( - relayer_account_at_this_chain(), - MsgProofsRewardsAccount::get() - ), - ); - assert_eq!( - run_analyze_call_result(parachain_finality_pre_dispatch_data(), Ok(())), - RelayerAccountAction::Slash( - relayer_account_at_this_chain(), - MsgProofsRewardsAccount::get() - ), - ); - assert_eq!( - run_analyze_call_result(delivery_pre_dispatch_data(), Ok(())), - RelayerAccountAction::Slash( - relayer_account_at_this_chain(), - MsgProofsRewardsAccount::get() - ), - ); - - // the `analyze_call_result` should not return slash if number of bundled messages is - // larger than the - assert_eq!( - run_analyze_call_result( - set_bundled_range_end(all_finality_pre_dispatch_data(), 1_000_000), - Ok(()) - ), - RelayerAccountAction::None, - ); - assert_eq!( - run_analyze_call_result( - set_bundled_range_end(parachain_finality_pre_dispatch_data(), 1_000_000), - Ok(()) - ), - RelayerAccountAction::None, - ); - assert_eq!( - run_analyze_call_result( - set_bundled_range_end(delivery_pre_dispatch_data(), 1_000_000), - Ok(()) - ), - RelayerAccountAction::None, - ); - }); - } - - #[test] - fn messages_ext_only_parses_standalone_transactions() { - run_test(|| { - initialize_environment(100, 100, 100); - - // relay + parachain + message delivery calls batch is ignored - assert_eq!( - TestMessagesExtensionProvider::parse_and_check_for_obsolete_call( - &all_finality_and_delivery_batch_call(200, 200, 200) - ), - Ok(None), - ); - assert_eq!( - TestMessagesExtensionProvider::parse_and_check_for_obsolete_call( - &all_finality_and_delivery_batch_call_ex(200, 200, 200) - ), - Ok(None), - ); - - // relay + parachain + message confirmation calls batch is ignored - assert_eq!( - TestMessagesExtensionProvider::parse_and_check_for_obsolete_call( - 
&all_finality_and_confirmation_batch_call(200, 200, 200) - ), - Ok(None), - ); - assert_eq!( - TestMessagesExtensionProvider::parse_and_check_for_obsolete_call( - &all_finality_and_confirmation_batch_call_ex(200, 200, 200) - ), - Ok(None), - ); - - // parachain + message delivery call batch is ignored - assert_eq!( - TestMessagesExtensionProvider::parse_and_check_for_obsolete_call( - ¶chain_finality_and_delivery_batch_call(200, 200) - ), - Ok(None), - ); - - // parachain + message confirmation call batch is ignored - assert_eq!( - TestMessagesExtensionProvider::parse_and_check_for_obsolete_call( - ¶chain_finality_and_confirmation_batch_call(200, 200) - ), - Ok(None), - ); - - // relay + message delivery call batch is ignored - assert_eq!( - TestMessagesExtensionProvider::parse_and_check_for_obsolete_call( - &relay_finality_and_delivery_batch_call(200, 200) - ), - Ok(None), - ); - assert_eq!( - TestMessagesExtensionProvider::parse_and_check_for_obsolete_call( - &relay_finality_and_delivery_batch_call_ex(200, 200) - ), - Ok(None), - ); - - // relay + message confirmation call batch is ignored - assert_eq!( - TestMessagesExtensionProvider::parse_and_check_for_obsolete_call( - &relay_finality_and_confirmation_batch_call(200, 200) - ), - Ok(None), - ); - assert_eq!( - TestMessagesExtensionProvider::parse_and_check_for_obsolete_call( - &relay_finality_and_confirmation_batch_call_ex(200, 200) - ), - Ok(None), - ); - - // message delivery call batch is accepted - assert_eq!( - TestMessagesExtensionProvider::parse_and_check_for_obsolete_call( - &message_delivery_call(200) - ), - Ok(Some(delivery_pre_dispatch_data().call_info)), - ); - - // message confirmation call batch is accepted - assert_eq!( - TestMessagesExtensionProvider::parse_and_check_for_obsolete_call( - &message_confirmation_call(200) - ), - Ok(Some(confirmation_pre_dispatch_data().call_info)), - ); - }); - } - - #[test] - fn messages_ext_rejects_calls_with_obsolete_messages() { - run_test(|| { - 
initialize_environment(100, 100, 100); - - assert_eq!( - run_messages_pre_dispatch(message_delivery_call(100)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - assert_eq!( - run_messages_pre_dispatch(message_confirmation_call(100)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - - assert_eq!( - run_messages_validate(message_delivery_call(100)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - assert_eq!( - run_messages_validate(message_confirmation_call(100)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - }); - } - - #[test] - fn messages_ext_accepts_calls_with_new_messages() { - run_test(|| { - initialize_environment(100, 100, 100); - - assert_eq!( - run_messages_pre_dispatch(message_delivery_call(200)), - Ok(Some(delivery_pre_dispatch_data())), - ); - assert_eq!( - run_messages_pre_dispatch(message_confirmation_call(200)), - Ok(Some(confirmation_pre_dispatch_data())), - ); - - assert_eq!(run_messages_validate(message_delivery_call(200)), Ok(Default::default()),); - assert_eq!( - run_messages_validate(message_confirmation_call(200)), - Ok(Default::default()), - ); - }); - } - - #[test] - fn grandpa_ext_only_parses_valid_batches() { - run_test(|| { - initialize_environment(100, 100, 100); - - // relay + parachain + message delivery calls batch is ignored - assert_eq!( - TestGrandpaExtensionProvider::parse_and_check_for_obsolete_call( - &all_finality_and_delivery_batch_call(200, 200, 200) - ), - Ok(None), - ); - assert_eq!( - TestGrandpaExtensionProvider::parse_and_check_for_obsolete_call( - &all_finality_and_delivery_batch_call_ex(200, 200, 200) - ), - Ok(None), - ); - - // relay + parachain + message confirmation calls batch is ignored - assert_eq!( - TestGrandpaExtensionProvider::parse_and_check_for_obsolete_call( - &all_finality_and_confirmation_batch_call(200, 200, 200) - ), - Ok(None), - ); - assert_eq!( - 
TestGrandpaExtensionProvider::parse_and_check_for_obsolete_call( - &all_finality_and_confirmation_batch_call_ex(200, 200, 200) - ), - Ok(None), - ); - - // parachain + message delivery call batch is ignored - assert_eq!( - TestGrandpaExtensionProvider::parse_and_check_for_obsolete_call( - ¶chain_finality_and_delivery_batch_call(200, 200) - ), - Ok(None), - ); - - // parachain + message confirmation call batch is ignored - assert_eq!( - TestGrandpaExtensionProvider::parse_and_check_for_obsolete_call( - ¶chain_finality_and_confirmation_batch_call(200, 200) - ), - Ok(None), - ); - - // relay + message delivery call batch is accepted - assert_eq!( - TestGrandpaExtensionProvider::parse_and_check_for_obsolete_call( - &relay_finality_and_delivery_batch_call(200, 200) - ), - Ok(Some(relay_finality_pre_dispatch_data().call_info)), - ); - assert_eq!( - TestGrandpaExtensionProvider::parse_and_check_for_obsolete_call( - &relay_finality_and_delivery_batch_call_ex(200, 200) - ), - Ok(Some(relay_finality_pre_dispatch_data_ex().call_info)), - ); - - // relay + message confirmation call batch is accepted - assert_eq!( - TestGrandpaExtensionProvider::parse_and_check_for_obsolete_call( - &relay_finality_and_confirmation_batch_call(200, 200) - ), - Ok(Some(relay_finality_confirmation_pre_dispatch_data().call_info)), - ); - assert_eq!( - TestGrandpaExtensionProvider::parse_and_check_for_obsolete_call( - &relay_finality_and_confirmation_batch_call_ex(200, 200) - ), - Ok(Some(relay_finality_confirmation_pre_dispatch_data_ex().call_info)), - ); - - // message delivery call batch is accepted - assert_eq!( - TestGrandpaExtensionProvider::parse_and_check_for_obsolete_call( - &message_delivery_call(200) - ), - Ok(Some(delivery_pre_dispatch_data().call_info)), - ); - - // message confirmation call batch is accepted - assert_eq!( - TestGrandpaExtensionProvider::parse_and_check_for_obsolete_call( - &message_confirmation_call(200) - ), - Ok(Some(confirmation_pre_dispatch_data().call_info)), - ); 
- }); - } - - #[test] - fn grandpa_ext_rejects_batch_with_obsolete_relay_chain_header() { - run_test(|| { - initialize_environment(100, 100, 100); - - assert_eq!( - run_grandpa_pre_dispatch(relay_finality_and_delivery_batch_call(100, 200)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - assert_eq!( - run_grandpa_pre_dispatch(relay_finality_and_delivery_batch_call_ex(100, 200)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - - assert_eq!( - run_grandpa_validate(relay_finality_and_delivery_batch_call(100, 200)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - assert_eq!( - run_grandpa_validate(relay_finality_and_delivery_batch_call_ex(100, 200)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - }); - } - - #[test] - fn grandpa_ext_rejects_calls_with_obsolete_messages() { - run_test(|| { - initialize_environment(100, 100, 100); - - assert_eq!( - run_grandpa_pre_dispatch(relay_finality_and_delivery_batch_call(200, 100)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - assert_eq!( - run_grandpa_pre_dispatch(relay_finality_and_delivery_batch_call_ex(200, 100)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - assert_eq!( - run_grandpa_pre_dispatch(relay_finality_and_confirmation_batch_call(200, 100)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - assert_eq!( - run_grandpa_pre_dispatch(relay_finality_and_confirmation_batch_call_ex(200, 100)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - - assert_eq!( - run_grandpa_validate(relay_finality_and_delivery_batch_call(200, 100)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - assert_eq!( - run_grandpa_validate(relay_finality_and_delivery_batch_call_ex(200, 100)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - assert_eq!( - 
run_grandpa_validate(relay_finality_and_confirmation_batch_call(200, 100)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - assert_eq!( - run_grandpa_validate(relay_finality_and_confirmation_batch_call_ex(200, 100)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - - assert_eq!( - run_grandpa_pre_dispatch(message_delivery_call(100)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - assert_eq!( - run_grandpa_pre_dispatch(message_confirmation_call(100)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - - assert_eq!( - run_grandpa_validate(message_delivery_call(100)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - assert_eq!( - run_grandpa_validate(message_confirmation_call(100)), - Err(TransactionValidityError::Invalid(InvalidTransaction::Stale)), - ); - }); - } - - #[test] - fn grandpa_ext_accepts_calls_with_new_messages() { - run_test(|| { - initialize_environment(100, 100, 100); - - assert_eq!( - run_grandpa_pre_dispatch(relay_finality_and_delivery_batch_call(200, 200)), - Ok(Some(relay_finality_pre_dispatch_data()),) - ); - assert_eq!( - run_grandpa_pre_dispatch(relay_finality_and_delivery_batch_call_ex(200, 200)), - Ok(Some(relay_finality_pre_dispatch_data_ex()),) - ); - assert_eq!( - run_grandpa_pre_dispatch(relay_finality_and_confirmation_batch_call(200, 200)), - Ok(Some(relay_finality_confirmation_pre_dispatch_data())), - ); - assert_eq!( - run_grandpa_pre_dispatch(relay_finality_and_confirmation_batch_call_ex(200, 200)), - Ok(Some(relay_finality_confirmation_pre_dispatch_data_ex())), - ); - - assert_eq!( - run_grandpa_validate(relay_finality_and_delivery_batch_call(200, 200)), - Ok(Default::default()), - ); - assert_eq!( - run_grandpa_validate(relay_finality_and_delivery_batch_call_ex(200, 200)), - Ok(Default::default()), - ); - assert_eq!( - run_grandpa_validate(relay_finality_and_confirmation_batch_call(200, 200)), - 
Ok(Default::default()), - ); - assert_eq!( - run_grandpa_validate(relay_finality_and_confirmation_batch_call_ex(200, 200)), - Ok(Default::default()), - ); - - assert_eq!( - run_grandpa_pre_dispatch(message_delivery_call(200)), - Ok(Some(delivery_pre_dispatch_data())), - ); - assert_eq!( - run_grandpa_pre_dispatch(message_confirmation_call(200)), - Ok(Some(confirmation_pre_dispatch_data())), - ); - - assert_eq!(run_grandpa_validate(message_delivery_call(200)), Ok(Default::default()),); - assert_eq!( - run_grandpa_validate(message_confirmation_call(200)), - Ok(Default::default()), - ); - }); - } - - #[test] - fn does_not_panic_on_boosting_priority_of_empty_message_delivery_transaction() { - run_test(|| { - let best_delivered_message = MaxUnconfirmedMessagesAtInboundLane::get(); - initialize_environment(100, 100, best_delivered_message); - - // register relayer so it gets priority boost - BridgeRelayers::register(RuntimeOrigin::signed(relayer_account_at_this_chain()), 1000) - .unwrap(); - - // allow empty message delivery transactions - let lane_id = TestLaneId::get(); - let in_lane_data = InboundLaneData { - last_confirmed_nonce: 0, - relayers: vec![UnrewardedRelayer { - relayer: relayer_account_at_bridged_chain(), - messages: DeliveredMessages { begin: 1, end: best_delivered_message }, - }] - .into(), - }; - pallet_bridge_messages::InboundLanes::::insert(lane_id, in_lane_data); - - // now check that the priority of empty tx is the same as priority of 1-message tx - let priority_of_zero_messages_delivery = - run_validate(message_delivery_call(best_delivered_message)).unwrap().priority; - let priority_of_one_messages_delivery = - run_validate(message_delivery_call(best_delivered_message + 1)) - .unwrap() - .priority; - - assert_eq!(priority_of_zero_messages_delivery, priority_of_one_messages_delivery); - }); - } -} diff --git a/bin/runtime-common/src/integrity.rs b/bin/runtime-common/src/integrity.rs deleted file mode 100644 index 
d3827a14dd6cc24e088a8d05d26aba9d769eb213..0000000000000000000000000000000000000000 --- a/bin/runtime-common/src/integrity.rs +++ /dev/null @@ -1,348 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Integrity tests for chain constants and pallets configuration. -//! -//! Most of the tests in this module assume that the bridge is using standard (see `crate::messages` -//! module for details) configuration. - -use crate::{messages, messages::MessageBridge}; - -use bp_messages::{InboundLaneData, MessageNonce}; -use bp_runtime::{Chain, ChainId}; -use codec::Encode; -use frame_support::{storage::generator::StorageValue, traits::Get, weights::Weight}; -use frame_system::limits; -use pallet_bridge_messages::WeightInfoExt as _; - -/// Macro that ensures that the runtime configuration and chain primitives crate are sharing -/// the same types (nonce, block number, hash, hasher, account id and header). -#[macro_export] -macro_rules! 
assert_chain_types( - ( runtime: $r:path, this_chain: $this:path ) => { - { - // if one of asserts fail, then either bridge isn't configured properly (or alternatively - non-standard - // configuration is used), or something has broke existing configuration (meaning that all bridged chains - // and relays will stop functioning) - use frame_system::{Config as SystemConfig, pallet_prelude::{BlockNumberFor, HeaderFor}}; - use static_assertions::assert_type_eq_all; - - assert_type_eq_all!(<$r as SystemConfig>::Nonce, bp_runtime::NonceOf<$this>); - assert_type_eq_all!(BlockNumberFor<$r>, bp_runtime::BlockNumberOf<$this>); - assert_type_eq_all!(<$r as SystemConfig>::Hash, bp_runtime::HashOf<$this>); - assert_type_eq_all!(<$r as SystemConfig>::Hashing, bp_runtime::HasherOf<$this>); - assert_type_eq_all!(<$r as SystemConfig>::AccountId, bp_runtime::AccountIdOf<$this>); - assert_type_eq_all!(HeaderFor<$r>, bp_runtime::HeaderOf<$this>); - } - } -); - -/// Macro that ensures that the bridge GRANDPA pallet is configured properly to bridge with given -/// chain. -#[macro_export] -macro_rules! assert_bridge_grandpa_pallet_types( - ( runtime: $r:path, with_bridged_chain_grandpa_instance: $i:path, bridged_chain: $bridged:path ) => { - { - // if one of asserts fail, then either bridge isn't configured properly (or alternatively - non-standard - // configuration is used), or something has broke existing configuration (meaning that all bridged chains - // and relays will stop functioning) - use pallet_bridge_grandpa::Config as GrandpaConfig; - use static_assertions::assert_type_eq_all; - - assert_type_eq_all!(<$r as GrandpaConfig<$i>>::BridgedChain, $bridged); - } - } -); - -/// Macro that ensures that the bridge messages pallet is configured properly to bridge using given -/// configuration. -#[macro_export] -macro_rules! 
assert_bridge_messages_pallet_types( - ( - runtime: $r:path, - with_bridged_chain_messages_instance: $i:path, - bridge: $bridge:path - ) => { - { - // if one of asserts fail, then either bridge isn't configured properly (or alternatively - non-standard - // configuration is used), or something has broke existing configuration (meaning that all bridged chains - // and relays will stop functioning) - use $crate::messages::{ - source::{FromThisChainMessagePayload, TargetHeaderChainAdapter}, - target::{FromBridgedChainMessagePayload, SourceHeaderChainAdapter}, - AccountIdOf, BalanceOf, BridgedChain, ThisChain, - }; - use pallet_bridge_messages::Config as MessagesConfig; - use static_assertions::assert_type_eq_all; - - assert_type_eq_all!(<$r as MessagesConfig<$i>>::OutboundPayload, FromThisChainMessagePayload); - - assert_type_eq_all!(<$r as MessagesConfig<$i>>::InboundRelayer, AccountIdOf>); - - assert_type_eq_all!(<$r as MessagesConfig<$i>>::TargetHeaderChain, TargetHeaderChainAdapter<$bridge>); - assert_type_eq_all!(<$r as MessagesConfig<$i>>::SourceHeaderChain, SourceHeaderChainAdapter<$bridge>); - } - } -); - -/// Macro that combines four other macro calls - `assert_chain_types`, `assert_bridge_types`, -/// `assert_bridge_grandpa_pallet_types` and `assert_bridge_messages_pallet_types`. It may be used -/// at the chain that is implementing complete standard messages bridge (i.e. with bridge GRANDPA -/// and messages pallets deployed). -#[macro_export] -macro_rules! 
assert_complete_bridge_types( - ( - runtime: $r:path, - with_bridged_chain_grandpa_instance: $gi:path, - with_bridged_chain_messages_instance: $mi:path, - bridge: $bridge:path, - this_chain: $this:path, - bridged_chain: $bridged:path, - ) => { - $crate::assert_chain_types!(runtime: $r, this_chain: $this); - $crate::assert_bridge_grandpa_pallet_types!( - runtime: $r, - with_bridged_chain_grandpa_instance: $gi, - bridged_chain: $bridged - ); - $crate::assert_bridge_messages_pallet_types!( - runtime: $r, - with_bridged_chain_messages_instance: $mi, - bridge: $bridge - ); - } -); - -/// Parameters for asserting chain-related constants. -#[derive(Debug)] -pub struct AssertChainConstants { - /// Block length limits of the chain. - pub block_length: limits::BlockLength, - /// Block weight limits of the chain. - pub block_weights: limits::BlockWeights, -} - -/// Test that our hardcoded, chain-related constants, are matching chain runtime configuration. -/// -/// In particular, this test ensures that: -/// -/// 1) block weight limits are matching; -/// 2) block size limits are matching. -pub fn assert_chain_constants(params: AssertChainConstants) -where - R: frame_system::Config, -{ - // we don't check runtime version here, because in our case we'll be building relay from one - // repo and runtime will live in another repo, along with outdated relay version. To avoid - // unneeded commits, let's not raise an error in case of version mismatch. - - // if one of following assert fails, it means that we may need to upgrade bridged chain and - // relay to use updated constants. If constants are now smaller than before, it may lead to - // undeliverable messages. - - // `BlockLength` struct is not implementing `PartialEq`, so we compare encoded values here. 
- assert_eq!( - R::BlockLength::get().encode(), - params.block_length.encode(), - "BlockLength from runtime ({:?}) differ from hardcoded: {:?}", - R::BlockLength::get(), - params.block_length, - ); - // `BlockWeights` struct is not implementing `PartialEq`, so we compare encoded values here - assert_eq!( - R::BlockWeights::get().encode(), - params.block_weights.encode(), - "BlockWeights from runtime ({:?}) differ from hardcoded: {:?}", - R::BlockWeights::get(), - params.block_weights, - ); -} - -/// Test that the constants, used in GRANDPA pallet configuration are valid. -pub fn assert_bridge_grandpa_pallet_constants() -where - R: pallet_bridge_grandpa::Config, - GI: 'static, -{ - assert!( - R::HeadersToKeep::get() > 0, - "HeadersToKeep ({}) must be larger than zero", - R::HeadersToKeep::get(), - ); -} - -/// Parameters for asserting messages pallet constants. -#[derive(Debug)] -pub struct AssertBridgeMessagesPalletConstants { - /// Maximal number of unrewarded relayer entries in a confirmation transaction at the bridged - /// chain. - pub max_unrewarded_relayers_in_bridged_confirmation_tx: MessageNonce, - /// Maximal number of unconfirmed messages in a confirmation transaction at the bridged chain. - pub max_unconfirmed_messages_in_bridged_confirmation_tx: MessageNonce, - /// Identifier of the bridged chain. - pub bridged_chain_id: ChainId, -} - -/// Test that the constants, used in messages pallet configuration are valid. 
-pub fn assert_bridge_messages_pallet_constants(params: AssertBridgeMessagesPalletConstants) -where - R: pallet_bridge_messages::Config, - MI: 'static, -{ - assert!( - !R::ActiveOutboundLanes::get().is_empty(), - "ActiveOutboundLanes ({:?}) must not be empty", - R::ActiveOutboundLanes::get(), - ); - assert!( - R::MaxUnrewardedRelayerEntriesAtInboundLane::get() <= params.max_unrewarded_relayers_in_bridged_confirmation_tx, - "MaxUnrewardedRelayerEntriesAtInboundLane ({}) must be <= than the hardcoded value for bridged chain: {}", - R::MaxUnrewardedRelayerEntriesAtInboundLane::get(), - params.max_unrewarded_relayers_in_bridged_confirmation_tx, - ); - assert!( - R::MaxUnconfirmedMessagesAtInboundLane::get() <= params.max_unconfirmed_messages_in_bridged_confirmation_tx, - "MaxUnrewardedRelayerEntriesAtInboundLane ({}) must be <= than the hardcoded value for bridged chain: {}", - R::MaxUnconfirmedMessagesAtInboundLane::get(), - params.max_unconfirmed_messages_in_bridged_confirmation_tx, - ); - assert_eq!(R::BridgedChainId::get(), params.bridged_chain_id); -} - -/// Parameters for asserting bridge pallet names. -#[derive(Debug)] -pub struct AssertBridgePalletNames<'a> { - /// Name of the messages pallet, deployed at the bridged chain and used to bridge with this - /// chain. - pub with_this_chain_messages_pallet_name: &'a str, - /// Name of the GRANDPA pallet, deployed at this chain and used to bridge with the bridged - /// chain. - pub with_bridged_chain_grandpa_pallet_name: &'a str, - /// Name of the messages pallet, deployed at this chain and used to bridge with the bridged - /// chain. - pub with_bridged_chain_messages_pallet_name: &'a str, -} - -/// Tests that bridge pallet names used in `construct_runtime!()` macro call are matching constants -/// from chain primitives crates. 
-pub fn assert_bridge_pallet_names(params: AssertBridgePalletNames) -where - B: MessageBridge, - R: pallet_bridge_grandpa::Config + pallet_bridge_messages::Config, - GI: 'static, - MI: 'static, -{ - assert_eq!(B::BRIDGED_MESSAGES_PALLET_NAME, params.with_this_chain_messages_pallet_name); - assert_eq!( - pallet_bridge_grandpa::PalletOwner::::storage_value_final_key().to_vec(), - bp_runtime::storage_value_key(params.with_bridged_chain_grandpa_pallet_name, "PalletOwner",).0, - ); - assert_eq!( - pallet_bridge_messages::PalletOwner::::storage_value_final_key().to_vec(), - bp_runtime::storage_value_key( - params.with_bridged_chain_messages_pallet_name, - "PalletOwner", - ) - .0, - ); -} - -/// Parameters for asserting complete standard messages bridge. -#[derive(Debug)] -pub struct AssertCompleteBridgeConstants<'a> { - /// Parameters to assert this chain constants. - pub this_chain_constants: AssertChainConstants, - /// Parameters to assert messages pallet constants. - pub messages_pallet_constants: AssertBridgeMessagesPalletConstants, - /// Parameters to assert pallet names constants. - pub pallet_names: AssertBridgePalletNames<'a>, -} - -/// All bridge-related constants tests for the complete standard messages bridge (i.e. with bridge -/// GRANDPA and messages pallets deployed). -pub fn assert_complete_bridge_constants(params: AssertCompleteBridgeConstants) -where - R: frame_system::Config - + pallet_bridge_grandpa::Config - + pallet_bridge_messages::Config, - GI: 'static, - MI: 'static, - B: MessageBridge, -{ - assert_chain_constants::(params.this_chain_constants); - assert_bridge_grandpa_pallet_constants::(); - assert_bridge_messages_pallet_constants::(params.messages_pallet_constants); - assert_bridge_pallet_names::(params.pallet_names); -} - -/// Check that the message lane weights are correct. 
-pub fn check_message_lane_weights< - C: Chain, - T: frame_system::Config + pallet_bridge_messages::Config, - MessagesPalletInstance: 'static, ->( - bridged_chain_extra_storage_proof_size: u32, - this_chain_max_unrewarded_relayers: MessageNonce, - this_chain_max_unconfirmed_messages: MessageNonce, - // whether `RefundBridgedParachainMessages` extension is deployed at runtime and is used for - // refunding this bridge transactions? - // - // in other words: pass true for all known production chains - runtime_includes_refund_extension: bool, -) { - type Weights = >::WeightInfo; - - // check basic weight assumptions - pallet_bridge_messages::ensure_weights_are_correct::>(); - - // check that weights allow us to receive messages - let max_incoming_message_proof_size = bridged_chain_extra_storage_proof_size - .saturating_add(messages::target::maximal_incoming_message_size(C::max_extrinsic_size())); - pallet_bridge_messages::ensure_able_to_receive_message::>( - C::max_extrinsic_size(), - C::max_extrinsic_weight(), - max_incoming_message_proof_size, - messages::target::maximal_incoming_message_dispatch_weight(C::max_extrinsic_weight()), - ); - - // check that weights allow us to receive delivery confirmations - let max_incoming_inbound_lane_data_proof_size = - InboundLaneData::<()>::encoded_size_hint_u32(this_chain_max_unrewarded_relayers as _); - pallet_bridge_messages::ensure_able_to_receive_confirmation::>( - C::max_extrinsic_size(), - C::max_extrinsic_weight(), - max_incoming_inbound_lane_data_proof_size, - this_chain_max_unrewarded_relayers, - this_chain_max_unconfirmed_messages, - ); - - // check that extra weights of delivery/confirmation transactions include the weight - // of `RefundBridgedParachainMessages` operations. This signed extension assumes the worst case - // (i.e. slashing if delivery transaction was invalid) and refunds some weight if - // assumption was wrong (i.e. if we did refund instead of slashing). 
This check - // ensures the extension will not refund weight when it doesn't need to (i.e. if pallet - // weights do not account weights of refund extension). - if runtime_includes_refund_extension { - assert_ne!( - Weights::::receive_messages_proof_overhead_from_runtime(), - Weight::zero() - ); - assert_ne!( - Weights::::receive_messages_delivery_proof_overhead_from_runtime(), - Weight::zero() - ); - } -} diff --git a/bin/runtime-common/src/lib.rs b/bin/runtime-common/src/lib.rs deleted file mode 100644 index 1a5f2067453a10f6e57d4b91204c9a9926cca348..0000000000000000000000000000000000000000 --- a/bin/runtime-common/src/lib.rs +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Common types/functions that may be used by runtimes of all bridged chains. 
- -#![warn(missing_docs)] -#![cfg_attr(not(feature = "std"), no_std)] - -use bp_runtime::{Parachain, ParachainIdOf}; -use sp_runtime::traits::{Get, PhantomData}; - -pub mod extensions; -pub mod messages; -pub mod messages_api; -pub mod messages_benchmarking; -pub mod messages_call_ext; -pub mod messages_generation; -pub mod messages_xcm_extension; -pub mod parachains_benchmarking; - -mod mock; - -#[cfg(feature = "integrity-test")] -pub mod integrity; - -const LOG_TARGET_BRIDGE_DISPATCH: &str = "runtime::bridge-dispatch"; - -/// Trait identifying a bridged parachain. A relayer might be refunded for delivering messages -/// coming from this parachain. -pub trait RefundableParachainId { - /// The instance of the bridge parachains pallet. - type Instance: 'static; - /// The parachain Id. - type Id: Get; -} - -/// Default implementation of `RefundableParachainId`. -pub struct DefaultRefundableParachainId(PhantomData<(Instance, Id)>); - -impl RefundableParachainId for DefaultRefundableParachainId -where - Instance: 'static, - Id: Get, -{ - type Instance = Instance; - type Id = Id; -} - -/// Implementation of `RefundableParachainId` for `trait Parachain`. -pub struct RefundableParachain(PhantomData<(Instance, Para)>); - -impl RefundableParachainId for RefundableParachain -where - Instance: 'static, - Para: Parachain, -{ - type Instance = Instance; - type Id = ParachainIdOf; -} diff --git a/bin/runtime-common/src/messages.rs b/bin/runtime-common/src/messages.rs deleted file mode 100644 index 4aca53f3b98361b1a5f7d5dc89dc72ec0bc1323c..0000000000000000000000000000000000000000 --- a/bin/runtime-common/src/messages.rs +++ /dev/null @@ -1,701 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Types that allow runtime to act as a source/target endpoint of message lanes. -//! -//! Messages are assumed to be encoded `Call`s of the target chain. Call-dispatch -//! pallet is used to dispatch incoming messages. Message identified by a tuple -//! of to elements - message lane id and message nonce. - -pub use bp_runtime::{RangeInclusiveExt, UnderlyingChainOf, UnderlyingChainProvider}; - -use bp_header_chain::HeaderChain; -use bp_messages::{ - source_chain::TargetHeaderChain, - target_chain::{ProvedLaneMessages, ProvedMessages, SourceHeaderChain}, - InboundLaneData, LaneId, Message, MessageKey, MessageNonce, MessagePayload, OutboundLaneData, - VerificationError, -}; -use bp_runtime::{Chain, RawStorageProof, Size, StorageProofChecker}; -use codec::{Decode, Encode}; -use frame_support::{traits::Get, weights::Weight}; -use hash_db::Hasher; -use scale_info::TypeInfo; -use sp_runtime::RuntimeDebug; -use sp_std::{convert::TryFrom, marker::PhantomData, vec::Vec}; - -/// Bidirectional message bridge. -pub trait MessageBridge { - /// Name of the paired messages pallet instance at the Bridged chain. - /// - /// Should be the name that is used in the `construct_runtime!()` macro. - const BRIDGED_MESSAGES_PALLET_NAME: &'static str; - - /// This chain in context of message bridge. 
- type ThisChain: ThisChainWithMessages; - /// Bridged chain in context of message bridge. - type BridgedChain: BridgedChainWithMessages; - /// Bridged header chain. - type BridgedHeaderChain: HeaderChain>; -} - -/// This chain that has `pallet-bridge-messages` module. -pub trait ThisChainWithMessages: UnderlyingChainProvider { - /// Call origin on the chain. - type RuntimeOrigin; -} - -/// Bridged chain that has `pallet-bridge-messages` module. -pub trait BridgedChainWithMessages: UnderlyingChainProvider {} - -/// This chain in context of message bridge. -pub type ThisChain = ::ThisChain; -/// Bridged chain in context of message bridge. -pub type BridgedChain = ::BridgedChain; -/// Hash used on the chain. -pub type HashOf = bp_runtime::HashOf<::Chain>; -/// Hasher used on the chain. -pub type HasherOf = bp_runtime::HasherOf>; -/// Account id used on the chain. -pub type AccountIdOf = bp_runtime::AccountIdOf>; -/// Type of balances that is used on the chain. -pub type BalanceOf = bp_runtime::BalanceOf>; - -/// Sub-module that is declaring types required for processing This -> Bridged chain messages. -pub mod source { - use super::*; - - /// Message payload for This -> Bridged chain messages. - pub type FromThisChainMessagePayload = crate::messages_xcm_extension::XcmAsPlainPayload; - - /// Maximal size of outbound message payload. - pub struct FromThisChainMaximalOutboundPayloadSize(PhantomData); - - impl Get for FromThisChainMaximalOutboundPayloadSize { - fn get() -> u32 { - maximal_message_size::() - } - } - - /// Messages delivery proof from bridged chain: - /// - /// - hash of finalized header; - /// - storage proof of inbound lane state; - /// - lane id. - #[derive(Clone, Decode, Encode, Eq, PartialEq, RuntimeDebug, TypeInfo)] - pub struct FromBridgedChainMessagesDeliveryProof { - /// Hash of the bridge header the proof is for. - pub bridged_header_hash: BridgedHeaderHash, - /// Storage trie proof generated for [`Self::bridged_header_hash`]. 
- pub storage_proof: RawStorageProof, - /// Lane id of which messages were delivered and the proof is for. - pub lane: LaneId, - } - - impl Size for FromBridgedChainMessagesDeliveryProof { - fn size(&self) -> u32 { - u32::try_from( - self.storage_proof - .iter() - .fold(0usize, |sum, node| sum.saturating_add(node.len())), - ) - .unwrap_or(u32::MAX) - } - } - - /// 'Parsed' message delivery proof - inbound lane id and its state. - pub type ParsedMessagesDeliveryProofFromBridgedChain = - (LaneId, InboundLaneData>>); - - /// Return maximal message size of This -> Bridged chain message. - pub fn maximal_message_size() -> u32 { - super::target::maximal_incoming_message_size( - UnderlyingChainOf::>::max_extrinsic_size(), - ) - } - - /// `TargetHeaderChain` implementation that is using default types and perform default checks. - pub struct TargetHeaderChainAdapter(PhantomData); - - impl TargetHeaderChain>> - for TargetHeaderChainAdapter - { - type MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof>>; - - fn verify_message(payload: &FromThisChainMessagePayload) -> Result<(), VerificationError> { - verify_chain_message::(payload) - } - - fn verify_messages_delivery_proof( - proof: Self::MessagesDeliveryProof, - ) -> Result<(LaneId, InboundLaneData>>), VerificationError> { - verify_messages_delivery_proof::(proof) - } - } - - /// Do basic Bridged-chain specific verification of This -> Bridged chain message. - /// - /// Ok result from this function means that the delivery transaction with this message - /// may be 'mined' by the target chain. - pub fn verify_chain_message( - payload: &FromThisChainMessagePayload, - ) -> Result<(), VerificationError> { - // IMPORTANT: any error that is returned here is fatal for the bridge, because - // this code is executed at the bridge hub and message sender actually lives - // at some sibling parachain. 
So we are failing **after** the message has been - // sent and we can't report it back to sender (unless error report mechanism is - // embedded into message and its dispatcher). - - // apart from maximal message size check (see below), we should also check the message - // dispatch weight here. But we assume that the bridged chain will just push the message - // to some queue (XCMP, UMP, DMP), so the weight is constant and fits the block. - - // The maximal size of extrinsic at Substrate-based chain depends on the - // `frame_system::Config::MaximumBlockLength` and - // `frame_system::Config::AvailableBlockRatio` constants. This check is here to be sure that - // the lane won't stuck because message is too large to fit into delivery transaction. - // - // **IMPORTANT NOTE**: the delivery transaction contains storage proof of the message, not - // the message itself. The proof is always larger than the message. But unless chain state - // is enormously large, it should be several dozens/hundreds of bytes. The delivery - // transaction also contains signatures and signed extensions. Because of this, we reserve - // 1/3 of the the maximal extrinsic size for this data. - if payload.len() > maximal_message_size::() as usize { - return Err(VerificationError::MessageTooLarge) - } - - Ok(()) - } - - /// Verify proof of This -> Bridged chain messages delivery. - /// - /// This function is used when Bridged chain is directly using GRANDPA finality. For Bridged - /// parachains, please use the `verify_messages_delivery_proof_from_parachain`. 
- pub fn verify_messages_delivery_proof( - proof: FromBridgedChainMessagesDeliveryProof>>, - ) -> Result, VerificationError> { - let FromBridgedChainMessagesDeliveryProof { bridged_header_hash, storage_proof, lane } = - proof; - let mut storage = - B::BridgedHeaderChain::storage_proof_checker(bridged_header_hash, storage_proof) - .map_err(VerificationError::HeaderChain)?; - // Messages delivery proof is just proof of single storage key read => any error - // is fatal. - let storage_inbound_lane_data_key = bp_messages::storage_keys::inbound_lane_data_key( - B::BRIDGED_MESSAGES_PALLET_NAME, - &lane, - ); - let inbound_lane_data = storage - .read_and_decode_mandatory_value(storage_inbound_lane_data_key.0.as_ref()) - .map_err(VerificationError::InboundLaneStorage)?; - - // check that the storage proof doesn't have any untouched trie nodes - storage.ensure_no_unused_nodes().map_err(VerificationError::StorageProof)?; - - Ok((lane, inbound_lane_data)) - } -} - -/// Sub-module that is declaring types required for processing Bridged -> This chain messages. -pub mod target { - use super::*; - - /// Decoded Bridged -> This message payload. - pub type FromBridgedChainMessagePayload = crate::messages_xcm_extension::XcmAsPlainPayload; - - /// Messages proof from bridged chain: - /// - /// - hash of finalized header; - /// - storage proof of messages and (optionally) outbound lane state; - /// - lane id; - /// - nonces (inclusive range) of messages which are included in this proof. - #[derive(Clone, Decode, Encode, Eq, PartialEq, RuntimeDebug, TypeInfo)] - pub struct FromBridgedChainMessagesProof { - /// Hash of the finalized bridged header the proof is for. - pub bridged_header_hash: BridgedHeaderHash, - /// A storage trie proof of messages being delivered. - pub storage_proof: RawStorageProof, - /// Messages in this proof are sent over this lane. - pub lane: LaneId, - /// Nonce of the first message being delivered. 
- pub nonces_start: MessageNonce, - /// Nonce of the last message being delivered. - pub nonces_end: MessageNonce, - } - - impl Size for FromBridgedChainMessagesProof { - fn size(&self) -> u32 { - u32::try_from( - self.storage_proof - .iter() - .fold(0usize, |sum, node| sum.saturating_add(node.len())), - ) - .unwrap_or(u32::MAX) - } - } - - /// Return maximal dispatch weight of the message we're able to receive. - pub fn maximal_incoming_message_dispatch_weight(maximal_extrinsic_weight: Weight) -> Weight { - maximal_extrinsic_weight / 2 - } - - /// Return maximal message size given maximal extrinsic size. - pub fn maximal_incoming_message_size(maximal_extrinsic_size: u32) -> u32 { - maximal_extrinsic_size / 3 * 2 - } - - /// `SourceHeaderChain` implementation that is using default types and perform default checks. - pub struct SourceHeaderChainAdapter(PhantomData); - - impl SourceHeaderChain for SourceHeaderChainAdapter { - type MessagesProof = FromBridgedChainMessagesProof>>; - - fn verify_messages_proof( - proof: Self::MessagesProof, - messages_count: u32, - ) -> Result, VerificationError> { - verify_messages_proof::(proof, messages_count) - } - } - - /// Verify proof of Bridged -> This chain messages. - /// - /// This function is used when Bridged chain is directly using GRANDPA finality. For Bridged - /// parachains, please use the `verify_messages_proof_from_parachain`. - /// - /// The `messages_count` argument verification (sane limits) is supposed to be made - /// outside of this function. This function only verifies that the proof declares exactly - /// `messages_count` messages. 
- pub fn verify_messages_proof( - proof: FromBridgedChainMessagesProof>>, - messages_count: u32, - ) -> Result, VerificationError> { - let FromBridgedChainMessagesProof { - bridged_header_hash, - storage_proof, - lane, - nonces_start, - nonces_end, - } = proof; - let storage = - B::BridgedHeaderChain::storage_proof_checker(bridged_header_hash, storage_proof) - .map_err(VerificationError::HeaderChain)?; - let mut parser = StorageProofCheckerAdapter::<_, B> { storage, _dummy: Default::default() }; - let nonces_range = nonces_start..=nonces_end; - - // receiving proofs where end < begin is ok (if proof includes outbound lane state) - let messages_in_the_proof = nonces_range.checked_len().unwrap_or(0); - if messages_in_the_proof != MessageNonce::from(messages_count) { - return Err(VerificationError::MessagesCountMismatch) - } - - // Read messages first. All messages that are claimed to be in the proof must - // be in the proof. So any error in `read_value`, or even missing value is fatal. - // - // Mind that we allow proofs with no messages if outbound lane state is proved. - let mut messages = Vec::with_capacity(messages_in_the_proof as _); - for nonce in nonces_range { - let message_key = MessageKey { lane_id: lane, nonce }; - let message_payload = parser.read_and_decode_message_payload(&message_key)?; - messages.push(Message { key: message_key, payload: message_payload }); - } - - // Now let's check if proof contains outbound lane state proof. It is optional, so - // we simply ignore `read_value` errors and missing value. - let proved_lane_messages = ProvedLaneMessages { - lane_state: parser.read_and_decode_outbound_lane_data(&lane)?, - messages, - }; - - // Now we may actually check if the proof is empty or not. 
- if proved_lane_messages.lane_state.is_none() && proved_lane_messages.messages.is_empty() { - return Err(VerificationError::EmptyMessageProof) - } - - // check that the storage proof doesn't have any untouched trie nodes - parser - .storage - .ensure_no_unused_nodes() - .map_err(VerificationError::StorageProof)?; - - // We only support single lane messages in this generated_schema - let mut proved_messages = ProvedMessages::new(); - proved_messages.insert(lane, proved_lane_messages); - - Ok(proved_messages) - } - - struct StorageProofCheckerAdapter { - storage: StorageProofChecker, - _dummy: sp_std::marker::PhantomData, - } - - impl StorageProofCheckerAdapter { - fn read_and_decode_outbound_lane_data( - &mut self, - lane_id: &LaneId, - ) -> Result, VerificationError> { - let storage_outbound_lane_data_key = bp_messages::storage_keys::outbound_lane_data_key( - B::BRIDGED_MESSAGES_PALLET_NAME, - lane_id, - ); - - self.storage - .read_and_decode_opt_value(storage_outbound_lane_data_key.0.as_ref()) - .map_err(VerificationError::OutboundLaneStorage) - } - - fn read_and_decode_message_payload( - &mut self, - message_key: &MessageKey, - ) -> Result { - let storage_message_key = bp_messages::storage_keys::message_key( - B::BRIDGED_MESSAGES_PALLET_NAME, - &message_key.lane_id, - message_key.nonce, - ); - self.storage - .read_and_decode_mandatory_value(storage_message_key.0.as_ref()) - .map_err(VerificationError::MessageStorage) - } - } -} - -/// The `BridgeMessagesCall` used by a chain. 
-pub type BridgeMessagesCallOf = bp_messages::BridgeMessagesCall< - bp_runtime::AccountIdOf, - target::FromBridgedChainMessagesProof>, - source::FromBridgedChainMessagesDeliveryProof>, ->; - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - messages_generation::{ - encode_all_messages, encode_lane_data, prepare_messages_storage_proof, - }, - mock::*, - }; - use bp_header_chain::{HeaderChainError, StoredHeaderDataBuilder}; - use bp_runtime::{HeaderId, StorageProofError}; - use codec::Encode; - use sp_core::H256; - use sp_runtime::traits::Header as _; - - #[test] - fn verify_chain_message_rejects_message_with_too_large_declared_weight() { - assert!(source::verify_chain_message::(&vec![ - 42; - BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT - - 1 - ]) - .is_err()); - } - - #[test] - fn verify_chain_message_rejects_message_too_large_message() { - assert!(source::verify_chain_message::(&vec![ - 0; - source::maximal_message_size::() - as usize + 1 - ],) - .is_err()); - } - - #[test] - fn verify_chain_message_accepts_maximal_message() { - assert_eq!( - source::verify_chain_message::(&vec![ - 0; - source::maximal_message_size::() - as _ - ],), - Ok(()), - ); - } - - fn using_messages_proof( - nonces_end: MessageNonce, - outbound_lane_data: Option, - encode_message: impl Fn(MessageNonce, &MessagePayload) -> Option>, - encode_outbound_lane_data: impl Fn(&OutboundLaneData) -> Vec, - test: impl Fn(target::FromBridgedChainMessagesProof) -> R, - ) -> R { - let (state_root, storage_proof) = prepare_messages_storage_proof::( - TEST_LANE_ID, - 1..=nonces_end, - outbound_lane_data, - bp_runtime::StorageProofSize::Minimal(0), - vec![42], - encode_message, - encode_outbound_lane_data, - ); - - sp_io::TestExternalities::new(Default::default()).execute_with(move || { - let bridged_header = BridgedChainHeader::new( - 0, - Default::default(), - state_root, - Default::default(), - Default::default(), - ); - let bridged_header_hash = bridged_header.hash(); - - 
pallet_bridge_grandpa::BestFinalized::::put(HeaderId( - 0, - bridged_header_hash, - )); - pallet_bridge_grandpa::ImportedHeaders::::insert( - bridged_header_hash, - bridged_header.build(), - ); - test(target::FromBridgedChainMessagesProof { - bridged_header_hash, - storage_proof, - lane: TEST_LANE_ID, - nonces_start: 1, - nonces_end, - }) - }) - } - - #[test] - fn messages_proof_is_rejected_if_declared_less_than_actual_number_of_messages() { - assert_eq!( - using_messages_proof(10, None, encode_all_messages, encode_lane_data, |proof| { - target::verify_messages_proof::(proof, 5) - }), - Err(VerificationError::MessagesCountMismatch), - ); - } - - #[test] - fn messages_proof_is_rejected_if_declared_more_than_actual_number_of_messages() { - assert_eq!( - using_messages_proof(10, None, encode_all_messages, encode_lane_data, |proof| { - target::verify_messages_proof::(proof, 15) - }), - Err(VerificationError::MessagesCountMismatch), - ); - } - - #[test] - fn message_proof_is_rejected_if_header_is_missing_from_the_chain() { - assert_eq!( - using_messages_proof(10, None, encode_all_messages, encode_lane_data, |proof| { - let bridged_header_hash = - pallet_bridge_grandpa::BestFinalized::::get().unwrap().1; - pallet_bridge_grandpa::ImportedHeaders::::remove(bridged_header_hash); - target::verify_messages_proof::(proof, 10) - }), - Err(VerificationError::HeaderChain(HeaderChainError::UnknownHeader)), - ); - } - - #[test] - fn message_proof_is_rejected_if_header_state_root_mismatches() { - assert_eq!( - using_messages_proof(10, None, encode_all_messages, encode_lane_data, |proof| { - let bridged_header_hash = - pallet_bridge_grandpa::BestFinalized::::get().unwrap().1; - pallet_bridge_grandpa::ImportedHeaders::::insert( - bridged_header_hash, - BridgedChainHeader::new( - 0, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - ) - .build(), - ); - target::verify_messages_proof::(proof, 10) - }), - 
Err(VerificationError::HeaderChain(HeaderChainError::StorageProof( - StorageProofError::StorageRootMismatch - ))), - ); - } - - #[test] - fn message_proof_is_rejected_if_it_has_duplicate_trie_nodes() { - assert_eq!( - using_messages_proof(10, None, encode_all_messages, encode_lane_data, |mut proof| { - let node = proof.storage_proof.pop().unwrap(); - proof.storage_proof.push(node.clone()); - proof.storage_proof.push(node); - target::verify_messages_proof::(proof, 10) - },), - Err(VerificationError::HeaderChain(HeaderChainError::StorageProof( - StorageProofError::DuplicateNodesInProof - ))), - ); - } - - #[test] - fn message_proof_is_rejected_if_it_has_unused_trie_nodes() { - assert_eq!( - using_messages_proof(10, None, encode_all_messages, encode_lane_data, |mut proof| { - proof.storage_proof.push(vec![42]); - target::verify_messages_proof::(proof, 10) - },), - Err(VerificationError::StorageProof(StorageProofError::UnusedNodesInTheProof)), - ); - } - - #[test] - fn message_proof_is_rejected_if_required_message_is_missing() { - matches!( - using_messages_proof( - 10, - None, - |n, m| if n != 5 { Some(m.encode()) } else { None }, - encode_lane_data, - |proof| target::verify_messages_proof::(proof, 10) - ), - Err(VerificationError::MessageStorage(StorageProofError::StorageValueEmpty)), - ); - } - - #[test] - fn message_proof_is_rejected_if_message_decode_fails() { - matches!( - using_messages_proof( - 10, - None, - |n, m| { - let mut m = m.encode(); - if n == 5 { - m = vec![42] - } - Some(m) - }, - encode_lane_data, - |proof| target::verify_messages_proof::(proof, 10), - ), - Err(VerificationError::MessageStorage(StorageProofError::StorageValueDecodeFailed(_))), - ); - } - - #[test] - fn message_proof_is_rejected_if_outbound_lane_state_decode_fails() { - matches!( - using_messages_proof( - 10, - Some(OutboundLaneData { - oldest_unpruned_nonce: 1, - latest_received_nonce: 1, - latest_generated_nonce: 1, - }), - encode_all_messages, - |d| { - let mut d = d.encode(); - 
d.truncate(1); - d - }, - |proof| target::verify_messages_proof::(proof, 10), - ), - Err(VerificationError::OutboundLaneStorage( - StorageProofError::StorageValueDecodeFailed(_) - )), - ); - } - - #[test] - fn message_proof_is_rejected_if_it_is_empty() { - assert_eq!( - using_messages_proof(0, None, encode_all_messages, encode_lane_data, |proof| { - target::verify_messages_proof::(proof, 0) - },), - Err(VerificationError::EmptyMessageProof), - ); - } - - #[test] - fn non_empty_message_proof_without_messages_is_accepted() { - assert_eq!( - using_messages_proof( - 0, - Some(OutboundLaneData { - oldest_unpruned_nonce: 1, - latest_received_nonce: 1, - latest_generated_nonce: 1, - }), - encode_all_messages, - encode_lane_data, - |proof| target::verify_messages_proof::(proof, 0), - ), - Ok(vec![( - TEST_LANE_ID, - ProvedLaneMessages { - lane_state: Some(OutboundLaneData { - oldest_unpruned_nonce: 1, - latest_received_nonce: 1, - latest_generated_nonce: 1, - }), - messages: Vec::new(), - }, - )] - .into_iter() - .collect()), - ); - } - - #[test] - fn non_empty_message_proof_is_accepted() { - assert_eq!( - using_messages_proof( - 1, - Some(OutboundLaneData { - oldest_unpruned_nonce: 1, - latest_received_nonce: 1, - latest_generated_nonce: 1, - }), - encode_all_messages, - encode_lane_data, - |proof| target::verify_messages_proof::(proof, 1), - ), - Ok(vec![( - TEST_LANE_ID, - ProvedLaneMessages { - lane_state: Some(OutboundLaneData { - oldest_unpruned_nonce: 1, - latest_received_nonce: 1, - latest_generated_nonce: 1, - }), - messages: vec![Message { - key: MessageKey { lane_id: TEST_LANE_ID, nonce: 1 }, - payload: vec![42], - }], - }, - )] - .into_iter() - .collect()), - ); - } - - #[test] - fn verify_messages_proof_does_not_panic_if_messages_count_mismatches() { - assert_eq!( - using_messages_proof(1, None, encode_all_messages, encode_lane_data, |mut proof| { - proof.nonces_end = u64::MAX; - target::verify_messages_proof::(proof, u32::MAX) - },), - 
Err(VerificationError::MessagesCountMismatch), - ); - } -} diff --git a/bin/runtime-common/src/messages_api.rs b/bin/runtime-common/src/messages_api.rs deleted file mode 100644 index ccf1c754041ed84dc302f0660fdd5bde8dc8d533..0000000000000000000000000000000000000000 --- a/bin/runtime-common/src/messages_api.rs +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Helpers for implementing various message-related runtime API mthods. - -use bp_messages::{ - InboundMessageDetails, LaneId, MessageNonce, MessagePayload, OutboundMessageDetails, -}; -use sp_std::vec::Vec; - -/// Implementation of the `To*OutboundLaneApi::message_details`. 
-pub fn outbound_message_details( - lane: LaneId, - begin: MessageNonce, - end: MessageNonce, -) -> Vec -where - Runtime: pallet_bridge_messages::Config, - MessagesPalletInstance: 'static, -{ - (begin..=end) - .filter_map(|nonce| { - let message_data = - pallet_bridge_messages::Pallet::::outbound_message_data(lane, nonce)?; - Some(OutboundMessageDetails { - nonce, - // dispatch message weight is always zero at the source chain, since we're paying for - // dispatch at the target chain - dispatch_weight: frame_support::weights::Weight::zero(), - size: message_data.len() as _, - }) - }) - .collect() -} - -/// Implementation of the `To*InboundLaneApi::message_details`. -pub fn inbound_message_details( - lane: LaneId, - messages: Vec<(MessagePayload, OutboundMessageDetails)>, -) -> Vec -where - Runtime: pallet_bridge_messages::Config, - MessagesPalletInstance: 'static, -{ - messages - .into_iter() - .map(|(payload, details)| { - pallet_bridge_messages::Pallet::::inbound_message_data( - lane, payload, details, - ) - }) - .collect() -} diff --git a/bin/runtime-common/src/messages_benchmarking.rs b/bin/runtime-common/src/messages_benchmarking.rs deleted file mode 100644 index 0c7a9ad1a83d6a83e0c9fe1f5e77ba2c4cefc17d..0000000000000000000000000000000000000000 --- a/bin/runtime-common/src/messages_benchmarking.rs +++ /dev/null @@ -1,314 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Everything required to run benchmarks of messages module, based on -//! `bridge_runtime_common::messages` implementation. - -#![cfg(feature = "runtime-benchmarks")] - -use crate::{ - messages::{ - source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, - AccountIdOf, BridgedChain, HashOf, MessageBridge, ThisChain, - }, - messages_generation::{ - encode_all_messages, encode_lane_data, prepare_message_delivery_storage_proof, - prepare_messages_storage_proof, - }, -}; - -use bp_messages::MessagePayload; -use bp_polkadot_core::parachains::ParaHash; -use bp_runtime::{Chain, Parachain, StorageProofSize, UnderlyingChainOf}; -use codec::Encode; -use frame_support::weights::Weight; -use pallet_bridge_messages::benchmarking::{MessageDeliveryProofParams, MessageProofParams}; -use sp_runtime::traits::{Header, Zero}; -use sp_std::prelude::*; -use xcm::latest::prelude::*; - -/// Prepare inbound bridge message according to given message proof parameters. -fn prepare_inbound_message( - params: &MessageProofParams, - successful_dispatch_message_generator: impl Fn(usize) -> MessagePayload, -) -> MessagePayload { - // we only care about **this** message size when message proof needs to be `Minimal` - let expected_size = match params.size { - StorageProofSize::Minimal(size) => size as usize, - _ => 0, - }; - - // if we don't need a correct message, then we may just return some random blob - if !params.is_successful_dispatch_expected { - return vec![0u8; expected_size] - } - - // else let's prepare successful message. - let msg = successful_dispatch_message_generator(expected_size); - assert!( - msg.len() >= expected_size, - "msg.len(): {} does not match expected_size: {}", - expected_size, - msg.len() - ); - msg -} - -/// Prepare proof of messages for the `receive_messages_proof` call. 
-/// -/// In addition to returning valid messages proof, environment is prepared to verify this message -/// proof. -/// -/// This method is intended to be used when benchmarking pallet, linked to the chain that -/// uses GRANDPA finality. For parachains, please use the `prepare_message_proof_from_parachain` -/// function. -pub fn prepare_message_proof_from_grandpa_chain( - params: MessageProofParams, - message_generator: impl Fn(usize) -> MessagePayload, -) -> (FromBridgedChainMessagesProof>>, Weight) -where - R: pallet_bridge_grandpa::Config>>, - FI: 'static, - B: MessageBridge, -{ - // prepare storage proof - let (state_root, storage_proof) = prepare_messages_storage_proof::( - params.lane, - params.message_nonces.clone(), - params.outbound_lane_data.clone(), - params.size, - prepare_inbound_message(¶ms, message_generator), - encode_all_messages, - encode_lane_data, - ); - - // update runtime storage - let (_, bridged_header_hash) = insert_header_to_grandpa_pallet::(state_root); - - ( - FromBridgedChainMessagesProof { - bridged_header_hash, - storage_proof, - lane: params.lane, - nonces_start: *params.message_nonces.start(), - nonces_end: *params.message_nonces.end(), - }, - Weight::MAX / 1000, - ) -} - -/// Prepare proof of messages for the `receive_messages_proof` call. -/// -/// In addition to returning valid messages proof, environment is prepared to verify this message -/// proof. -/// -/// This method is intended to be used when benchmarking pallet, linked to the chain that -/// uses parachain finality. For GRANDPA chains, please use the -/// `prepare_message_proof_from_grandpa_chain` function. 
-pub fn prepare_message_proof_from_parachain( - params: MessageProofParams, - message_generator: impl Fn(usize) -> MessagePayload, -) -> (FromBridgedChainMessagesProof>>, Weight) -where - R: pallet_bridge_parachains::Config, - PI: 'static, - B: MessageBridge, - UnderlyingChainOf>: Chain + Parachain, -{ - // prepare storage proof - let (state_root, storage_proof) = prepare_messages_storage_proof::( - params.lane, - params.message_nonces.clone(), - params.outbound_lane_data.clone(), - params.size, - prepare_inbound_message(¶ms, message_generator), - encode_all_messages, - encode_lane_data, - ); - - // update runtime storage - let (_, bridged_header_hash) = - insert_header_to_parachains_pallet::>>(state_root); - - ( - FromBridgedChainMessagesProof { - bridged_header_hash, - storage_proof, - lane: params.lane, - nonces_start: *params.message_nonces.start(), - nonces_end: *params.message_nonces.end(), - }, - Weight::MAX / 1000, - ) -} - -/// Prepare proof of messages delivery for the `receive_messages_delivery_proof` call. -/// -/// This method is intended to be used when benchmarking pallet, linked to the chain that -/// uses GRANDPA finality. For parachains, please use the -/// `prepare_message_delivery_proof_from_parachain` function. 
-pub fn prepare_message_delivery_proof_from_grandpa_chain( - params: MessageDeliveryProofParams>>, -) -> FromBridgedChainMessagesDeliveryProof>> -where - R: pallet_bridge_grandpa::Config>>, - FI: 'static, - B: MessageBridge, -{ - // prepare storage proof - let lane = params.lane; - let (state_root, storage_proof) = prepare_message_delivery_storage_proof::( - params.lane, - params.inbound_lane_data, - params.size, - ); - - // update runtime storage - let (_, bridged_header_hash) = insert_header_to_grandpa_pallet::(state_root); - - FromBridgedChainMessagesDeliveryProof { - bridged_header_hash: bridged_header_hash.into(), - storage_proof, - lane, - } -} - -/// Prepare proof of messages delivery for the `receive_messages_delivery_proof` call. -/// -/// This method is intended to be used when benchmarking pallet, linked to the chain that -/// uses parachain finality. For GRANDPA chains, please use the -/// `prepare_message_delivery_proof_from_grandpa_chain` function. -pub fn prepare_message_delivery_proof_from_parachain( - params: MessageDeliveryProofParams>>, -) -> FromBridgedChainMessagesDeliveryProof>> -where - R: pallet_bridge_parachains::Config, - PI: 'static, - B: MessageBridge, - UnderlyingChainOf>: Chain + Parachain, -{ - // prepare storage proof - let lane = params.lane; - let (state_root, storage_proof) = prepare_message_delivery_storage_proof::( - params.lane, - params.inbound_lane_data, - params.size, - ); - - // update runtime storage - let (_, bridged_header_hash) = - insert_header_to_parachains_pallet::>>(state_root); - - FromBridgedChainMessagesDeliveryProof { - bridged_header_hash: bridged_header_hash.into(), - storage_proof, - lane, - } -} - -/// Insert header to the bridge GRANDPA pallet. 
-pub(crate) fn insert_header_to_grandpa_pallet( - state_root: bp_runtime::HashOf, -) -> (bp_runtime::BlockNumberOf, bp_runtime::HashOf) -where - R: pallet_bridge_grandpa::Config, - GI: 'static, - R::BridgedChain: bp_runtime::Chain, -{ - let bridged_block_number = Zero::zero(); - let bridged_header = bp_runtime::HeaderOf::::new( - bridged_block_number, - Default::default(), - state_root, - Default::default(), - Default::default(), - ); - let bridged_header_hash = bridged_header.hash(); - pallet_bridge_grandpa::initialize_for_benchmarks::(bridged_header); - (bridged_block_number, bridged_header_hash) -} - -/// Insert header to the bridge parachains pallet. -pub(crate) fn insert_header_to_parachains_pallet( - state_root: bp_runtime::HashOf, -) -> (bp_runtime::BlockNumberOf, bp_runtime::HashOf) -where - R: pallet_bridge_parachains::Config, - PI: 'static, - PC: Chain + Parachain, -{ - let bridged_block_number = Zero::zero(); - let bridged_header = bp_runtime::HeaderOf::::new( - bridged_block_number, - Default::default(), - state_root, - Default::default(), - Default::default(), - ); - let bridged_header_hash = bridged_header.hash(); - pallet_bridge_parachains::initialize_for_benchmarks::(bridged_header); - (bridged_block_number, bridged_header_hash) -} - -/// Returns callback which generates `BridgeMessage` from Polkadot XCM builder based on -/// `expected_message_size` for benchmark. 
-pub fn generate_xcm_builder_bridge_message_sample( - destination: InteriorLocation, -) -> impl Fn(usize) -> MessagePayload { - move |expected_message_size| -> MessagePayload { - // For XCM bridge hubs, it is the message that - // will be pushed further to some XCM queue (XCMP/UMP) - let location = xcm::VersionedInteriorLocation::V4(destination.clone()); - let location_encoded_size = location.encoded_size(); - - // we don't need to be super-precise with `expected_size` here - let xcm_size = expected_message_size.saturating_sub(location_encoded_size); - let xcm_data_size = xcm_size.saturating_sub( - // minus empty instruction size - Instruction::<()>::ExpectPallet { - index: 0, - name: vec![], - module_name: vec![], - crate_major: 0, - min_crate_minor: 0, - } - .encoded_size(), - ); - - log::trace!( - target: "runtime::bridge-benchmarks", - "generate_xcm_builder_bridge_message_sample with expected_message_size: {}, location_encoded_size: {}, xcm_size: {}, xcm_data_size: {}", - expected_message_size, location_encoded_size, xcm_size, xcm_data_size, - ); - - let xcm = xcm::VersionedXcm::<()>::V4( - vec![Instruction::<()>::ExpectPallet { - index: 0, - name: vec![42; xcm_data_size], - module_name: vec![], - crate_major: 0, - min_crate_minor: 0, - }] - .into(), - ); - - // this is the `BridgeMessage` from polkadot xcm builder, but it has no constructor - // or public fields, so just tuple - // (double encoding, because `.encode()` is called on original Xcm BLOB when it is pushed - // to the storage) - (location, xcm).encode().encode() - } -} diff --git a/bin/runtime-common/src/messages_call_ext.rs b/bin/runtime-common/src/messages_call_ext.rs deleted file mode 100644 index fb07f7b6dd69110918af23b227708e226bede625..0000000000000000000000000000000000000000 --- a/bin/runtime-common/src/messages_call_ext.rs +++ /dev/null @@ -1,692 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Signed extension for the `pallet-bridge-messages` that is able to reject obsolete -//! (and some other invalid) transactions. - -use crate::messages::{ - source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, -}; -use bp_messages::{target_chain::MessageDispatch, InboundLaneData, LaneId, MessageNonce}; -use bp_runtime::OwnedBridgeModule; -use frame_support::{ - dispatch::CallableCallFor, - traits::{Get, IsSubType}, -}; -use pallet_bridge_messages::{Config, Pallet}; -use sp_runtime::{transaction_validity::TransactionValidity, RuntimeDebug}; -use sp_std::ops::RangeInclusive; - -/// Generic info about a messages delivery/confirmation proof. -#[derive(PartialEq, RuntimeDebug)] -pub struct BaseMessagesProofInfo { - /// Message lane, used by the call. - pub lane_id: LaneId, - /// Nonces of messages, included in the call. - /// - /// For delivery transaction, it is nonces of bundled messages. For confirmation - /// transaction, it is nonces that are to be confirmed during the call. - pub bundled_range: RangeInclusive, - /// Nonce of the best message, stored by this chain before the call is dispatched. - /// - /// For delivery transaction, it is the nonce of best delivered message before the call. - /// For confirmation transaction, it is the nonce of best confirmed message before the call. 
- pub best_stored_nonce: MessageNonce, -} - -impl BaseMessagesProofInfo { - /// Returns true if `bundled_range` continues the `0..=best_stored_nonce` range. - fn appends_to_stored_nonce(&self) -> bool { - Some(*self.bundled_range.start()) == self.best_stored_nonce.checked_add(1) - } -} - -/// Occupation state of the unrewarded relayers vector. -#[derive(PartialEq, RuntimeDebug)] -#[cfg_attr(test, derive(Default))] -pub struct UnrewardedRelayerOccupation { - /// The number of remaining unoccupied entries for new relayers. - pub free_relayer_slots: MessageNonce, - /// The number of messages that we are ready to accept. - pub free_message_slots: MessageNonce, -} - -/// Info about a `ReceiveMessagesProof` call which tries to update a single lane. -#[derive(PartialEq, RuntimeDebug)] -pub struct ReceiveMessagesProofInfo { - /// Base messages proof info - pub base: BaseMessagesProofInfo, - /// State of unrewarded relayers vector. - pub unrewarded_relayers: UnrewardedRelayerOccupation, -} - -impl ReceiveMessagesProofInfo { - /// Returns true if: - /// - /// - either inbound lane is ready to accept bundled messages; - /// - /// - or there are no bundled messages, but the inbound lane is blocked by too many unconfirmed - /// messages and/or unrewarded relayers. 
- fn is_obsolete(&self, is_dispatcher_active: bool) -> bool { - // if dispatcher is inactive, we don't accept any delivery transactions - if !is_dispatcher_active { - return true - } - - // transactions with zero bundled nonces are not allowed, unless they're message - // delivery transactions, which brings reward confirmations required to unblock - // the lane - if self.base.bundled_range.is_empty() { - let empty_transactions_allowed = - // we allow empty transactions when we can't accept delivery from new relayers - self.unrewarded_relayers.free_relayer_slots == 0 || - // or if we can't accept new messages at all - self.unrewarded_relayers.free_message_slots == 0; - - return !empty_transactions_allowed - } - - // otherwise we require bundled messages to continue stored range - !self.base.appends_to_stored_nonce() - } -} - -/// Info about a `ReceiveMessagesDeliveryProof` call which tries to update a single lane. -#[derive(PartialEq, RuntimeDebug)] -pub struct ReceiveMessagesDeliveryProofInfo(pub BaseMessagesProofInfo); - -impl ReceiveMessagesDeliveryProofInfo { - /// Returns true if outbound lane is ready to accept confirmations of bundled messages. - fn is_obsolete(&self) -> bool { - self.0.bundled_range.is_empty() || !self.0.appends_to_stored_nonce() - } -} - -/// Info about a `ReceiveMessagesProof` or a `ReceiveMessagesDeliveryProof` call -/// which tries to update a single lane. -#[derive(PartialEq, RuntimeDebug)] -pub enum CallInfo { - /// Messages delivery call info. - ReceiveMessagesProof(ReceiveMessagesProofInfo), - /// Messages delivery confirmation call info. - ReceiveMessagesDeliveryProof(ReceiveMessagesDeliveryProofInfo), -} - -impl CallInfo { - /// Returns range of messages, bundled with the call. 
- pub fn bundled_messages(&self) -> RangeInclusive { - match *self { - Self::ReceiveMessagesProof(ref info) => info.base.bundled_range.clone(), - Self::ReceiveMessagesDeliveryProof(ref info) => info.0.bundled_range.clone(), - } - } -} - -/// Helper struct that provides methods for working with a call supported by `CallInfo`. -pub struct CallHelper, I: 'static> { - _phantom_data: sp_std::marker::PhantomData<(T, I)>, -} - -impl, I: 'static> CallHelper { - /// Returns true if: - /// - /// - call is `receive_messages_proof` and all messages have been delivered; - /// - /// - call is `receive_messages_delivery_proof` and all messages confirmations have been - /// received. - pub fn was_successful(info: &CallInfo) -> bool { - match info { - CallInfo::ReceiveMessagesProof(info) => { - let inbound_lane_data = - pallet_bridge_messages::InboundLanes::::get(info.base.lane_id); - if info.base.bundled_range.is_empty() { - let post_occupation = - unrewarded_relayers_occupation::(&inbound_lane_data); - // we don't care about `free_relayer_slots` here - it is checked in - // `is_obsolete` and every relayer has delivered at least one message, - // so if relayer slots are released, then message slots are also - // released - return post_occupation.free_message_slots > - info.unrewarded_relayers.free_message_slots - } - - inbound_lane_data.last_delivered_nonce() == *info.base.bundled_range.end() - }, - CallInfo::ReceiveMessagesDeliveryProof(info) => { - let outbound_lane_data = - pallet_bridge_messages::OutboundLanes::::get(info.0.lane_id); - outbound_lane_data.latest_received_nonce == *info.0.bundled_range.end() - }, - } - } -} - -/// Trait representing a call that is a sub type of `pallet_bridge_messages::Call`. -pub trait MessagesCallSubType, I: 'static>: - IsSubType, T>> -{ - /// Create a new instance of `ReceiveMessagesProofInfo` from a `ReceiveMessagesProof` call. 
- fn receive_messages_proof_info(&self) -> Option; - - /// Create a new instance of `ReceiveMessagesDeliveryProofInfo` from - /// a `ReceiveMessagesDeliveryProof` call. - fn receive_messages_delivery_proof_info(&self) -> Option; - - /// Create a new instance of `CallInfo` from a `ReceiveMessagesProof` - /// or a `ReceiveMessagesDeliveryProof` call. - fn call_info(&self) -> Option; - - /// Create a new instance of `CallInfo` from a `ReceiveMessagesProof` - /// or a `ReceiveMessagesDeliveryProof` call, if the call is for the provided lane. - fn call_info_for(&self, lane_id: LaneId) -> Option; - - /// Ensures that a `ReceiveMessagesProof` or a `ReceiveMessagesDeliveryProof` call: - /// - /// - does not deliver already delivered messages. We require all messages in the - /// `ReceiveMessagesProof` call to be undelivered; - /// - /// - does not submit empty `ReceiveMessagesProof` call with zero messages, unless the lane - /// needs to be unblocked by providing relayer rewards proof; - /// - /// - brings no new delivery confirmations in a `ReceiveMessagesDeliveryProof` call. We require - /// at least one new delivery confirmation in the unrewarded relayers set; - /// - /// - does not violate some basic (easy verifiable) messages pallet rules obsolete (like - /// submitting a call when a pallet is halted or delivering messages when a dispatcher is - /// inactive). - /// - /// If one of above rules is violated, the transaction is treated as invalid. 
- fn check_obsolete_call(&self) -> TransactionValidity; -} - -impl< - BridgedHeaderHash, - SourceHeaderChain: bp_messages::target_chain::SourceHeaderChain< - MessagesProof = FromBridgedChainMessagesProof, - >, - TargetHeaderChain: bp_messages::source_chain::TargetHeaderChain< - >::OutboundPayload, - ::AccountId, - MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof, - >, - Call: IsSubType, T>>, - T: frame_system::Config - + Config, - I: 'static, - > MessagesCallSubType for T::RuntimeCall -{ - fn receive_messages_proof_info(&self) -> Option { - if let Some(pallet_bridge_messages::Call::::receive_messages_proof { - ref proof, - .. - }) = self.is_sub_type() - { - let inbound_lane_data = pallet_bridge_messages::InboundLanes::::get(proof.lane); - - return Some(ReceiveMessagesProofInfo { - base: BaseMessagesProofInfo { - lane_id: proof.lane, - // we want all messages in this range to be new for us. Otherwise transaction - // will be considered obsolete. - bundled_range: proof.nonces_start..=proof.nonces_end, - best_stored_nonce: inbound_lane_data.last_delivered_nonce(), - }, - unrewarded_relayers: unrewarded_relayers_occupation::(&inbound_lane_data), - }) - } - - None - } - - fn receive_messages_delivery_proof_info(&self) -> Option { - if let Some(pallet_bridge_messages::Call::::receive_messages_delivery_proof { - ref proof, - ref relayers_state, - .. - }) = self.is_sub_type() - { - let outbound_lane_data = pallet_bridge_messages::OutboundLanes::::get(proof.lane); - - return Some(ReceiveMessagesDeliveryProofInfo(BaseMessagesProofInfo { - lane_id: proof.lane, - // there's a time frame between message delivery, message confirmation and reward - // confirmation. Because of that, we can't assume that our state has been confirmed - // to the bridged chain. So we are accepting any proof that brings new - // confirmations. 
- bundled_range: outbound_lane_data.latest_received_nonce + 1..= - relayers_state.last_delivered_nonce, - best_stored_nonce: outbound_lane_data.latest_received_nonce, - })) - } - - None - } - - fn call_info(&self) -> Option { - if let Some(info) = self.receive_messages_proof_info() { - return Some(CallInfo::ReceiveMessagesProof(info)) - } - - if let Some(info) = self.receive_messages_delivery_proof_info() { - return Some(CallInfo::ReceiveMessagesDeliveryProof(info)) - } - - None - } - - fn call_info_for(&self, lane_id: LaneId) -> Option { - self.call_info().filter(|info| { - let actual_lane_id = match info { - CallInfo::ReceiveMessagesProof(info) => info.base.lane_id, - CallInfo::ReceiveMessagesDeliveryProof(info) => info.0.lane_id, - }; - actual_lane_id == lane_id - }) - } - - fn check_obsolete_call(&self) -> TransactionValidity { - let is_pallet_halted = Pallet::::ensure_not_halted().is_err(); - match self.call_info() { - Some(proof_info) if is_pallet_halted => { - log::trace!( - target: pallet_bridge_messages::LOG_TARGET, - "Rejecting messages transaction on halted pallet: {:?}", - proof_info - ); - - return sp_runtime::transaction_validity::InvalidTransaction::Call.into() - }, - Some(CallInfo::ReceiveMessagesProof(proof_info)) - if proof_info.is_obsolete(T::MessageDispatch::is_active()) => - { - log::trace!( - target: pallet_bridge_messages::LOG_TARGET, - "Rejecting obsolete messages delivery transaction: {:?}", - proof_info - ); - - return sp_runtime::transaction_validity::InvalidTransaction::Stale.into() - }, - Some(CallInfo::ReceiveMessagesDeliveryProof(proof_info)) - if proof_info.is_obsolete() => - { - log::trace!( - target: pallet_bridge_messages::LOG_TARGET, - "Rejecting obsolete messages confirmation transaction: {:?}", - proof_info, - ); - - return sp_runtime::transaction_validity::InvalidTransaction::Stale.into() - }, - _ => {}, - } - - Ok(sp_runtime::transaction_validity::ValidTransaction::default()) - } -} - -/// Returns occupation state of 
unrewarded relayers vector. -fn unrewarded_relayers_occupation, I: 'static>( - inbound_lane_data: &InboundLaneData, -) -> UnrewardedRelayerOccupation { - UnrewardedRelayerOccupation { - free_relayer_slots: T::MaxUnrewardedRelayerEntriesAtInboundLane::get() - .saturating_sub(inbound_lane_data.relayers.len() as MessageNonce), - free_message_slots: { - let unconfirmed_messages = inbound_lane_data - .last_delivered_nonce() - .saturating_sub(inbound_lane_data.last_confirmed_nonce); - T::MaxUnconfirmedMessagesAtInboundLane::get().saturating_sub(unconfirmed_messages) - }, - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - messages::{ - source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, - }, - messages_call_ext::MessagesCallSubType, - mock::{ - DummyMessageDispatch, MaxUnconfirmedMessagesAtInboundLane, - MaxUnrewardedRelayerEntriesAtInboundLane, TestRuntime, ThisChainRuntimeCall, - }, - }; - use bp_messages::{DeliveredMessages, UnrewardedRelayer, UnrewardedRelayersState}; - use sp_std::ops::RangeInclusive; - - fn fill_unrewarded_relayers() { - let mut inbound_lane_state = - pallet_bridge_messages::InboundLanes::::get(LaneId([0, 0, 0, 0])); - for n in 0..MaxUnrewardedRelayerEntriesAtInboundLane::get() { - inbound_lane_state.relayers.push_back(UnrewardedRelayer { - relayer: Default::default(), - messages: DeliveredMessages { begin: n + 1, end: n + 1 }, - }); - } - pallet_bridge_messages::InboundLanes::::insert( - LaneId([0, 0, 0, 0]), - inbound_lane_state, - ); - } - - fn fill_unrewarded_messages() { - let mut inbound_lane_state = - pallet_bridge_messages::InboundLanes::::get(LaneId([0, 0, 0, 0])); - inbound_lane_state.relayers.push_back(UnrewardedRelayer { - relayer: Default::default(), - messages: DeliveredMessages { - begin: 1, - end: MaxUnconfirmedMessagesAtInboundLane::get(), - }, - }); - pallet_bridge_messages::InboundLanes::::insert( - LaneId([0, 0, 0, 0]), - inbound_lane_state, - ); - } - - fn deliver_message_10() 
{ - pallet_bridge_messages::InboundLanes::::insert( - LaneId([0, 0, 0, 0]), - bp_messages::InboundLaneData { relayers: Default::default(), last_confirmed_nonce: 10 }, - ); - } - - fn validate_message_delivery( - nonces_start: bp_messages::MessageNonce, - nonces_end: bp_messages::MessageNonce, - ) -> bool { - ThisChainRuntimeCall::BridgeMessages( - pallet_bridge_messages::Call::::receive_messages_proof { - relayer_id_at_bridged_chain: 42, - messages_count: nonces_end.checked_sub(nonces_start).map(|x| x + 1).unwrap_or(0) - as u32, - dispatch_weight: frame_support::weights::Weight::zero(), - proof: FromBridgedChainMessagesProof { - bridged_header_hash: Default::default(), - storage_proof: vec![], - lane: LaneId([0, 0, 0, 0]), - nonces_start, - nonces_end, - }, - }, - ) - .check_obsolete_call() - .is_ok() - } - - #[test] - fn extension_rejects_obsolete_messages() { - sp_io::TestExternalities::new(Default::default()).execute_with(|| { - // when current best delivered is message#10 and we're trying to deliver messages 8..=9 - // => tx is rejected - deliver_message_10(); - assert!(!validate_message_delivery(8, 9)); - }); - } - - #[test] - fn extension_rejects_same_message() { - sp_io::TestExternalities::new(Default::default()).execute_with(|| { - // when current best delivered is message#10 and we're trying to import messages 10..=10 - // => tx is rejected - deliver_message_10(); - assert!(!validate_message_delivery(8, 10)); - }); - } - - #[test] - fn extension_rejects_call_with_some_obsolete_messages() { - sp_io::TestExternalities::new(Default::default()).execute_with(|| { - // when current best delivered is message#10 and we're trying to deliver messages - // 10..=15 => tx is rejected - deliver_message_10(); - assert!(!validate_message_delivery(10, 15)); - }); - } - - #[test] - fn extension_rejects_call_with_future_messages() { - sp_io::TestExternalities::new(Default::default()).execute_with(|| { - // when current best delivered is message#10 and we're trying to deliver 
messages - // 13..=15 => tx is rejected - deliver_message_10(); - assert!(!validate_message_delivery(13, 15)); - }); - } - - #[test] - fn extension_reject_call_when_dispatcher_is_inactive() { - sp_io::TestExternalities::new(Default::default()).execute_with(|| { - // when current best delivered is message#10 and we're trying to deliver message 11..=15 - // => tx is accepted, but we have inactive dispatcher, so... - deliver_message_10(); - - DummyMessageDispatch::deactivate(); - assert!(!validate_message_delivery(11, 15)); - }); - } - - #[test] - fn extension_rejects_empty_delivery_with_rewards_confirmations_if_there_are_free_relayer_and_message_slots( - ) { - sp_io::TestExternalities::new(Default::default()).execute_with(|| { - deliver_message_10(); - assert!(!validate_message_delivery(10, 9)); - }); - } - - #[test] - fn extension_accepts_empty_delivery_with_rewards_confirmations_if_there_are_no_free_relayer_slots( - ) { - sp_io::TestExternalities::new(Default::default()).execute_with(|| { - deliver_message_10(); - fill_unrewarded_relayers(); - assert!(validate_message_delivery(10, 9)); - }); - } - - #[test] - fn extension_accepts_empty_delivery_with_rewards_confirmations_if_there_are_no_free_message_slots( - ) { - sp_io::TestExternalities::new(Default::default()).execute_with(|| { - fill_unrewarded_messages(); - assert!(validate_message_delivery( - MaxUnconfirmedMessagesAtInboundLane::get(), - MaxUnconfirmedMessagesAtInboundLane::get() - 1 - )); - }); - } - - #[test] - fn extension_accepts_new_messages() { - sp_io::TestExternalities::new(Default::default()).execute_with(|| { - // when current best delivered is message#10 and we're trying to deliver message 11..=15 - // => tx is accepted - deliver_message_10(); - assert!(validate_message_delivery(11, 15)); - }); - } - - fn confirm_message_10() { - pallet_bridge_messages::OutboundLanes::::insert( - LaneId([0, 0, 0, 0]), - bp_messages::OutboundLaneData { - oldest_unpruned_nonce: 0, - latest_received_nonce: 10, - 
latest_generated_nonce: 10, - }, - ); - } - - fn validate_message_confirmation(last_delivered_nonce: bp_messages::MessageNonce) -> bool { - ThisChainRuntimeCall::BridgeMessages( - pallet_bridge_messages::Call::::receive_messages_delivery_proof { - proof: FromBridgedChainMessagesDeliveryProof { - bridged_header_hash: Default::default(), - storage_proof: Vec::new(), - lane: LaneId([0, 0, 0, 0]), - }, - relayers_state: UnrewardedRelayersState { - last_delivered_nonce, - ..Default::default() - }, - }, - ) - .check_obsolete_call() - .is_ok() - } - - #[test] - fn extension_rejects_obsolete_confirmations() { - sp_io::TestExternalities::new(Default::default()).execute_with(|| { - // when current best confirmed is message#10 and we're trying to confirm message#5 => tx - // is rejected - confirm_message_10(); - assert!(!validate_message_confirmation(5)); - }); - } - - #[test] - fn extension_rejects_same_confirmation() { - sp_io::TestExternalities::new(Default::default()).execute_with(|| { - // when current best confirmed is message#10 and we're trying to confirm message#10 => - // tx is rejected - confirm_message_10(); - assert!(!validate_message_confirmation(10)); - }); - } - - #[test] - fn extension_rejects_empty_confirmation_even_if_there_are_no_free_unrewarded_entries() { - sp_io::TestExternalities::new(Default::default()).execute_with(|| { - confirm_message_10(); - fill_unrewarded_relayers(); - assert!(!validate_message_confirmation(10)); - }); - } - - #[test] - fn extension_accepts_new_confirmation() { - sp_io::TestExternalities::new(Default::default()).execute_with(|| { - // when current best confirmed is message#10 and we're trying to confirm message#15 => - // tx is accepted - confirm_message_10(); - assert!(validate_message_confirmation(15)); - }); - } - - fn was_message_delivery_successful( - bundled_range: RangeInclusive, - is_empty: bool, - ) -> bool { - CallHelper::::was_successful(&CallInfo::ReceiveMessagesProof( - ReceiveMessagesProofInfo { - base: 
BaseMessagesProofInfo { - lane_id: LaneId([0, 0, 0, 0]), - bundled_range, - best_stored_nonce: 0, // doesn't matter for `was_successful` - }, - unrewarded_relayers: UnrewardedRelayerOccupation { - free_relayer_slots: 0, // doesn't matter for `was_successful` - free_message_slots: if is_empty { - 0 - } else { - MaxUnconfirmedMessagesAtInboundLane::get() - }, - }, - }, - )) - } - - #[test] - #[allow(clippy::reversed_empty_ranges)] - fn was_successful_returns_false_for_failed_reward_confirmation_transaction() { - sp_io::TestExternalities::new(Default::default()).execute_with(|| { - fill_unrewarded_messages(); - assert!(!was_message_delivery_successful(10..=9, true)); - }); - } - - #[test] - #[allow(clippy::reversed_empty_ranges)] - fn was_successful_returns_true_for_successful_reward_confirmation_transaction() { - sp_io::TestExternalities::new(Default::default()).execute_with(|| { - assert!(was_message_delivery_successful(10..=9, true)); - }); - } - - #[test] - fn was_successful_returns_false_for_failed_delivery() { - sp_io::TestExternalities::new(Default::default()).execute_with(|| { - deliver_message_10(); - assert!(!was_message_delivery_successful(10..=12, false)); - }); - } - - #[test] - fn was_successful_returns_false_for_partially_successful_delivery() { - sp_io::TestExternalities::new(Default::default()).execute_with(|| { - deliver_message_10(); - assert!(!was_message_delivery_successful(9..=12, false)); - }); - } - - #[test] - fn was_successful_returns_true_for_successful_delivery() { - sp_io::TestExternalities::new(Default::default()).execute_with(|| { - deliver_message_10(); - assert!(was_message_delivery_successful(9..=10, false)); - }); - } - - fn was_message_confirmation_successful(bundled_range: RangeInclusive) -> bool { - CallHelper::::was_successful(&CallInfo::ReceiveMessagesDeliveryProof( - ReceiveMessagesDeliveryProofInfo(BaseMessagesProofInfo { - lane_id: LaneId([0, 0, 0, 0]), - bundled_range, - best_stored_nonce: 0, // doesn't matter for 
`was_successful` - }), - )) - } - - #[test] - fn was_successful_returns_false_for_failed_confirmation() { - sp_io::TestExternalities::new(Default::default()).execute_with(|| { - confirm_message_10(); - assert!(!was_message_confirmation_successful(10..=12)); - }); - } - - #[test] - fn was_successful_returns_false_for_partially_successful_confirmation() { - sp_io::TestExternalities::new(Default::default()).execute_with(|| { - confirm_message_10(); - assert!(!was_message_confirmation_successful(9..=12)); - }); - } - - #[test] - fn was_successful_returns_true_for_successful_confirmation() { - sp_io::TestExternalities::new(Default::default()).execute_with(|| { - confirm_message_10(); - assert!(was_message_confirmation_successful(9..=10)); - }); - } -} diff --git a/bin/runtime-common/src/messages_generation.rs b/bin/runtime-common/src/messages_generation.rs deleted file mode 100644 index c37aaa5d4d5378a1b76507e017c73aec3c7aabbd..0000000000000000000000000000000000000000 --- a/bin/runtime-common/src/messages_generation.rs +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Helpers for generating message storage proofs, that are used by tests and by benchmarks. 
- -use crate::messages::{AccountIdOf, BridgedChain, HashOf, HasherOf, MessageBridge, ThisChain}; - -use bp_messages::{ - storage_keys, InboundLaneData, LaneId, MessageKey, MessageNonce, MessagePayload, - OutboundLaneData, -}; -use bp_runtime::{record_all_trie_keys, RawStorageProof, StorageProofSize}; -use codec::Encode; -use sp_std::{ops::RangeInclusive, prelude::*}; -use sp_trie::{trie_types::TrieDBMutBuilderV1, LayoutV1, MemoryDB, TrieMut}; - -/// Simple and correct message data encode function. -pub fn encode_all_messages(_: MessageNonce, m: &MessagePayload) -> Option> { - Some(m.encode()) -} - -/// Simple and correct outbound lane data encode function. -pub fn encode_lane_data(d: &OutboundLaneData) -> Vec { - d.encode() -} - -/// Prepare storage proof of given messages. -/// -/// Returns state trie root and nodes with prepared messages. -pub fn prepare_messages_storage_proof( - lane: LaneId, - message_nonces: RangeInclusive, - outbound_lane_data: Option, - size: StorageProofSize, - message_payload: MessagePayload, - encode_message: impl Fn(MessageNonce, &MessagePayload) -> Option>, - encode_outbound_lane_data: impl Fn(&OutboundLaneData) -> Vec, -) -> (HashOf>, RawStorageProof) -where - B: MessageBridge, - HashOf>: Copy + Default, -{ - // prepare Bridged chain storage with messages and (optionally) outbound lane state - let message_count = message_nonces.end().saturating_sub(*message_nonces.start()) + 1; - let mut storage_keys = Vec::with_capacity(message_count as usize + 1); - let mut root = Default::default(); - let mut mdb = MemoryDB::default(); - { - let mut trie = - TrieDBMutBuilderV1::>>::new(&mut mdb, &mut root).build(); - - // insert messages - for (i, nonce) in message_nonces.into_iter().enumerate() { - let message_key = MessageKey { lane_id: lane, nonce }; - let message_payload = match encode_message(nonce, &message_payload) { - Some(message_payload) => - if i == 0 { - grow_trie_leaf_value(message_payload, size) - } else { - message_payload - }, - None 
=> continue, - }; - let storage_key = storage_keys::message_key( - B::BRIDGED_MESSAGES_PALLET_NAME, - &message_key.lane_id, - message_key.nonce, - ) - .0; - trie.insert(&storage_key, &message_payload) - .map_err(|_| "TrieMut::insert has failed") - .expect("TrieMut::insert should not fail in benchmarks"); - storage_keys.push(storage_key); - } - - // insert outbound lane state - if let Some(outbound_lane_data) = outbound_lane_data.as_ref().map(encode_outbound_lane_data) - { - let storage_key = - storage_keys::outbound_lane_data_key(B::BRIDGED_MESSAGES_PALLET_NAME, &lane).0; - trie.insert(&storage_key, &outbound_lane_data) - .map_err(|_| "TrieMut::insert has failed") - .expect("TrieMut::insert should not fail in benchmarks"); - storage_keys.push(storage_key); - } - } - - // generate storage proof to be delivered to This chain - let storage_proof = record_all_trie_keys::>>, _>(&mdb, &root) - .map_err(|_| "record_all_trie_keys has failed") - .expect("record_all_trie_keys should not fail in benchmarks"); - (root, storage_proof) -} - -/// Prepare storage proof of given messages delivery. -/// -/// Returns state trie root and nodes with prepared messages. 
-pub fn prepare_message_delivery_storage_proof( - lane: LaneId, - inbound_lane_data: InboundLaneData>>, - size: StorageProofSize, -) -> (HashOf>, RawStorageProof) -where - B: MessageBridge, -{ - // prepare Bridged chain storage with inbound lane state - let storage_key = storage_keys::inbound_lane_data_key(B::BRIDGED_MESSAGES_PALLET_NAME, &lane).0; - let mut root = Default::default(); - let mut mdb = MemoryDB::default(); - { - let mut trie = - TrieDBMutBuilderV1::>>::new(&mut mdb, &mut root).build(); - let inbound_lane_data = grow_trie_leaf_value(inbound_lane_data.encode(), size); - trie.insert(&storage_key, &inbound_lane_data) - .map_err(|_| "TrieMut::insert has failed") - .expect("TrieMut::insert should not fail in benchmarks"); - } - - // generate storage proof to be delivered to This chain - let storage_proof = record_all_trie_keys::>>, _>(&mdb, &root) - .map_err(|_| "record_all_trie_keys has failed") - .expect("record_all_trie_keys should not fail in benchmarks"); - - (root, storage_proof) -} - -/// Add extra data to the trie leaf value so that it'll be of given size. -pub fn grow_trie_leaf_value(mut value: Vec, size: StorageProofSize) -> Vec { - match size { - StorageProofSize::Minimal(_) => (), - StorageProofSize::HasLargeLeaf(size) if size as usize > value.len() => { - value.extend(sp_std::iter::repeat(42u8).take(size as usize - value.len())); - }, - StorageProofSize::HasLargeLeaf(_) => (), - } - value -} diff --git a/bin/runtime-common/src/messages_xcm_extension.rs b/bin/runtime-common/src/messages_xcm_extension.rs deleted file mode 100644 index e3da6155f08a198d5469adbfc64e40213eddf8eb..0000000000000000000000000000000000000000 --- a/bin/runtime-common/src/messages_xcm_extension.rs +++ /dev/null @@ -1,502 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Module provides utilities for easier XCM handling, e.g: -//! `XcmExecutor` -> `MessageSender` -> `OutboundMessageQueue` -//! | -//! `Relayer` -//! | -//! `XcmRouter` <- `MessageDispatch` <- `InboundMessageQueue` - -use bp_messages::{ - source_chain::OnMessagesDelivered, - target_chain::{DispatchMessage, MessageDispatch}, - LaneId, MessageNonce, -}; -use bp_runtime::messages::MessageDispatchResult; -pub use bp_xcm_bridge_hub::XcmAsPlainPayload; -use bp_xcm_bridge_hub_router::XcmChannelStatusProvider; -use codec::{Decode, Encode}; -use frame_support::{traits::Get, weights::Weight, CloneNoBound, EqNoBound, PartialEqNoBound}; -use pallet_bridge_messages::{ - Config as MessagesConfig, OutboundLanesCongestedSignals, WeightInfoExt as MessagesPalletWeights, -}; -use scale_info::TypeInfo; -use sp_runtime::SaturatedConversion; -use sp_std::{fmt::Debug, marker::PhantomData}; -use xcm::prelude::*; -use xcm_builder::{DispatchBlob, DispatchBlobError}; - -/// Message dispatch result type for single message. -#[derive(CloneNoBound, EqNoBound, PartialEqNoBound, Encode, Decode, Debug, TypeInfo)] -pub enum XcmBlobMessageDispatchResult { - /// We've been unable to decode message payload. - InvalidPayload, - /// Message has been dispatched. - Dispatched, - /// Message has **NOT** been dispatched because of given error. 
- NotDispatched(#[codec(skip)] Option), -} - -/// [`XcmBlobMessageDispatch`] is responsible for dispatching received messages -/// -/// It needs to be used at the target bridge hub. -pub struct XcmBlobMessageDispatch { - _marker: sp_std::marker::PhantomData<(DispatchBlob, Weights, Channel)>, -} - -impl< - BlobDispatcher: DispatchBlob, - Weights: MessagesPalletWeights, - Channel: XcmChannelStatusProvider, - > MessageDispatch for XcmBlobMessageDispatch -{ - type DispatchPayload = XcmAsPlainPayload; - type DispatchLevelResult = XcmBlobMessageDispatchResult; - - fn is_active() -> bool { - !Channel::is_congested() - } - - fn dispatch_weight(message: &mut DispatchMessage) -> Weight { - match message.data.payload { - Ok(ref payload) => { - let payload_size = payload.encoded_size().saturated_into(); - Weights::message_dispatch_weight(payload_size) - }, - Err(_) => Weight::zero(), - } - } - - fn dispatch( - message: DispatchMessage, - ) -> MessageDispatchResult { - let payload = match message.data.payload { - Ok(payload) => payload, - Err(e) => { - log::error!( - target: crate::LOG_TARGET_BRIDGE_DISPATCH, - "[XcmBlobMessageDispatch] payload error: {:?} - message_nonce: {:?}", - e, - message.key.nonce - ); - return MessageDispatchResult { - unspent_weight: Weight::zero(), - dispatch_level_result: XcmBlobMessageDispatchResult::InvalidPayload, - } - }, - }; - let dispatch_level_result = match BlobDispatcher::dispatch_blob(payload) { - Ok(_) => { - log::debug!( - target: crate::LOG_TARGET_BRIDGE_DISPATCH, - "[XcmBlobMessageDispatch] DispatchBlob::dispatch_blob was ok - message_nonce: {:?}", - message.key.nonce - ); - XcmBlobMessageDispatchResult::Dispatched - }, - Err(e) => { - log::error!( - target: crate::LOG_TARGET_BRIDGE_DISPATCH, - "[XcmBlobMessageDispatch] DispatchBlob::dispatch_blob failed, error: {:?} - message_nonce: {:?}", - e, message.key.nonce - ); - XcmBlobMessageDispatchResult::NotDispatched(Some(e)) - }, - }; - MessageDispatchResult { unspent_weight: 
Weight::zero(), dispatch_level_result } - } -} - -/// A pair of sending chain location and message lane, used by this chain to send messages -/// over the bridge. -#[cfg_attr(feature = "std", derive(Debug, Eq, PartialEq))] -pub struct SenderAndLane { - /// Sending chain relative location. - pub location: Location, - /// Message lane, used by the sending chain. - pub lane: LaneId, -} - -impl SenderAndLane { - /// Create new object using provided location and lane. - pub fn new(location: Location, lane: LaneId) -> Self { - SenderAndLane { location, lane } - } -} - -/// [`XcmBlobHauler`] is responsible for sending messages to the bridge "point-to-point link" from -/// one side, where on the other it can be dispatched by [`XcmBlobMessageDispatch`]. -pub trait XcmBlobHauler { - /// Runtime that has messages pallet deployed. - type Runtime: MessagesConfig; - /// Instance of the messages pallet that is used to send messages. - type MessagesInstance: 'static; - - /// Actual XCM message sender (`HRMP` or `UMP`) to the source chain - /// location (`Self::SenderAndLane::get().location`). - type ToSourceChainSender: SendXcm; - /// An XCM message that is sent to the sending chain when the bridge queue becomes congested. - type CongestedMessage: Get>>; - /// An XCM message that is sent to the sending chain when the bridge queue becomes not - /// congested. - type UncongestedMessage: Get>>; - - /// Returns `true` if we want to handle congestion. - fn supports_congestion_detection() -> bool { - Self::CongestedMessage::get().is_some() || Self::UncongestedMessage::get().is_some() - } -} - -/// XCM bridge adapter which connects [`XcmBlobHauler`] with [`pallet_bridge_messages`] and -/// makes sure that XCM blob is sent to the outbound lane to be relayed. -/// -/// It needs to be used at the source bridge hub. 
-pub struct XcmBlobHaulerAdapter( - sp_std::marker::PhantomData<(XcmBlobHauler, Lanes)>, -); - -impl< - H: XcmBlobHauler, - Lanes: Get>, - > OnMessagesDelivered for XcmBlobHaulerAdapter -{ - fn on_messages_delivered(lane: LaneId, enqueued_messages: MessageNonce) { - if let Some(sender_and_lane) = - Lanes::get().iter().find(|link| link.0.lane == lane).map(|link| &link.0) - { - // notify XCM queue manager about updated lane state - LocalXcmQueueManager::::on_bridge_messages_delivered( - sender_and_lane, - enqueued_messages, - ); - } - } -} - -/// Manager of local XCM queues (and indirectly - underlying transport channels) that -/// controls the queue state. -/// -/// It needs to be used at the source bridge hub. -pub struct LocalXcmQueueManager(PhantomData); - -/// Maximal number of messages in the outbound bridge queue. Once we reach this limit, we -/// send a "congestion" XCM message to the sending chain. -const OUTBOUND_LANE_CONGESTED_THRESHOLD: MessageNonce = 8_192; - -/// After we have sent "congestion" XCM message to the sending chain, we wait until number -/// of messages in the outbound bridge queue drops to this count, before sending `uncongestion` -/// XCM message. -const OUTBOUND_LANE_UNCONGESTED_THRESHOLD: MessageNonce = 1_024; - -impl LocalXcmQueueManager { - /// Must be called whenever we push a message to the bridge lane. 
- pub fn on_bridge_message_enqueued( - sender_and_lane: &SenderAndLane, - enqueued_messages: MessageNonce, - ) { - // skip if we dont want to handle congestion - if !H::supports_congestion_detection() { - return - } - - // if we have already sent the congestion signal, we don't want to do anything - if Self::is_congested_signal_sent(sender_and_lane.lane) { - return - } - - // if the bridge queue is not congested, we don't want to do anything - let is_congested = enqueued_messages > OUTBOUND_LANE_CONGESTED_THRESHOLD; - if !is_congested { - return - } - - log::info!( - target: crate::LOG_TARGET_BRIDGE_DISPATCH, - "Sending 'congested' XCM message to {:?} to avoid overloading lane {:?}: there are\ - {} messages queued at the bridge queue", - sender_and_lane.location, - sender_and_lane.lane, - enqueued_messages, - ); - - if let Err(e) = Self::send_congested_signal(sender_and_lane) { - log::info!( - target: crate::LOG_TARGET_BRIDGE_DISPATCH, - "Failed to send the 'congested' XCM message to {:?}: {:?}", - sender_and_lane.location, - e, - ); - } - } - - /// Must be called whenever we receive a message delivery confirmation. - pub fn on_bridge_messages_delivered( - sender_and_lane: &SenderAndLane, - enqueued_messages: MessageNonce, - ) { - // skip if we dont want to handle congestion - if !H::supports_congestion_detection() { - return - } - - // if we have not sent the congestion signal before, we don't want to do anything - if !Self::is_congested_signal_sent(sender_and_lane.lane) { - return - } - - // if the bridge queue is still congested, we don't want to do anything - let is_congested = enqueued_messages > OUTBOUND_LANE_UNCONGESTED_THRESHOLD; - if is_congested { - return - } - - log::info!( - target: crate::LOG_TARGET_BRIDGE_DISPATCH, - "Sending 'uncongested' XCM message to {:?}. 
Lane {:?}: there are\ - {} messages queued at the bridge queue", - sender_and_lane.location, - sender_and_lane.lane, - enqueued_messages, - ); - - if let Err(e) = Self::send_uncongested_signal(sender_and_lane) { - log::info!( - target: crate::LOG_TARGET_BRIDGE_DISPATCH, - "Failed to send the 'uncongested' XCM message to {:?}: {:?}", - sender_and_lane.location, - e, - ); - } - } - - /// Returns true if we have sent "congested" signal to the `sending_chain_location`. - fn is_congested_signal_sent(lane: LaneId) -> bool { - OutboundLanesCongestedSignals::::get(lane) - } - - /// Send congested signal to the `sending_chain_location`. - fn send_congested_signal(sender_and_lane: &SenderAndLane) -> Result<(), SendError> { - if let Some(msg) = H::CongestedMessage::get() { - send_xcm::(sender_and_lane.location.clone(), msg)?; - OutboundLanesCongestedSignals::::insert( - sender_and_lane.lane, - true, - ); - } - Ok(()) - } - - /// Send `uncongested` signal to the `sending_chain_location`. - fn send_uncongested_signal(sender_and_lane: &SenderAndLane) -> Result<(), SendError> { - if let Some(msg) = H::UncongestedMessage::get() { - send_xcm::(sender_and_lane.location.clone(), msg)?; - OutboundLanesCongestedSignals::::remove( - sender_and_lane.lane, - ); - } - Ok(()) - } -} - -/// Adapter for the implementation of `GetVersion`, which attempts to find the minimal -/// configured XCM version between the destination `dest` and the bridge hub location provided as -/// `Get`. 
-pub struct XcmVersionOfDestAndRemoteBridge( - sp_std::marker::PhantomData<(Version, RemoteBridge)>, -); -impl> GetVersion - for XcmVersionOfDestAndRemoteBridge -{ - fn get_version_for(dest: &Location) -> Option { - let dest_version = Version::get_version_for(dest); - let bridge_hub_version = Version::get_version_for(&RemoteBridge::get()); - - match (dest_version, bridge_hub_version) { - (Some(dv), Some(bhv)) => Some(sp_std::cmp::min(dv, bhv)), - (Some(dv), None) => Some(dv), - (None, Some(bhv)) => Some(bhv), - (None, None) => None, - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::*; - - use bp_messages::OutboundLaneData; - use frame_support::parameter_types; - use pallet_bridge_messages::OutboundLanes; - - parameter_types! { - pub TestSenderAndLane: SenderAndLane = SenderAndLane { - location: Location::new(1, [Parachain(1000)]), - lane: TEST_LANE_ID, - }; - pub TestLanes: sp_std::vec::Vec<(SenderAndLane, (NetworkId, InteriorLocation))> = sp_std::vec![ - (TestSenderAndLane::get(), (NetworkId::ByGenesis([0; 32]), InteriorLocation::Here)) - ]; - pub DummyXcmMessage: Xcm<()> = Xcm::new(); - } - - struct DummySendXcm; - - impl DummySendXcm { - fn messages_sent() -> u32 { - frame_support::storage::unhashed::get(b"DummySendXcm").unwrap_or(0) - } - } - - impl SendXcm for DummySendXcm { - type Ticket = (); - - fn validate( - _destination: &mut Option, - _message: &mut Option>, - ) -> SendResult { - Ok(((), Default::default())) - } - - fn deliver(_ticket: Self::Ticket) -> Result { - let messages_sent: u32 = Self::messages_sent(); - frame_support::storage::unhashed::put(b"DummySendXcm", &(messages_sent + 1)); - Ok(XcmHash::default()) - } - } - - struct TestBlobHauler; - - impl XcmBlobHauler for TestBlobHauler { - type Runtime = TestRuntime; - type MessagesInstance = (); - - type ToSourceChainSender = DummySendXcm; - type CongestedMessage = DummyXcmMessage; - type UncongestedMessage = DummyXcmMessage; - } - - type TestBlobHaulerAdapter = 
XcmBlobHaulerAdapter; - - fn fill_up_lane_to_congestion() -> MessageNonce { - let latest_generated_nonce = OUTBOUND_LANE_CONGESTED_THRESHOLD; - OutboundLanes::::insert( - TEST_LANE_ID, - OutboundLaneData { - oldest_unpruned_nonce: 0, - latest_received_nonce: 0, - latest_generated_nonce, - }, - ); - latest_generated_nonce - } - - #[test] - fn congested_signal_is_not_sent_twice() { - run_test(|| { - let enqueued = fill_up_lane_to_congestion(); - - // next sent message leads to congested signal - LocalXcmQueueManager::::on_bridge_message_enqueued( - &TestSenderAndLane::get(), - enqueued + 1, - ); - assert_eq!(DummySendXcm::messages_sent(), 1); - - // next sent message => we don't sent another congested signal - LocalXcmQueueManager::::on_bridge_message_enqueued( - &TestSenderAndLane::get(), - enqueued, - ); - assert_eq!(DummySendXcm::messages_sent(), 1); - }); - } - - #[test] - fn congested_signal_is_not_sent_when_outbound_lane_is_not_congested() { - run_test(|| { - LocalXcmQueueManager::::on_bridge_message_enqueued( - &TestSenderAndLane::get(), - 1, - ); - assert_eq!(DummySendXcm::messages_sent(), 0); - }); - } - - #[test] - fn congested_signal_is_sent_when_outbound_lane_is_congested() { - run_test(|| { - let enqueued = fill_up_lane_to_congestion(); - - // next sent message leads to congested signal - LocalXcmQueueManager::::on_bridge_message_enqueued( - &TestSenderAndLane::get(), - enqueued + 1, - ); - assert_eq!(DummySendXcm::messages_sent(), 1); - assert!(LocalXcmQueueManager::::is_congested_signal_sent(TEST_LANE_ID)); - }); - } - - #[test] - fn uncongested_signal_is_not_sent_when_messages_are_delivered_at_other_lane() { - run_test(|| { - LocalXcmQueueManager::::send_congested_signal(&TestSenderAndLane::get()).unwrap(); - assert_eq!(DummySendXcm::messages_sent(), 1); - - // when we receive a delivery report for other lane, we don't send an uncongested signal - TestBlobHaulerAdapter::on_messages_delivered(LaneId([42, 42, 42, 42]), 0); - 
assert_eq!(DummySendXcm::messages_sent(), 1); - }); - } - - #[test] - fn uncongested_signal_is_not_sent_when_we_havent_send_congested_signal_before() { - run_test(|| { - TestBlobHaulerAdapter::on_messages_delivered(TEST_LANE_ID, 0); - assert_eq!(DummySendXcm::messages_sent(), 0); - }); - } - - #[test] - fn uncongested_signal_is_not_sent_if_outbound_lane_is_still_congested() { - run_test(|| { - LocalXcmQueueManager::::send_congested_signal(&TestSenderAndLane::get()).unwrap(); - assert_eq!(DummySendXcm::messages_sent(), 1); - - TestBlobHaulerAdapter::on_messages_delivered( - TEST_LANE_ID, - OUTBOUND_LANE_UNCONGESTED_THRESHOLD + 1, - ); - assert_eq!(DummySendXcm::messages_sent(), 1); - }); - } - - #[test] - fn uncongested_signal_is_sent_if_outbound_lane_is_uncongested() { - run_test(|| { - LocalXcmQueueManager::::send_congested_signal(&TestSenderAndLane::get()).unwrap(); - assert_eq!(DummySendXcm::messages_sent(), 1); - - TestBlobHaulerAdapter::on_messages_delivered( - TEST_LANE_ID, - OUTBOUND_LANE_UNCONGESTED_THRESHOLD, - ); - assert_eq!(DummySendXcm::messages_sent(), 2); - }); - } -} diff --git a/bin/runtime-common/src/mock.rs b/bin/runtime-common/src/mock.rs deleted file mode 100644 index 3d15ba8b7dd3c3a25d887be7fff1814128cb3802..0000000000000000000000000000000000000000 --- a/bin/runtime-common/src/mock.rs +++ /dev/null @@ -1,428 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! A mock runtime for testing different stuff in the crate. - -#![cfg(test)] - -use crate::messages::{ - source::{ - FromThisChainMaximalOutboundPayloadSize, FromThisChainMessagePayload, - TargetHeaderChainAdapter, - }, - target::{FromBridgedChainMessagePayload, SourceHeaderChainAdapter}, - BridgedChainWithMessages, HashOf, MessageBridge, ThisChainWithMessages, -}; - -use bp_header_chain::{ChainWithGrandpa, HeaderChain}; -use bp_messages::{ - target_chain::{DispatchMessage, MessageDispatch}, - LaneId, MessageNonce, -}; -use bp_parachains::SingleParaStoredHeaderDataBuilder; -use bp_relayers::PayRewardFromAccount; -use bp_runtime::{ - messages::MessageDispatchResult, Chain, ChainId, Parachain, UnderlyingChainProvider, -}; -use codec::{Decode, Encode}; -use frame_support::{ - derive_impl, parameter_types, - weights::{ConstantMultiplier, IdentityFee, RuntimeDbWeight, Weight}, -}; -use pallet_transaction_payment::Multiplier; -use sp_runtime::{ - testing::H256, - traits::{BlakeTwo256, ConstU32, ConstU64, ConstU8}, - FixedPointNumber, Perquintill, -}; - -/// Account identifier at `ThisChain`. -pub type ThisChainAccountId = u64; -/// Balance at `ThisChain`. -pub type ThisChainBalance = u64; -/// Block number at `ThisChain`. -pub type ThisChainBlockNumber = u32; -/// Hash at `ThisChain`. -pub type ThisChainHash = H256; -/// Hasher at `ThisChain`. -pub type ThisChainHasher = BlakeTwo256; -/// Runtime call at `ThisChain`. -pub type ThisChainRuntimeCall = RuntimeCall; -/// Runtime call origin at `ThisChain`. -pub type ThisChainCallOrigin = RuntimeOrigin; -/// Header of `ThisChain`. -pub type ThisChainHeader = sp_runtime::generic::Header; -/// Block of `ThisChain`. -pub type ThisChainBlock = frame_system::mocking::MockBlockU32; - -/// Account identifier at the `BridgedChain`. -pub type BridgedChainAccountId = u128; -/// Balance at the `BridgedChain`. 
-pub type BridgedChainBalance = u128; -/// Block number at the `BridgedChain`. -pub type BridgedChainBlockNumber = u32; -/// Hash at the `BridgedChain`. -pub type BridgedChainHash = H256; -/// Hasher at the `BridgedChain`. -pub type BridgedChainHasher = BlakeTwo256; -/// Header of the `BridgedChain`. -pub type BridgedChainHeader = - sp_runtime::generic::Header; - -/// Rewards payment procedure. -pub type TestPaymentProcedure = PayRewardFromAccount; -/// Stake that we are using in tests. -pub type TestStake = ConstU64<5_000>; -/// Stake and slash mechanism to use in tests. -pub type TestStakeAndSlash = pallet_bridge_relayers::StakeAndSlashNamed< - ThisChainAccountId, - ThisChainBlockNumber, - Balances, - ReserveId, - TestStake, - ConstU32<8>, ->; - -/// Message lane used in tests. -pub const TEST_LANE_ID: LaneId = LaneId([0, 0, 0, 0]); -/// Bridged chain id used in tests. -pub const TEST_BRIDGED_CHAIN_ID: ChainId = *b"brdg"; -/// Maximal extrinsic weight at the `BridgedChain`. -pub const BRIDGED_CHAIN_MAX_EXTRINSIC_WEIGHT: usize = 2048; -/// Maximal extrinsic size at the `BridgedChain`. -pub const BRIDGED_CHAIN_MAX_EXTRINSIC_SIZE: u32 = 1024; - -frame_support::construct_runtime! { - pub enum TestRuntime - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Utility: pallet_utility, - Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, - TransactionPayment: pallet_transaction_payment::{Pallet, Storage, Event}, - BridgeRelayers: pallet_bridge_relayers::{Pallet, Call, Storage, Event}, - BridgeGrandpa: pallet_bridge_grandpa::{Pallet, Call, Storage, Event}, - BridgeParachains: pallet_bridge_parachains::{Pallet, Call, Storage, Event}, - BridgeMessages: pallet_bridge_messages::{Pallet, Call, Storage, Event, Config}, - } -} - -crate::generate_bridge_reject_obsolete_headers_and_messages! { - ThisChainRuntimeCall, ThisChainAccountId, - BridgeGrandpa, BridgeParachains, BridgeMessages -} - -parameter_types! 
{ - pub const ActiveOutboundLanes: &'static [LaneId] = &[TEST_LANE_ID]; - pub const BridgedChainId: ChainId = TEST_BRIDGED_CHAIN_ID; - pub const BridgedParasPalletName: &'static str = "Paras"; - pub const ExistentialDeposit: ThisChainBalance = 500; - pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { read: 1, write: 2 }; - pub const TargetBlockFullness: Perquintill = Perquintill::from_percent(25); - pub const TransactionBaseFee: ThisChainBalance = 0; - pub const TransactionByteFee: ThisChainBalance = 1; - pub AdjustmentVariable: Multiplier = Multiplier::saturating_from_rational(3, 100_000); - pub MinimumMultiplier: Multiplier = Multiplier::saturating_from_rational(1, 1_000_000u128); - pub MaximumMultiplier: Multiplier = sp_runtime::traits::Bounded::max_value(); - pub const MaxUnrewardedRelayerEntriesAtInboundLane: MessageNonce = 16; - pub const MaxUnconfirmedMessagesAtInboundLane: MessageNonce = 1_000; - pub const ReserveId: [u8; 8] = *b"brdgrlrs"; -} - -#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] -impl frame_system::Config for TestRuntime { - type Hash = ThisChainHash; - type Hashing = ThisChainHasher; - type AccountId = ThisChainAccountId; - type Block = ThisChainBlock; - type AccountData = pallet_balances::AccountData; - type BlockHashCount = ConstU32<250>; -} - -impl pallet_utility::Config for TestRuntime { - type RuntimeEvent = RuntimeEvent; - type RuntimeCall = RuntimeCall; - type PalletsOrigin = OriginCaller; - type WeightInfo = (); -} - -#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] -impl pallet_balances::Config for TestRuntime { - type ReserveIdentifier = [u8; 8]; - type AccountStore = System; -} - -#[derive_impl(pallet_transaction_payment::config_preludes::TestDefaultConfig as pallet_transaction_payment::DefaultConfig)] -impl pallet_transaction_payment::Config for TestRuntime { - type OnChargeTransaction = 
pallet_transaction_payment::CurrencyAdapter; - type OperationalFeeMultiplier = ConstU8<5>; - type WeightToFee = IdentityFee; - type LengthToFee = ConstantMultiplier; - type FeeMultiplierUpdate = pallet_transaction_payment::TargetedFeeAdjustment< - TestRuntime, - TargetBlockFullness, - AdjustmentVariable, - MinimumMultiplier, - MaximumMultiplier, - >; -} - -impl pallet_bridge_grandpa::Config for TestRuntime { - type RuntimeEvent = RuntimeEvent; - type BridgedChain = BridgedUnderlyingChain; - type MaxFreeHeadersPerBlock = ConstU32<4>; - type FreeHeadersInterval = ConstU32<1_024>; - type HeadersToKeep = ConstU32<8>; - type WeightInfo = pallet_bridge_grandpa::weights::BridgeWeight; -} - -impl pallet_bridge_parachains::Config for TestRuntime { - type RuntimeEvent = RuntimeEvent; - type BridgesGrandpaPalletInstance = (); - type ParasPalletName = BridgedParasPalletName; - type ParaStoredHeaderDataBuilder = - SingleParaStoredHeaderDataBuilder; - type HeadsToKeep = ConstU32<8>; - type MaxParaHeadDataSize = ConstU32<1024>; - type WeightInfo = pallet_bridge_parachains::weights::BridgeWeight; -} - -impl pallet_bridge_messages::Config for TestRuntime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = pallet_bridge_messages::weights::BridgeWeight; - type ActiveOutboundLanes = ActiveOutboundLanes; - type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; - type MaxUnconfirmedMessagesAtInboundLane = MaxUnconfirmedMessagesAtInboundLane; - - type MaximalOutboundPayloadSize = FromThisChainMaximalOutboundPayloadSize; - type OutboundPayload = FromThisChainMessagePayload; - - type InboundPayload = FromBridgedChainMessagePayload; - type InboundRelayer = BridgedChainAccountId; - type DeliveryPayments = (); - - type TargetHeaderChain = TargetHeaderChainAdapter; - type DeliveryConfirmationPayments = pallet_bridge_relayers::DeliveryConfirmationPaymentsAdapter< - TestRuntime, - (), - ConstU64<100_000>, - >; - type OnMessagesDelivered = (); - - type 
SourceHeaderChain = SourceHeaderChainAdapter; - type MessageDispatch = DummyMessageDispatch; - type BridgedChainId = BridgedChainId; -} - -impl pallet_bridge_relayers::Config for TestRuntime { - type RuntimeEvent = RuntimeEvent; - type Reward = ThisChainBalance; - type PaymentProcedure = TestPaymentProcedure; - type StakeAndSlash = TestStakeAndSlash; - type WeightInfo = (); -} - -/// Dummy message dispatcher. -pub struct DummyMessageDispatch; - -impl DummyMessageDispatch { - pub fn deactivate() { - frame_support::storage::unhashed::put(&b"inactive"[..], &false); - } -} - -impl MessageDispatch for DummyMessageDispatch { - type DispatchPayload = Vec; - type DispatchLevelResult = (); - - fn is_active() -> bool { - frame_support::storage::unhashed::take::(&b"inactive"[..]) != Some(false) - } - - fn dispatch_weight(_message: &mut DispatchMessage) -> Weight { - Weight::zero() - } - - fn dispatch( - _: DispatchMessage, - ) -> MessageDispatchResult { - MessageDispatchResult { unspent_weight: Weight::zero(), dispatch_level_result: () } - } -} - -/// Bridge that is deployed on `ThisChain` and allows sending/receiving messages to/from -/// `BridgedChain`. -#[derive(Debug, PartialEq, Eq)] -pub struct OnThisChainBridge; - -impl MessageBridge for OnThisChainBridge { - const BRIDGED_MESSAGES_PALLET_NAME: &'static str = ""; - - type ThisChain = ThisChain; - type BridgedChain = BridgedChain; - type BridgedHeaderChain = pallet_bridge_grandpa::GrandpaChainHeaders; -} - -/// Bridge that is deployed on `BridgedChain` and allows sending/receiving messages to/from -/// `ThisChain`. -#[derive(Debug, PartialEq, Eq)] -pub struct OnBridgedChainBridge; - -impl MessageBridge for OnBridgedChainBridge { - const BRIDGED_MESSAGES_PALLET_NAME: &'static str = ""; - - type ThisChain = BridgedChain; - type BridgedChain = ThisChain; - type BridgedHeaderChain = ThisHeaderChain; -} - -/// Dummy implementation of `HeaderChain` for `ThisChain` at the `BridgedChain`. 
-pub struct ThisHeaderChain; - -impl HeaderChain for ThisHeaderChain { - fn finalized_header_state_root(_hash: HashOf) -> Option> { - unreachable!() - } -} - -/// Call origin at `BridgedChain`. -#[derive(Clone, Debug)] -pub struct BridgedChainOrigin; - -impl From - for Result, BridgedChainOrigin> -{ - fn from( - _origin: BridgedChainOrigin, - ) -> Result, BridgedChainOrigin> { - unreachable!() - } -} - -/// Underlying chain of `ThisChain`. -pub struct ThisUnderlyingChain; - -impl Chain for ThisUnderlyingChain { - const ID: ChainId = *b"tuch"; - - type BlockNumber = ThisChainBlockNumber; - type Hash = ThisChainHash; - type Hasher = ThisChainHasher; - type Header = ThisChainHeader; - type AccountId = ThisChainAccountId; - type Balance = ThisChainBalance; - type Nonce = u32; - type Signature = sp_runtime::MultiSignature; - - fn max_extrinsic_size() -> u32 { - BRIDGED_CHAIN_MAX_EXTRINSIC_SIZE - } - - fn max_extrinsic_weight() -> Weight { - Weight::zero() - } -} - -/// The chain where we are in tests. -pub struct ThisChain; - -impl UnderlyingChainProvider for ThisChain { - type Chain = ThisUnderlyingChain; -} - -impl ThisChainWithMessages for ThisChain { - type RuntimeOrigin = ThisChainCallOrigin; -} - -impl BridgedChainWithMessages for ThisChain {} - -/// Underlying chain of `BridgedChain`. -pub struct BridgedUnderlyingChain; -/// Some parachain under `BridgedChain` consensus. -pub struct BridgedUnderlyingParachain; -/// Runtime call of the `BridgedChain`. 
-#[derive(Decode, Encode)] -pub struct BridgedChainCall; - -impl Chain for BridgedUnderlyingChain { - const ID: ChainId = *b"buch"; - - type BlockNumber = BridgedChainBlockNumber; - type Hash = BridgedChainHash; - type Hasher = BridgedChainHasher; - type Header = BridgedChainHeader; - type AccountId = BridgedChainAccountId; - type Balance = BridgedChainBalance; - type Nonce = u32; - type Signature = sp_runtime::MultiSignature; - - fn max_extrinsic_size() -> u32 { - BRIDGED_CHAIN_MAX_EXTRINSIC_SIZE - } - fn max_extrinsic_weight() -> Weight { - Weight::zero() - } -} - -impl ChainWithGrandpa for BridgedUnderlyingChain { - const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = ""; - const MAX_AUTHORITIES_COUNT: u32 = 16; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 8; - const MAX_MANDATORY_HEADER_SIZE: u32 = 256; - const AVERAGE_HEADER_SIZE: u32 = 64; -} - -impl Chain for BridgedUnderlyingParachain { - const ID: ChainId = *b"bupc"; - - type BlockNumber = BridgedChainBlockNumber; - type Hash = BridgedChainHash; - type Hasher = BridgedChainHasher; - type Header = BridgedChainHeader; - type AccountId = BridgedChainAccountId; - type Balance = BridgedChainBalance; - type Nonce = u32; - type Signature = sp_runtime::MultiSignature; - - fn max_extrinsic_size() -> u32 { - BRIDGED_CHAIN_MAX_EXTRINSIC_SIZE - } - fn max_extrinsic_weight() -> Weight { - Weight::zero() - } -} - -impl Parachain for BridgedUnderlyingParachain { - const PARACHAIN_ID: u32 = 42; - const MAX_HEADER_SIZE: u32 = 1_024; -} - -/// The other, bridged chain, used in tests. -pub struct BridgedChain; - -impl UnderlyingChainProvider for BridgedChain { - type Chain = BridgedUnderlyingChain; -} - -impl ThisChainWithMessages for BridgedChain { - type RuntimeOrigin = BridgedChainOrigin; -} - -impl BridgedChainWithMessages for BridgedChain {} - -/// Run test within test externalities. 
-pub fn run_test(test: impl FnOnce()) { - sp_io::TestExternalities::new(Default::default()).execute_with(test) -} diff --git a/bin/runtime-common/src/parachains_benchmarking.rs b/bin/runtime-common/src/parachains_benchmarking.rs deleted file mode 100644 index b3050b9ac0f3ccec617399d3eb91647dcab7eb3d..0000000000000000000000000000000000000000 --- a/bin/runtime-common/src/parachains_benchmarking.rs +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Everything required to run benchmarks of parachains finality module. - -#![cfg(feature = "runtime-benchmarks")] - -use crate::{ - messages_benchmarking::insert_header_to_grandpa_pallet, - messages_generation::grow_trie_leaf_value, -}; - -use bp_parachains::parachain_head_storage_key_at_source; -use bp_polkadot_core::parachains::{ParaHash, ParaHead, ParaHeadsProof, ParaId}; -use bp_runtime::{record_all_trie_keys, StorageProofSize}; -use codec::Encode; -use frame_support::traits::Get; -use pallet_bridge_parachains::{RelayBlockHash, RelayBlockHasher, RelayBlockNumber}; -use sp_std::prelude::*; -use sp_trie::{trie_types::TrieDBMutBuilderV1, LayoutV1, MemoryDB, TrieMut}; - -/// Prepare proof of messages for the `receive_messages_proof` call. 
-/// -/// In addition to returning valid messages proof, environment is prepared to verify this message -/// proof. -pub fn prepare_parachain_heads_proof( - parachains: &[ParaId], - parachain_head_size: u32, - size: StorageProofSize, -) -> (RelayBlockNumber, RelayBlockHash, ParaHeadsProof, Vec<(ParaId, ParaHash)>) -where - R: pallet_bridge_parachains::Config - + pallet_bridge_grandpa::Config, - PI: 'static, - >::BridgedChain: - bp_runtime::Chain, -{ - let parachain_head = ParaHead(vec![0u8; parachain_head_size as usize]); - - // insert all heads to the trie - let mut parachain_heads = Vec::with_capacity(parachains.len()); - let mut storage_keys = Vec::with_capacity(parachains.len()); - let mut state_root = Default::default(); - let mut mdb = MemoryDB::default(); - { - let mut trie = - TrieDBMutBuilderV1::::new(&mut mdb, &mut state_root).build(); - - // insert parachain heads - for (i, parachain) in parachains.into_iter().enumerate() { - let storage_key = - parachain_head_storage_key_at_source(R::ParasPalletName::get(), *parachain); - let leaf_data = if i == 0 { - grow_trie_leaf_value(parachain_head.encode(), size) - } else { - parachain_head.encode() - }; - trie.insert(&storage_key.0, &leaf_data) - .map_err(|_| "TrieMut::insert has failed") - .expect("TrieMut::insert should not fail in benchmarks"); - storage_keys.push(storage_key); - parachain_heads.push((*parachain, parachain_head.hash())) - } - } - - // generate heads storage proof - let proof = record_all_trie_keys::, _>(&mdb, &state_root) - .map_err(|_| "record_all_trie_keys has failed") - .expect("record_all_trie_keys should not fail in benchmarks"); - - let (relay_block_number, relay_block_hash) = - insert_header_to_grandpa_pallet::(state_root); - - (relay_block_number, relay_block_hash, ParaHeadsProof { storage_proof: proof }, parachain_heads) -} diff --git a/deployments/bridges/kusama-polkadot/README.md b/deployments/bridges/kusama-polkadot/README.md new file mode 100644 index 
0000000000000000000000000000000000000000..27a55a023839bf848fb14484a88f65d72b865d7d --- /dev/null +++ b/deployments/bridges/kusama-polkadot/README.md @@ -0,0 +1,23 @@ +# Kusama Bridge Hub <> Polkadot Bridge Hub deployments + +This folder contains some information and useful stuff from our other test deployment - between Kusama and Polkadot +bridge hubs. The code and other helpful information can be found in +[this document](https://github.com/paritytech/polkadot-sdk/blob/master/bridges/docs/polkadot-kusama-bridge-overview.md) +and in the [code](https://github.com/polkadot-fellows/runtimes/tree/main/system-parachains/bridge-hubs). + +## Grafana Alerts and Dashboards + +JSON model for Grafana alerts and dashobards that we use, may be found in the [dasboard/grafana](./dashboard/grafana/) +folder. + +**Dashboards:** +- kusama-polkadot-maintenance-dashboard.json +- relay-kusama-to-polkadot-messages-dashboard.json +- relay-polkadot-to-kusama-messages-dashboard.json + +(exported JSON directly from https://grafana.teleport.parity.io/dashboards/f/eblDiw17z/Bridges) + +**Alerts:** +- bridge-kusama-polkadot-alerts.json https://grafana.teleport.parity.io/alerting/list + +_Note: All json files are formatted with `jq . 
file.json`._ diff --git a/deployments/bridges/kusama-polkadot/dashboard/grafana/bridge-kusama-polkadot-alerts.json b/deployments/bridges/kusama-polkadot/dashboard/grafana/bridge-kusama-polkadot-alerts.json new file mode 100644 index 0000000000000000000000000000000000000000..eb3b7d339bfb28ce065906066e91ccf7af97a501 --- /dev/null +++ b/deployments/bridges/kusama-polkadot/dashboard/grafana/bridge-kusama-polkadot-alerts.json @@ -0,0 +1,1656 @@ +{ + "apiVersion": 1, + "groups": [ + { + "orgId": 1, + "name": "Bridge Kusama <> Polkadot", + "folder": "bridges", + "interval": "1m", + "rules": [ + { + "uid": "adizmaavld2psc", + "title": "Polkadot -> KusamaBridgeHub finality sync lags (00000001)", + "condition": "D", + "data": [ + { + "refId": "A", + "relativeTimeRange": { + "from": 21600, + "to": 0 + }, + "datasourceUid": "PC96415006F908B67", + "model": { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "max(increase(Polkadot_to_BridgeHubKusama_Sync_best_source_at_target_block_number{domain=\"parity-chains\"}[24h]))", + "instant": false, + "interval": "", + "intervalMs": 30000, + "legendFormat": "At Polkadot", + "maxDataPoints": 43200, + "range": true, + "refId": "A" + } + }, + { + "refId": "C", + "relativeTimeRange": { + "from": 21600, + "to": 0 + }, + "datasourceUid": "__expr__", + "model": { + "conditions": [ + { + "evaluator": { + "params": [], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "C" + ] + }, + "reducer": { + "params": [], + "type": "last" + }, + "type": "query" + } + ], + "datasource": { + "type": "__expr__", + "uid": "__expr__" + }, + "expression": "A", + "intervalMs": 1000, + "maxDataPoints": 43200, + "reducer": "max", + "refId": "C", + "type": "reduce" + } + }, + { + "refId": "D", + "relativeTimeRange": { + "from": 21600, + "to": 0 + }, + "datasourceUid": "__expr__", + "model": { + "conditions": [ + { + "evaluator": { + "params": [ + 5000 + ], + "type": "lt" 
+ }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "D" + ] + }, + "reducer": { + "params": [], + "type": "last" + }, + "type": "query" + } + ], + "datasource": { + "type": "__expr__", + "uid": "__expr__" + }, + "expression": "C", + "intervalMs": 1000, + "maxDataPoints": 43200, + "refId": "D", + "type": "threshold" + } + } + ], + "dasboardUid": "zqjpkXxnk", + "panelId": 2, + "noDataState": "OK", + "execErrState": "OK", + "for": "5m", + "annotations": { + "__dashboardUid__": "zqjpkXxnk", + "__panelId__": "2", + "summary": "Less than 5000 Polkadot headers (~1/2 era) have been synced to KusamaBridgeHub in last 25 hours. Relay is not running?" + }, + "labels": { + "matrix_room": "FqmgUhjOliBGoncGwm" + }, + "isPaused": false + }, + { + "uid": "cdiznm0i2tslca", + "title": "PolkadotBridgeHub -> KusamaBridgeHub delivery lags (00000001)", + "condition": "B", + "data": [ + { + "refId": "A", + "relativeTimeRange": { + "from": 21600, + "to": 0 + }, + "datasourceUid": "PC96415006F908B67", + "model": { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "((vector(0) and ((BridgeHubPolkadot_to_BridgeHubKusama_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"source_latest_generated\"} > on () BridgeHubPolkadot_to_BridgeHubKusama_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"target_latest_received\"}))) or vector(1)) + on () increase(BridgeHubPolkadot_to_BridgeHubKusama_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"target_latest_received\"}[10m]) * on () ((vector(1) and ((BridgeHubPolkadot_to_BridgeHubKusama_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"source_latest_generated\"} > on () BridgeHubPolkadot_to_BridgeHubKusama_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"target_latest_received\"}))) or vector(0))", + "instant": false, + "interval": "", + "intervalMs": 30000, + 
"legendFormat": "Undelivered messages", + "maxDataPoints": 43200, + "range": true, + "refId": "A" + } + }, + { + "refId": "B", + "relativeTimeRange": { + "from": 600, + "to": 0 + }, + "datasourceUid": "__expr__", + "model": { + "conditions": [ + { + "evaluator": { + "params": [ + 1, + 0 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A" + ] + }, + "reducer": { + "params": [], + "type": "max" + }, + "type": "query" + } + ], + "datasource": { + "name": "Expression", + "type": "__expr__", + "uid": "__expr__" + }, + "expression": "", + "hide": false, + "intervalMs": 1000, + "maxDataPoints": 43200, + "refId": "B", + "type": "classic_conditions" + } + } + ], + "dasboardUid": "zqjpkXxnk", + "panelId": 14, + "noDataState": "OK", + "execErrState": "OK", + "for": "10m", + "annotations": { + "__dashboardUid__": "zqjpkXxnk", + "__panelId__": "14", + "summary": "Messages from PolkadotBridgeHub to KusamaBridgeHub (00000001) are either not delivered, or are delivered with lags" + }, + "labels": { + "matrix_room": "FqmgUhjOliBGoncGwm" + }, + "isPaused": false + }, + { + "uid": "adizouqsgd62od", + "title": "PolkadotBridgeHub -> KusamaBridgeHub confirmation lags (00000001)", + "condition": "B", + "data": [ + { + "refId": "A", + "relativeTimeRange": { + "from": 21600, + "to": 0 + }, + "datasourceUid": "PC96415006F908B67", + "model": { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "scalar(max_over_time(BridgeHubPolkadot_to_BridgeHubKusama_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"target_latest_received\"}[2m]) OR on() vector(0)) - scalar(max_over_time(BridgeHubPolkadot_to_BridgeHubKusama_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"source_latest_confirmed\"}[2m]) OR on() vector(0))", + "instant": false, + "interval": "", + "intervalMs": 30000, + "legendFormat": "Unconfirmed messages", + "maxDataPoints": 43200, + "range": 
true, + "refId": "A" + } + }, + { + "refId": "B", + "relativeTimeRange": { + "from": 0, + "to": 0 + }, + "datasourceUid": "__expr__", + "model": { + "conditions": [ + { + "evaluator": { + "params": [ + 50, + 0 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A" + ] + }, + "reducer": { + "params": [], + "type": "max" + }, + "type": "query" + } + ], + "datasource": { + "name": "Expression", + "type": "__expr__", + "uid": "__expr__" + }, + "expression": "", + "intervalMs": 1000, + "maxDataPoints": 43200, + "refId": "B", + "type": "classic_conditions" + } + } + ], + "dasboardUid": "zqjpkXxnk", + "panelId": 16, + "noDataState": "OK", + "execErrState": "OK", + "for": "10m", + "annotations": { + "__dashboardUid__": "zqjpkXxnk", + "__panelId__": "16", + "summary": "Messages from PolkadotBridgeHub to KusamaBridgeHub (00000001) are either not confirmed, or are confirmed with lags" + }, + "labels": { + "matrix_room": "FqmgUhjOliBGoncGwm" + }, + "isPaused": false + }, + { + "uid": "fdizp9l7o5rswf", + "title": "PolkadotBridgeHub -> KusamaBridgeHub reward lags (00000002)", + "condition": "B", + "data": [ + { + "refId": "A", + "relativeTimeRange": { + "from": 21600, + "to": 0 + }, + "datasourceUid": "PC96415006F908B67", + "model": { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "scalar(max_over_time(BridgeHubPolkadot_to_BridgeHubKusama_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"source_latest_confirmed\"}[2m]) OR on() vector(0)) - scalar(max_over_time(BridgeHubPolkadot_to_BridgeHubKusama_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"target_latest_confirmed\"}[2m]) OR on() vector(0))", + "instant": false, + "interval": "", + "intervalMs": 30000, + "legendFormat": "Unconfirmed rewards", + "maxDataPoints": 43200, + "range": true, + "refId": "A" + } + }, + { + "refId": "B", + "relativeTimeRange": { + "from": 0, + "to": 0 + }, + 
"datasourceUid": "__expr__", + "model": { + "conditions": [ + { + "evaluator": { + "params": [ + 10, + 0 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A" + ] + }, + "reducer": { + "params": [], + "type": "min" + }, + "type": "query" + } + ], + "datasource": { + "name": "Expression", + "type": "__expr__", + "uid": "__expr__" + }, + "expression": "", + "hide": false, + "intervalMs": 1000, + "maxDataPoints": 43200, + "refId": "B", + "type": "classic_conditions" + } + } + ], + "dasboardUid": "zqjpkXxnk", + "panelId": 18, + "noDataState": "OK", + "execErrState": "OK", + "for": "10m", + "annotations": { + "__dashboardUid__": "zqjpkXxnk", + "__panelId__": "18", + "summary": "Rewards for messages from PolkadotBridgeHub to KusamaBridgeHub (00000001) are either not confirmed, or are confirmed with lags" + }, + "labels": { + "matrix_room": "FqmgUhjOliBGoncGwm" + }, + "isPaused": false + }, + { + "uid": "bdizqaq47emf4f", + "title": "Kusama -> PolkadotBridgeHub finality sync lags (00000001)", + "condition": "D", + "data": [ + { + "refId": "A", + "relativeTimeRange": { + "from": 21600, + "to": 0 + }, + "datasourceUid": "PC96415006F908B67", + "model": { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "max(increase(Kusama_to_BridgeHubPolkadot_Sync_best_source_at_target_block_number{domain=\"parity-chains\"}[24h]))", + "instant": false, + "interval": "", + "intervalMs": 30000, + "legendFormat": "At Kusama", + "maxDataPoints": 43200, + "range": true, + "refId": "A" + } + }, + { + "refId": "C", + "relativeTimeRange": { + "from": 21600, + "to": 0 + }, + "datasourceUid": "__expr__", + "model": { + "conditions": [ + { + "evaluator": { + "params": [], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "C" + ] + }, + "reducer": { + "params": [], + "type": "last" + }, + "type": "query" + } + ], + "datasource": { + "type": "__expr__", + "uid": 
"__expr__" + }, + "expression": "A", + "intervalMs": 1000, + "maxDataPoints": 43200, + "reducer": "max", + "refId": "C", + "type": "reduce" + } + }, + { + "refId": "D", + "relativeTimeRange": { + "from": 21600, + "to": 0 + }, + "datasourceUid": "__expr__", + "model": { + "conditions": [ + { + "evaluator": { + "params": [ + 5000 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "D" + ] + }, + "reducer": { + "params": [], + "type": "last" + }, + "type": "query" + } + ], + "datasource": { + "type": "__expr__", + "uid": "__expr__" + }, + "expression": "C", + "intervalMs": 1000, + "maxDataPoints": 43200, + "refId": "D", + "type": "threshold" + } + } + ], + "dasboardUid": "tkpc6_bnk", + "panelId": 6, + "noDataState": "OK", + "execErrState": "OK", + "for": "5m", + "annotations": { + "__dashboardUid__": "tkpc6_bnk", + "__panelId__": "6", + "summary": "Less than 5000 Kusama headers (~1/2 era) have been synced to PolkadotBridgeHub in last 25 hours. Relay is not running?" 
+ }, + "labels": { + "matrix_room": "FqmgUhjOliBGoncGwm" + }, + "isPaused": false + }, + { + "uid": "adizvdppi4cu8b", + "title": "KusamaBridgeHub -> PolkadotBridgeHub delivery lags (00000001)", + "condition": "A", + "data": [ + { + "refId": "B", + "relativeTimeRange": { + "from": 21600, + "to": 0 + }, + "datasourceUid": "PC96415006F908B67", + "model": { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "((vector(0) and ((BridgeHubKusama_to_BridgeHubPolkadot_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"source_latest_generated\"} > on () BridgeHubKusama_to_BridgeHubPolkadot_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"target_latest_received\"}))) or vector(1)) + on () increase(BridgeHubKusama_to_BridgeHubPolkadot_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"target_latest_received\"}[10m]) * on () ((vector(1) and ((BridgeHubKusama_to_BridgeHubPolkadot_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"source_latest_generated\"} > on () BridgeHubKusama_to_BridgeHubPolkadot_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"target_latest_received\"}))) or vector(0))", + "instant": false, + "interval": "", + "intervalMs": 30000, + "legendFormat": "1 if all messages are delivered. 
Otherwise - number of delivered messages in last 10m", + "maxDataPoints": 43200, + "range": true, + "refId": "B" + } + }, + { + "refId": "A", + "relativeTimeRange": { + "from": 0, + "to": 0 + }, + "datasourceUid": "__expr__", + "model": { + "conditions": [ + { + "evaluator": { + "params": [ + 1, + 0 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "B" + ] + }, + "reducer": { + "params": [], + "type": "max" + }, + "type": "query" + } + ], + "datasource": { + "name": "Expression", + "type": "__expr__", + "uid": "__expr__" + }, + "expression": "", + "intervalMs": 1000, + "maxDataPoints": 43200, + "refId": "A", + "type": "classic_conditions" + } + } + ], + "dasboardUid": "tkpc6_bnk", + "panelId": 12, + "noDataState": "OK", + "execErrState": "OK", + "for": "10m", + "annotations": { + "__dashboardUid__": "tkpc6_bnk", + "__panelId__": "12", + "summary": "Messages from KusamaBridgeHub to PolkadotBridgeHub (00000001) are either not delivered, or are delivered with lags" + }, + "labels": { + "matrix_room": "FqmgUhjOliBGoncGwm" + }, + "isPaused": false + }, + { + "uid": "ddizvjxnpwa2ob", + "title": "KusamaBridgeHub -> PolkadotBridgeHub confirmation lags (00000001)", + "condition": "B", + "data": [ + { + "refId": "A", + "relativeTimeRange": { + "from": 21600, + "to": 0 + }, + "datasourceUid": "PC96415006F908B67", + "model": { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "scalar(max_over_time(KusamaBridgeHub_to_PolkadotBridgeHub_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"target_latest_received\"}[2m]) OR on() vector(0)) - scalar(max_over_time(KusamaBridgeHub_to_PolkadotBridgeHub_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"source_latest_confirmed\"}[2m]) OR on() vector(0))", + "instant": false, + "interval": "", + "intervalMs": 30000, + "legendFormat": "Unconfirmed messages", + "maxDataPoints": 43200, + "range": true, + 
"refId": "A" + } + }, + { + "refId": "B", + "relativeTimeRange": { + "from": 0, + "to": 0 + }, + "datasourceUid": "__expr__", + "model": { + "conditions": [ + { + "evaluator": { + "params": [ + 50, + 0 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A" + ] + }, + "reducer": { + "params": [], + "type": "min" + }, + "type": "query" + } + ], + "datasource": { + "name": "Expression", + "type": "__expr__", + "uid": "__expr__" + }, + "expression": "", + "hide": false, + "intervalMs": 1000, + "maxDataPoints": 43200, + "refId": "B", + "type": "classic_conditions" + } + } + ], + "dasboardUid": "tkpc6_bnk", + "panelId": 14, + "noDataState": "OK", + "execErrState": "OK", + "for": "10m", + "annotations": { + "__dashboardUid__": "tkpc6_bnk", + "__panelId__": "14" + }, + "labels": { + "matrix_room": "FqmgUhjOliBGoncGwm" + }, + "isPaused": false + }, + { + "uid": "fdizvp3bz6oe8c", + "title": "KusamaBridgeHub -> PolkadotBridgeHub reward lags (00000002)", + "condition": "B", + "data": [ + { + "refId": "A", + "relativeTimeRange": { + "from": 21600, + "to": 0 + }, + "datasourceUid": "PC96415006F908B67", + "model": { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "scalar(max_over_time(BridgeHubKusama_to_BridgeHubPolkadot_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"source_latest_confirmed\"}[2m]) OR on() vector(0)) - scalar(max_over_time(BridgeHubKusama_to_BridgeHubPolkadot_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"target_latest_confirmed\"}[2m]) OR on() vector(0))", + "interval": "", + "intervalMs": 30000, + "legendFormat": "Unconfirmed rewards", + "maxDataPoints": 43200, + "range": true, + "refId": "A" + } + }, + { + "refId": "B", + "relativeTimeRange": { + "from": 0, + "to": 0 + }, + "datasourceUid": "__expr__", + "model": { + "conditions": [ + { + "evaluator": { + "params": [ + 10, + 0 + ], + "type": "gt" + }, + 
"operator": { + "type": "and" + }, + "query": { + "params": [ + "A" + ] + }, + "reducer": { + "params": [], + "type": "min" + }, + "type": "query" + } + ], + "datasource": { + "name": "Expression", + "type": "__expr__", + "uid": "__expr__" + }, + "expression": "", + "hide": false, + "intervalMs": 1000, + "maxDataPoints": 43200, + "refId": "B", + "type": "classic_conditions" + } + } + ], + "dasboardUid": "tkpc6_bnk", + "panelId": 15, + "noDataState": "OK", + "execErrState": "OK", + "for": "10m", + "annotations": { + "__dashboardUid__": "tkpc6_bnk", + "__panelId__": "15", + "summary": "Rewards for messages from KusamaBridgeHub to PolkadotBridgeHub (00000001) are either not confirmed, or are confirmed with lags" + }, + "labels": { + "matrix_room": "FqmgUhjOliBGoncGwm" + }, + "isPaused": false + }, + { + "uid": "edizwf9kbhhxcc", + "title": "KusamaBridgeHub <> PolkadotBridgeHub relay (00000001) node is down", + "condition": "C", + "data": [ + { + "refId": "A", + "relativeTimeRange": { + "from": 21600, + "to": 0 + }, + "datasourceUid": "PC96415006F908B67", + "model": { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "exemplar": false, + "expr": "up{domain=\"parity-chains\",container=\"bridges-common-relay\"}", + "instant": false, + "interval": "", + "intervalMs": 30000, + "legendFormat": "Is relay running", + "maxDataPoints": 43200, + "range": true, + "refId": "A" + } + }, + { + "refId": "B", + "relativeTimeRange": { + "from": 0, + "to": 0 + }, + "datasourceUid": "__expr__", + "model": { + "conditions": [ + { + "evaluator": { + "params": [], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "B" + ] + }, + "reducer": { + "params": [], + "type": "last" + }, + "type": "query" + } + ], + "datasource": { + "type": "__expr__", + "uid": "__expr__" + }, + "expression": "A", + "intervalMs": 1000, + "maxDataPoints": 43200, + "reducer": "min", + "refId": "B", + "type": "reduce" + } + }, + { + 
"refId": "C", + "relativeTimeRange": { + "from": 0, + "to": 0 + }, + "datasourceUid": "__expr__", + "model": { + "conditions": [ + { + "evaluator": { + "params": [ + 1 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "C" + ] + }, + "reducer": { + "params": [], + "type": "last" + }, + "type": "query" + } + ], + "datasource": { + "type": "__expr__", + "uid": "__expr__" + }, + "expression": "B", + "intervalMs": 1000, + "maxDataPoints": 43200, + "refId": "C", + "type": "threshold" + } + } + ], + "dasboardUid": "UFsgpJtVz", + "panelId": 16, + "noDataState": "OK", + "execErrState": "OK", + "for": "5m", + "annotations": { + "__dashboardUid__": "UFsgpJtVz", + "__panelId__": "16", + "summary": "KusamaBridgeHub <> PolkadotBridgeHub relay (00000001) node is down" + }, + "labels": { + "matrix_room": "FqmgUhjOliBGoncGwm" + }, + "isPaused": false + }, + { + "uid": "adizwlq6yk83kc", + "title": "Version guard has aborted KusamaBridgeHub <> PolkadotBridgeHub relay (00000001)", + "condition": "C", + "data": [ + { + "refId": "A", + "queryType": "range", + "relativeTimeRange": { + "from": 21600, + "to": 0 + }, + "datasourceUid": "P7028671862427D8D", + "model": { + "datasource": { + "type": "loki", + "uid": "P7028671862427D8D" + }, + "editorMode": "code", + "expr": "count_over_time({container=\"bridges-common-relay\"} |= `Aborting relay` [1m])", + "intervalMs": 1000, + "legendFormat": "Errors per minute", + "maxDataPoints": 43200, + "queryType": "range", + "refId": "A" + } + }, + { + "refId": "B", + "relativeTimeRange": { + "from": 21600, + "to": 0 + }, + "datasourceUid": "__expr__", + "model": { + "conditions": [ + { + "evaluator": { + "params": [], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "B" + ] + }, + "reducer": { + "params": [], + "type": "last" + }, + "type": "query" + } + ], + "datasource": { + "type": "__expr__", + "uid": "__expr__" + }, + "expression": "A", + "intervalMs": 1000, + 
"maxDataPoints": 43200, + "reducer": "max", + "refId": "B", + "type": "reduce" + } + }, + { + "refId": "C", + "relativeTimeRange": { + "from": 21600, + "to": 0 + }, + "datasourceUid": "__expr__", + "model": { + "conditions": [ + { + "evaluator": { + "params": [ + 0 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "C" + ] + }, + "reducer": { + "params": [], + "type": "last" + }, + "type": "query" + } + ], + "datasource": { + "type": "__expr__", + "uid": "__expr__" + }, + "expression": "B", + "intervalMs": 1000, + "maxDataPoints": 43200, + "refId": "C", + "type": "threshold" + } + } + ], + "dasboardUid": "UFsgpJtVz", + "panelId": 11, + "noDataState": "OK", + "execErrState": "OK", + "for": "0s", + "annotations": { + "__dashboardUid__": "UFsgpJtVz", + "__panelId__": "11", + "summary": "The KusamaBridgeHub <> PolkadotBridgeHub relay (00000001) has been aborted by version guard - i.e. one of chains has been upgraded and relay wasn't redeployed" + }, + "labels": { + "matrix_room": "FqmgUhjOliBGoncGwm" + }, + "isPaused": false + }, + { + "uid": "fdizwsne5dz40b", + "title": "Kusama headers mismatch", + "condition": "C", + "data": [ + { + "refId": "A", + "relativeTimeRange": { + "from": 21600, + "to": 0 + }, + "datasourceUid": "PC96415006F908B67", + "model": { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "Kusama_to_BridgeHubPolkadot_Sync_is_source_and_source_at_target_using_different_forks{domain=\"parity-chains\"}", + "interval": "", + "intervalMs": 30000, + "legendFormat": "Best BridgeHubKusama header at BridgeHubPolkadot doesn't match the same header of BridgeHubKusama", + "maxDataPoints": 43200, + "range": true, + "refId": "A" + } + }, + { + "refId": "B", + "relativeTimeRange": { + "from": 0, + "to": 0 + }, + "datasourceUid": "__expr__", + "model": { + "conditions": [ + { + "evaluator": { + "params": [], + "type": "gt" + }, + "operator": { + "type": "and" + }, + 
"query": { + "params": [ + "B" + ] + }, + "reducer": { + "params": [], + "type": "last" + }, + "type": "query" + } + ], + "datasource": { + "type": "__expr__", + "uid": "__expr__" + }, + "expression": "A", + "intervalMs": 1000, + "maxDataPoints": 43200, + "reducer": "last", + "refId": "B", + "type": "reduce" + } + }, + { + "refId": "C", + "relativeTimeRange": { + "from": 0, + "to": 0 + }, + "datasourceUid": "__expr__", + "model": { + "conditions": [ + { + "evaluator": { + "params": [ + 0 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "C" + ] + }, + "reducer": { + "params": [], + "type": "last" + }, + "type": "query" + } + ], + "datasource": { + "type": "__expr__", + "uid": "__expr__" + }, + "expression": "B", + "intervalMs": 1000, + "maxDataPoints": 43200, + "refId": "C", + "type": "threshold" + } + } + ], + "dasboardUid": "UFsgpJtVz", + "panelId": 12, + "noDataState": "OK", + "execErrState": "OK", + "for": "10m", + "annotations": { + "__dashboardUid__": "UFsgpJtVz", + "__panelId__": "12", + "summary": "Best Kusama header at BridgeHubPolkadot (00000001) doesn't match the same header at Kusama" + }, + "labels": { + "matrix_room": "FqmgUhjOliBGoncGwm" + }, + "isPaused": false + }, + { + "uid": "ddizwvw3dlzi8e", + "title": "Polkadot headers mismatch", + "condition": "C", + "data": [ + { + "refId": "A", + "relativeTimeRange": { + "from": 21600, + "to": 0 + }, + "datasourceUid": "PC96415006F908B67", + "model": { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "Polkadot_to_BridgeHubKusama_Sync_is_source_and_source_at_target_using_different_forks{domain=\"parity-chains\"}", + "interval": "", + "intervalMs": 30000, + "legendFormat": "Best BridgeHubKusama header at BridgeHubPolkadot doesn't match the same header of BridgeHubKusama", + "maxDataPoints": 43200, + "range": true, + "refId": "A" + } + }, + { + "refId": "B", + "relativeTimeRange": { + "from": 0, + "to": 0 + }, 
+ "datasourceUid": "__expr__", + "model": { + "conditions": [ + { + "evaluator": { + "params": [], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "B" + ] + }, + "reducer": { + "params": [], + "type": "last" + }, + "type": "query" + } + ], + "datasource": { + "type": "__expr__", + "uid": "__expr__" + }, + "expression": "A", + "intervalMs": 1000, + "maxDataPoints": 43200, + "reducer": "last", + "refId": "B", + "type": "reduce" + } + }, + { + "refId": "C", + "relativeTimeRange": { + "from": 0, + "to": 0 + }, + "datasourceUid": "__expr__", + "model": { + "conditions": [ + { + "evaluator": { + "params": [ + 0 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "C" + ] + }, + "reducer": { + "params": [], + "type": "last" + }, + "type": "query" + } + ], + "datasource": { + "type": "__expr__", + "uid": "__expr__" + }, + "expression": "B", + "intervalMs": 1000, + "maxDataPoints": 43200, + "refId": "C", + "type": "threshold" + } + } + ], + "dasboardUid": "UFsgpJtVz", + "panelId": 13, + "noDataState": "NoData", + "execErrState": "Error", + "for": "10m", + "annotations": { + "__dashboardUid__": "UFsgpJtVz", + "__panelId__": "13", + "summary": "Best Polkadot header at BridgeHubKusama (00000001) doesn't match the same header at Polkadot" + }, + "labels": { + "matrix_room": "FqmgUhjOliBGoncGwm" + }, + "isPaused": false + }, + { + "uid": "bdizx0xdiomwwc", + "title": "BridgeHubKusama headers mismatch", + "condition": "B", + "data": [ + { + "refId": "A", + "relativeTimeRange": { + "from": 21600, + "to": 0 + }, + "datasourceUid": "PC96415006F908B67", + "model": { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "BridgeHubKusama_to_BridgeHubPolkadot_MessageLane_00000001_is_source_and_source_at_target_using_different_forks{domain=\"parity-chains\"}", + "interval": "", + "intervalMs": 30000, + "legendFormat": "Best BridgeHubKusama header at 
BridgeHubPolkadot doesn't match the same header of BridgeHubKusama", + "maxDataPoints": 43200, + "range": true, + "refId": "A" + } + }, + { + "refId": "B", + "relativeTimeRange": { + "from": 0, + "to": 0 + }, + "datasourceUid": "__expr__", + "model": { + "conditions": [ + { + "evaluator": { + "params": [ + 0, + 0 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A" + ] + }, + "reducer": { + "params": [], + "type": "max" + }, + "type": "query" + } + ], + "datasource": { + "name": "Expression", + "type": "__expr__", + "uid": "__expr__" + }, + "expression": "", + "hide": false, + "intervalMs": 1000, + "maxDataPoints": 43200, + "refId": "B", + "type": "classic_conditions" + } + } + ], + "dasboardUid": "UFsgpJtVz", + "panelId": 2, + "noDataState": "OK", + "execErrState": "OK", + "for": "10m", + "annotations": { + "__dashboardUid__": "UFsgpJtVz", + "__panelId__": "2", + "summary": "Best BridgeHubKusama header at BridgeHubPolkadot (00000001) doesn't match the same header at BridgeHubKusama" + }, + "labels": { + "matrix_room": "FqmgUhjOliBGoncGwm" + }, + "isPaused": false + }, + { + "uid": "fdizx4hrhg2yod", + "title": "BridgeHubPolkadot headers mismatch", + "condition": "B", + "data": [ + { + "refId": "A", + "relativeTimeRange": { + "from": 21600, + "to": 0 + }, + "datasourceUid": "PC96415006F908B67", + "model": { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "BridgeHubPolkadot_to_BridgeHubKusama_MessageLane_00000001_is_source_and_source_at_target_using_different_forks{domain=\"parity-chains\"}", + "interval": "", + "intervalMs": 30000, + "legendFormat": "Best BridgeHubKusama header at BridgeHubPolkadot doesn't match the same header of BridgeHubKusama", + "maxDataPoints": 43200, + "range": true, + "refId": "A" + } + }, + { + "refId": "B", + "relativeTimeRange": { + "from": 0, + "to": 0 + }, + "datasourceUid": "__expr__", + "model": { + "conditions": [ + { + 
"evaluator": { + "params": [ + 0, + 0 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A" + ] + }, + "reducer": { + "params": [], + "type": "max" + }, + "type": "query" + } + ], + "datasource": { + "name": "Expression", + "type": "__expr__", + "uid": "__expr__" + }, + "expression": "", + "hide": false, + "intervalMs": 1000, + "maxDataPoints": 43200, + "refId": "B", + "type": "classic_conditions" + } + } + ], + "dasboardUid": "UFsgpJtVz", + "panelId": 3, + "noDataState": "OK", + "execErrState": "OK", + "for": "10m", + "annotations": { + "__dashboardUid__": "UFsgpJtVz", + "__panelId__": "3", + "summary": "Best BridgeHubPolkadot header at BridgeHubKusama (00000001) doesn't match the same header at BridgeHubPolkadot" + }, + "labels": { + "matrix_room": "FqmgUhjOliBGoncGwm" + }, + "isPaused": false + }, + { + "uid": "cdizxaawyvldsb", + "title": "Relay balances at KusamaBridgeHub", + "condition": "B", + "data": [ + { + "refId": "A", + "relativeTimeRange": { + "from": 21600, + "to": 0 + }, + "datasourceUid": "PC96415006F908B67", + "model": { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "last_over_time(at_BridgeHubKusama_relay_BridgeHubPolkadotMessages_balance{domain=\"parity-chains\"}[1h])", + "instant": false, + "interval": "", + "intervalMs": 30000, + "legendFormat": "Messages Relay Balance", + "maxDataPoints": 43200, + "range": true, + "refId": "A" + } + }, + { + "refId": "B", + "relativeTimeRange": { + "from": 0, + "to": 0 + }, + "datasourceUid": "__expr__", + "model": { + "conditions": [ + { + "evaluator": { + "params": [ + 2, + 0 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A" + ] + }, + "reducer": { + "params": [], + "type": "last" + }, + "type": "query" + } + ], + "datasource": { + "name": "Expression", + "type": "__expr__", + "uid": "__expr__" + }, + "expression": "", + "intervalMs": 1000, + "maxDataPoints": 
43200, + "refId": "B", + "type": "classic_conditions" + } + } + ], + "dasboardUid": "UFsgpJtVz", + "panelId": 5, + "noDataState": "NoData", + "execErrState": "Error", + "for": "10m", + "annotations": { + "__dashboardUid__": "UFsgpJtVz", + "__panelId__": "5", + "summary": "With-PolkadotBridgeHub messages relay balance at KusamaBridgeHub (00000001) is too low" + }, + "labels": { + "matrix_room": "FqmgUhjOliBGoncGwm" + }, + "isPaused": false + }, + { + "uid": "fdizxtuxuza4gd", + "title": "Relay balances at PolkadotBridgeHub", + "condition": "B", + "data": [ + { + "refId": "A", + "relativeTimeRange": { + "from": 21600, + "to": 0 + }, + "datasourceUid": "PC96415006F908B67", + "model": { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "last_over_time(at_BridgeHubPolkadot_relay_BridgeHubKusamaMessages_balance{domain=\"parity-chains\"}[1h])", + "instant": false, + "interval": "", + "intervalMs": 30000, + "legendFormat": "Messages Relay Balance", + "maxDataPoints": 43200, + "range": true, + "refId": "A" + } + }, + { + "refId": "B", + "relativeTimeRange": { + "from": 0, + "to": 0 + }, + "datasourceUid": "__expr__", + "model": { + "conditions": [ + { + "evaluator": { + "params": [ + 10, + 0 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A" + ] + }, + "reducer": { + "params": [], + "type": "last" + }, + "type": "query" + } + ], + "datasource": { + "name": "Expression", + "type": "__expr__", + "uid": "__expr__" + }, + "expression": "", + "hide": false, + "intervalMs": 1000, + "maxDataPoints": 43200, + "refId": "B", + "type": "classic_conditions" + } + } + ], + "dasboardUid": "UFsgpJtVz", + "panelId": 6, + "noDataState": "OK", + "execErrState": "OK", + "for": "10m", + "annotations": { + "__dashboardUid__": "UFsgpJtVz", + "__panelId__": "6", + "summary": "With-KusamaBridgeHub messages relay balance at PolkadotBridgeHub (00000001) is too low" + }, + "labels": { + 
"matrix_room": "FqmgUhjOliBGoncGwm" + }, + "isPaused": false + } + ] + } + ] +} diff --git a/deployments/bridges/kusama-polkadot/dashboard/grafana/kusama-polkadot-maintenance-dashboard.json b/deployments/bridges/kusama-polkadot/dashboard/grafana/kusama-polkadot-maintenance-dashboard.json new file mode 100644 index 0000000000000000000000000000000000000000..2be77fa3677e2f2af5440b755d79bb2b09022726 --- /dev/null +++ b/deployments/bridges/kusama-polkadot/dashboard/grafana/kusama-polkadot-maintenance-dashboard.json @@ -0,0 +1,1026 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 4107, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 5, + "x": 0, + "y": 0 + }, + "id": 8, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": {}, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "exemplar": false, + "expr": "substrate_relay_build_info{domain=\"parity-chains\"}", + "instant": true, + 
"legendFormat": "{{commit}}", + "range": false, + "refId": "A" + } + ], + "title": "Relay build commit", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 4, + "x": 5, + "y": 0 + }, + "id": 9, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": {}, + "textMode": "name", + "wideLayout": true + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "exemplar": false, + "expr": "substrate_relay_build_info{domain=\"parity-chains\"}", + "instant": true, + "legendFormat": "{{version}}", + "range": false, + "refId": "A" + } + ], + "title": "Relay build version", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "color": "red", + "index": 1, + "text": "No" + }, + "1": { + "color": "green", + "index": 0, + "text": "Yes" + } + }, + "type": "value" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 4, + "x": 9, + "y": 0 + }, + "id": 15, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": 
"", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "exemplar": false, + "expr": "up{domain=\"parity-chains\",container=\"bridges-common-relay\"}", + "instant": false, + "legendFormat": "Is relay running", + "range": true, + "refId": "A" + } + ], + "title": "Is relay running?", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 5, + "x": 13, + "y": 0 + }, + "id": 16, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "9.3.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "exemplar": false, + "expr": "up{domain=\"parity-chains\",container=\"bridges-common-relay\"}", + "instant": false, + "legendFormat": "Is 
relay running", + "range": true, + "refId": "A" + } + ], + "title": "Is relay running? (for alert)", + "type": "timeseries" + }, + { + "datasource": { + "type": "loki", + "uid": "P7028671862427D8D" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 18, + "x": 0, + "y": 5 + }, + "id": 11, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "loki", + "uid": "P7028671862427D8D" + }, + "editorMode": "code", + "expr": "count_over_time({container=\"bridges-common-relay\"} |~ `(?i)(warn|error|fail)` [1m])", + "legendFormat": "Errors per minute", + "queryType": "range", + "refId": "A" + } + ], + "title": "Relay errors/warnings per minute", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": 
"auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 9, + "x": 0, + "y": 14 + }, + "id": 12, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "Kusama_to_BridgeHubPolkadot_Sync_is_source_and_source_at_target_using_different_forks{domain=\"parity-chains\"}", + "legendFormat": "Best BridgeHubKusama header at BridgeHubPolkadot doesn't match the same header of BridgeHubKusama", + "range": true, + "refId": "A" + } + ], + "title": "Kusama headers mismatch", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + 
"showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 9, + "x": 9, + "y": 14 + }, + "id": 13, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "Polkadot_to_BridgeHubKusama_Sync_is_source_and_source_at_target_using_different_forks{domain=\"parity-chains\"}", + "legendFormat": "Best BridgeHubKusama header at BridgeHubPolkadot doesn't match the same header of BridgeHubKusama", + "range": true, + "refId": "A" + } + ], + "title": "Polkadot headers mismatch", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + 
"overrides": [] + }, + "gridPos": { + "h": 7, + "w": 9, + "x": 0, + "y": 21 + }, + "id": 2, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "BridgeHubKusama_to_BridgeHubPolkadot_MessageLane_00000001_is_source_and_source_at_target_using_different_forks{domain=\"parity-chains\"}", + "legendFormat": "Best BridgeHubKusama header at BridgeHubPolkadot doesn't match the same header of BridgeHubKusama", + "range": true, + "refId": "A" + } + ], + "title": "BridgeHubKusama headers mismatch", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 9, + "x": 9, + "y": 21 + }, + "id": 3, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { 
+ "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "BridgeHubPolkadot_to_BridgeHubKusama_MessageLane_00000001_is_source_and_source_at_target_using_different_forks{domain=\"parity-chains\"}", + "legendFormat": "Best BridgeHubKusama header at BridgeHubPolkadot doesn't match the same header of BridgeHubKusama", + "range": true, + "refId": "A" + } + ], + "title": "BridgeHubPolkadot headers mismatch", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 9, + "x": 0, + "y": 28 + }, + "id": 5, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "at_BridgeHubKusama_relay_BridgeHubPolkadotMessages_balance{domain=\"parity-chains\"}", + "legendFormat": "Messages Relay Balance", + "range": true, + "refId": "A" + } + ], + "title": "Relay 
balances at KusamaBridgeHub", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 9, + "x": 9, + "y": 28 + }, + "id": 6, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "at_BridgeHubPolkadot_relay_BridgeHubKusamaMessages_balance{domain=\"parity-chains\"}", + "legendFormat": "Messages Relay Balance", + "range": true, + "refId": "A" + } + ], + "title": "Relay balances at PolkadotBridgeHub", + "type": "timeseries" + } + ], + "refresh": "5s", + "schemaVersion": 39, + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "BridgeHubKusama <> BridgeHubPolkadot maintenance (00000001)", + "uid": "UFsgpJtVz", + "version": 6, + "weekStart": "" +} diff --git 
a/deployments/bridges/kusama-polkadot/dashboard/grafana/relay-kusama-to-polkadot-messages-dashboard.json b/deployments/bridges/kusama-polkadot/dashboard/grafana/relay-kusama-to-polkadot-messages-dashboard.json new file mode 100644 index 0000000000000000000000000000000000000000..d9660a2a1f8d900c3732e1363b17e99c715f1689 --- /dev/null +++ b/deployments/bridges/kusama-polkadot/dashboard/grafana/relay-kusama-to-polkadot-messages-dashboard.json @@ -0,0 +1,982 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 4105, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 0, + "y": 0 + }, + "id": 6, + "options": { + "legend": { + 
"calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "Kusama_to_BridgeHubPolkadot_Sync_best_source_block_number{domain=\"parity-chains\"}", + "legendFormat": "At Kusama", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "Kusama_to_BridgeHubPolkadot_Sync_best_source_at_target_block_number{domain=\"parity-chains\"}", + "hide": false, + "legendFormat": "At BridgeHubPolkadot", + "range": true, + "refId": "B" + } + ], + "title": "Best finalized Kusama headers", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 6, + "y": 0 + }, + "id": 7, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + 
"sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "Polkadot_to_BridgeHubKusama_Sync_best_source_block_number{domain=\"parity-chains\"}", + "legendFormat": "At Polkadot", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "Polkadot_to_BridgeHubKusama_Sync_best_source_at_target_block_number{domain=\"parity-chains\"}", + "hide": false, + "legendFormat": "At BridgeHubKusama", + "range": true, + "refId": "B" + } + ], + "title": "Best finalized Polkadot headers", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 12, + "y": 0 + }, + "id": 2, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + 
"editorMode": "code", + "exemplar": true, + "expr": "BridgeHubKusama_to_BridgeHubPolkadot_MessageLane_00000001_best_source_block_number{domain=\"parity-chains\"}", + "interval": "", + "legendFormat": "At KusamaBridgeHub", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "BridgeHubKusama_to_BridgeHubPolkadot_MessageLane_00000001_best_source_at_target_block_number{domain=\"parity-chains\"}", + "hide": false, + "legendFormat": "At PolkadotBridgeHub", + "range": true, + "refId": "B" + } + ], + "title": "Best finalized KusamaBridgeHub headers", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 18, + "y": 0 + }, + "id": 4, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + 
"exemplar": true, + "expr": "BridgeHubKusama_to_BridgeHubPolkadot_MessageLane_00000001_best_target_block_number{domain=\"parity-chains\"}", + "interval": "", + "legendFormat": "At PolkadotBridgeHub", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "BridgeHubKusama_to_BridgeHubPolkadot_MessageLane_00000001_best_target_at_source_block_number{domain=\"parity-chains\"}", + "hide": false, + "legendFormat": "At KusamaBridgeHub", + "range": true, + "refId": "B" + } + ], + "title": "Best finalized PolkadotBridgeHub headers", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 8 + }, + "id": 9, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": 
"label_replace(label_replace(BridgeHubKusama_to_BridgeHubPolkadot_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\", type=~\"source_latest_generated|target_latest_received\"}, \"type\", \"Latest message sent from BridgeHubKusama\", \"type\", \"source_latest_generated\"), \"type\", \"Latest BridgeHubKusama message received by BridgeHubPolkadot\", \"type\", \"target_latest_received\")", + "legendFormat": "{{type}}", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "increase(BridgeHubKusama_to_BridgeHubPolkadot_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\", type=~\"source_latest_generated\"}[24h])", + "hide": true, + "legendFormat": "Messages generated in last 24h", + "range": true, + "refId": "B" + } + ], + "title": "Delivery race (00000001)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 8 + }, + "id": 10, + "options": { + "legend": { + "calcs": [], + "displayMode": 
"list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "label_replace(label_replace(BridgeHubKusama_to_BridgeHubPolkadot_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=~\"source_latest_confirmed|target_latest_received\"}, \"type\", \"Latest delivery confirmation from BridgeHubPolkadot to BridgeHubKusama\", \"type\", \"source_latest_confirmed\"), \"type\", \"Latest BridgeHubKusama message received by BridgeHubPolkadot\", \"type\", \"target_latest_received\")", + "legendFormat": "{{type}}", + "range": true, + "refId": "A" + } + ], + "title": "Confirmations race (00000001)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 16 + }, + "id": 12, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": 
"single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "scalar(max_over_time(BridgeHubKusama_to_BridgeHubPolkadot_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"source_latest_generated\"}[2m]) OR on() vector(0)) - scalar(max_over_time(BridgeHubKusama_to_BridgeHubPolkadot_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"target_latest_received\"}[2m]) OR on() vector(0))", + "legendFormat": "Undelivered messages", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "((vector(0) and ((BridgeHubKusama_to_BridgeHubPolkadot_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"source_latest_generated\"} > on () BridgeHubKusama_to_BridgeHubPolkadot_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"target_latest_received\"}))) or vector(1)) + on () increase(BridgeHubKusama_to_BridgeHubPolkadot_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"target_latest_received\"}[10m]) * on () ((vector(1) and ((BridgeHubKusama_to_BridgeHubPolkadot_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"source_latest_generated\"} > on () BridgeHubKusama_to_BridgeHubPolkadot_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"target_latest_received\"}))) or vector(0))", + "hide": true, + "legendFormat": "1 if all messages are delivered. 
Otherwise - number of delivered messages in last 10m", + "range": true, + "refId": "B" + } + ], + "title": "Delivery race lags (00000001)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 16 + }, + "id": 14, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "scalar(max_over_time(BridgeHubKusama_to_BridgeHubPolkadot_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"target_latest_received\"}[2m]) OR on() vector(0)) - scalar(max_over_time(BridgeHubKusama_to_BridgeHubPolkadot_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"source_latest_confirmed\"}[2m]) OR on() vector(0))", + "legendFormat": "Unconfirmed messages", + "range": true, + "refId": "A" + } + ], + "title": "Confirmations race lags (00000001)", + 
"type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 16, + "y": 16 + }, + "id": 15, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "scalar(max_over_time(BridgeHubKusama_to_BridgeHubPolkadot_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"source_latest_confirmed\"}[2m]) OR on() vector(0)) - scalar(max_over_time(BridgeHubKusama_to_BridgeHubPolkadot_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"target_latest_confirmed\"}[2m]) OR on() vector(0))", + "legendFormat": "Unconfirmed rewards", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": 
"(scalar(max_over_time(BridgeHubKusama_to_BridgeHubPolkadot_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"source_latest_confirmed\"}[2m]) OR on() vector(0)) - scalar(max_over_time(BridgeHubKusama_to_BridgeHubPolkadot_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"target_latest_confirmed\"}[2m]) OR on() vector(0))) * (max_over_time(BridgeHubKusama_to_BridgeHubPolkadot_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"target_latest_received\"}[2m]) OR on() vector(0) > bool min_over_time(BridgeHubKusama_to_BridgeHubPolkadot_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"target_latest_received\"}[2m]) OR on() vector(0))", + "hide": true, + "legendFormat": "__auto", + "range": true, + "refId": "B" + } + ], + "title": "Reward lags (00000001)", + "type": "timeseries" + } + ], + "refresh": "5s", + "schemaVersion": 39, + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "BridgeHubKusama to BridgeHubPolkadot (00000001)", + "uid": "tkpc6_bnk", + "version": 2, + "weekStart": "" +} diff --git a/deployments/bridges/kusama-polkadot/dashboard/grafana/relay-polkadot-to-kusama-messages-dashboard.json b/deployments/bridges/kusama-polkadot/dashboard/grafana/relay-polkadot-to-kusama-messages-dashboard.json new file mode 100644 index 0000000000000000000000000000000000000000..4fbe9cc09c1bd900c45b823baf97094dda840868 --- /dev/null +++ b/deployments/bridges/kusama-polkadot/dashboard/grafana/relay-polkadot-to-kusama-messages-dashboard.json @@ -0,0 +1,970 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + 
}, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 4106, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 0, + "y": 0 + }, + "id": 2, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "Polkadot_to_BridgeHubKusama_Sync_best_source_block_number{domain=\"parity-chains\"}", + "legendFormat": "At Polkadot", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "Polkadot_to_BridgeHubKusama_Sync_best_source_at_target_block_number{domain=\"parity-chains\"}", + "hide": false, + "legendFormat": "At BridgeHubKusama", + "range": true, + "refId": "B" + } + ], + "title": "Best finalized Polkadot headers", + "type": 
"timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 6, + "y": 0 + }, + "id": 4, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "Kusama_to_BridgeHubPolkadot_Sync_best_source_block_number{domain=\"parity-chains\"}", + "legendFormat": "At Kusama", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "Kusama_to_BridgeHubPolkadot_Sync_best_source_at_target_block_number{domain=\"parity-chains\"}", + "hide": false, + "legendFormat": "At PolkadotBridgeHub", + "range": true, + "refId": "B" + } + ], + "title": "Best finalized Kusama headers", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "fieldConfig": { + "defaults": 
{ + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 12, + "y": 0 + }, + "id": 6, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "exemplar": true, + "expr": "BridgeHubPolkadot_to_BridgeHubKusama_MessageLane_00000001_best_source_block_number{domain=\"parity-chains\"}", + "interval": "", + "legendFormat": "At PolkadotBridgeHub", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "BridgeHubPolkadot_to_BridgeHubKusama_MessageLane_00000001_best_source_at_target_block_number{domain=\"parity-chains\"}", + "hide": false, + "legendFormat": "At KusamaBridgeHub", + "range": true, + "refId": "B" + } + ], + "title": "Best finalized PolkadotBridgeHub headers", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "fieldConfig": { + "defaults": { + "color": { + 
"mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 18, + "y": 0 + }, + "id": 8, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "exemplar": true, + "expr": "BridgeHubPolkadot_to_BridgeHubKusama_MessageLane_00000001_best_target_block_number{domain=\"parity-chains\"}", + "interval": "", + "legendFormat": "At KusamaBridgeHub", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "BridgeHubPolkadot_to_BridgeHubKusama_MessageLane_00000001_best_target_at_source_block_number{domain=\"parity-chains\"}", + "hide": false, + "legendFormat": "At PolkadotBridgeHub", + "range": true, + "refId": "B" + } + ], + "title": "Best finalized KusamaBridgeHub headers", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": 
"palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 8 + }, + "id": 10, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "label_replace(label_replace(BridgeHubPolkadot_to_BridgeHubKusama_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=~\"source_latest_generated|target_latest_received\"}, \"type\", \"Latest message sent from BridgeHubPolkadot\", \"type\", \"source_latest_generated\"), \"type\", \"Latest BridgeHubPolkadot message received by BridgeHubKusama\", \"type\", \"target_latest_received\")", + "legendFormat": "{{type}}", + "range": true, + "refId": "A" + } + ], + "title": "Delivery race (00000001)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + 
"axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 8 + }, + "id": 12, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "label_replace(label_replace(BridgeHubPolkadot_to_BridgeHubKusama_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=~\"source_latest_confirmed|target_latest_received\"}, \"type\", \"Latest delivery confirmation from BridgeHubKusama to BridgeHubPolkadot\", \"type\", \"source_latest_confirmed\"), \"type\", \"Latest BridgeHubPolkadot message received by BridgeHubKusama\", \"type\", \"target_latest_received\")", + "legendFormat": "{{type}}", + "range": true, + "refId": "A" + } + ], + "title": "Confirmations race (00000001)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + 
"fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 16 + }, + "id": 14, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "scalar(max_over_time(BridgeHubPolkadot_to_BridgeHubKusama_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"source_latest_generated\"}[2m]) OR on() vector(0)) - scalar(max_over_time(BridgeHubPolkadot_to_BridgeHubKusama_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"target_latest_received\"}[2m]) OR on() vector(0))", + "legendFormat": "Undelivered messages", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "((vector(0) and ((BridgeHubPolkadot_to_BridgeHubKusama_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"source_latest_generated\"} > on () BridgeHubPolkadot_to_BridgeHubKusama_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"target_latest_received\"}))) or vector(1)) + on () 
increase(BridgeHubPolkadot_to_BridgeHubKusama_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"target_latest_received\"}[10m]) * on () ((vector(1) and ((BridgeHubPolkadot_to_BridgeHubKusama_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"source_latest_generated\"} > on () BridgeHubPolkadot_to_BridgeHubKusama_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"target_latest_received\"}))) or vector(0))", + "hide": true, + "legendFormat": "1 if all messages are delivered. Otherwise - number of delivered messages in last 10m", + "range": true, + "refId": "B" + } + ], + "title": "Delivery race lags (00000001)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 16 + }, + "id": 16, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": 
"PC96415006F908B67" + }, + "editorMode": "code", + "expr": "scalar(max_over_time(BridgeHubPolkadot_to_BridgeHubKusama_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"target_latest_received\"}[2m]) OR on() vector(0)) - scalar(max_over_time(BridgeHubPolkadot_to_BridgeHubKusama_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"source_latest_confirmed\"}[2m]) OR on() vector(0))", + "legendFormat": "Unconfirmed messages", + "range": true, + "refId": "A" + } + ], + "title": "Confirmations race lags (00000001)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 16, + "y": 16 + }, + "id": 18, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": 
"scalar(max_over_time(BridgeHubPolkadot_to_BridgeHubKusama_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"source_latest_confirmed\"}[2m]) OR on() vector(0)) - scalar(max_over_time(BridgeHubPolkadot_to_BridgeHubKusama_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"target_latest_confirmed\"}[2m]) OR on() vector(0))", + "legendFormat": "Unconfirmed rewards", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PC96415006F908B67" + }, + "editorMode": "code", + "expr": "(scalar(max_over_time(BridgeHubPolkadot_to_BridgeHubKusama_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"source_latest_confirmed\"}[2m]) OR on() vector(0)) - scalar(max_over_time(BridgeHubPolkadot_to_BridgeHubKusama_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"target_latest_confirmed\"}[2m]) OR on() vector(0))) * (max_over_time(BridgeHubPolkadot_to_BridgeHubKusama_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"target_latest_received\"}[2m]) OR on() vector(0) > bool min_over_time(BridgeHubPolkadot_to_BridgeHubKusama_MessageLane_00000001_lane_state_nonces{domain=\"parity-chains\",type=\"target_latest_received\"}[2m]) OR on() vector(0))", + "hide": true, + "legendFormat": "__auto", + "range": true, + "refId": "B" + } + ], + "title": "Reward lags (00000001)", + "type": "timeseries" + } + ], + "refresh": "5s", + "schemaVersion": 39, + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "BridgeHubPolkadot to BridgeHubKusama (00000001)", + "uid": "zqjpkXxnk", + "version": 2, + "weekStart": "" +} diff --git a/docs/bridge-relayers-claim-rewards.png b/docs/bridge-relayers-claim-rewards.png deleted file mode 100644 index d56b8dd871e8445e7cab49517123b0092ce09137..0000000000000000000000000000000000000000 Binary files a/docs/bridge-relayers-claim-rewards.png and 
/dev/null differ diff --git a/docs/bridge-relayers-deregister.png b/docs/bridge-relayers-deregister.png deleted file mode 100644 index e7706cee78916d7e2bbcfd7ee4a1a046a0450f87..0000000000000000000000000000000000000000 Binary files a/docs/bridge-relayers-deregister.png and /dev/null differ diff --git a/docs/bridge-relayers-register.png b/docs/bridge-relayers-register.png deleted file mode 100644 index e9e3e1b5ac87c5c9d31477c696912fcbc93b0c78..0000000000000000000000000000000000000000 Binary files a/docs/bridge-relayers-register.png and /dev/null differ diff --git a/docs/complex-relay.html b/docs/complex-relay.html deleted file mode 100644 index 21524bfd04913c108372fe65e006b67a94fc4d31..0000000000000000000000000000000000000000 --- a/docs/complex-relay.html +++ /dev/null @@ -1,85 +0,0 @@ - - - - - - Complex Relay - - -

Complex Relay

-

- Both Source Chain and Target Chains have Bridge Messages pallets deployed. They also have required - finality pallets deployed - we don't care about finality type here - they can be either Bridge GRANDPA, - or Bridge Parachains finality pallets, or any combination of those.
-

-

- There are 4-6 relayer subprocesses inside the Complex Relayer. They include two message relayers, - serving the lane in both directions and 2-4 Complex Relayers (depending on the finality type of Source - and Target Chains).
-

-

- The following diagram shows the way the complex relayer serves the lane in single direction. Everything - below may be applied to the opposite direction if you'll swap the Source and Target Chains. -

-
- sequenceDiagram - participant Source Chain - participant Complex Relayer - participant Target Chain - - Note right of Source Chain: Finalized: 480, Target Finalized: 50, Sent Messages: 42, Confirmed Messages: 42 - Note left of Target Chain: Finalized: 60, Source Finalized: 420, Received Messages: 42 - - Source Chain ->> Source Chain: someone Sends Message 43 - Source Chain ->> Source Chain: Import and Finalize Block 481 - - Source Chain ->> Complex Relayer: notes new outbound message 43 at Source Chain Block 481 - Note right of Complex Relayer: can't deliver message 43, Source Chain Block 481 is not relayed - Complex Relayer ->> Complex Relayer: asks on-demand Finality Relayer to relay Source Chain Block 481 - - Source Chain ->> Complex Relayer: Read Finality Proof of Block 481 - Complex Relayer ->> Target Chain: Submit Finality Proof of Block 481 - Target Chain ->> Target Chain: Import and Finalize Block 61 - Note left of Target Chain: Finalized: 61, Source Finalized: 481, Received Messages: 42 - - Source Chain ->> Complex Relayer: Read Proof of Message 43 at Block 481 - Complex Relayer ->> Target Chain: Submit Proof of Message 43 at Block 481 - Target Chain ->> Target Chain: Import and Finalize Block 62 - Note left of Target Chain: Finalized: 62, Source Finalized: 481, Received Messages: { rewarded: 42, messages-relayer-account: [43] } - - Target Chain ->> Complex Relayer: notes new unrewarded relayer at Target Chain Block 62 - Note right of Complex Relayer: can't relay delivery confirmations because Target Chain Block 62 is not relayed - Complex Relayer ->> Complex Relayer: asks on-demand Finality Relayer to relay Target Chain Block 62 - - Target Chain ->> Complex Relayer: Read Finality Proof of Block 62 - Complex Relayer ->> Source Chain: Submit Finality Proof of Block 62 - Source Chain ->> Source Chain: Import and Finalize Block 482 - Note right of Source Chain: Finalized: 482, Target Finalized: 62, Confirmed Messages: 42 - - Target Chain ->> Complex 
Relayer: Read Proof of Message 43 Delivery at Block 62 - Complex Relayer ->> Source Chain: Submit Proof of Message 43 Delivery at Block 612 - Source Chain ->> Source Chain: rewards messages-relayer-account for delivering message [43] - Source Chain ->> Source Chain: prune delivered message 43 from runtime storage - Note right of Source Chain: Finalized: 482, Target Finalized: 61, Confirmed Messages: 43 - - Source Chain ->> Source Chain: someone Sends Message 44 - Source Chain ->> Source Chain: Import and Finalize Block 483 - - Source Chain ->> Complex Relayer: notes new outbound message 44 at Source Chain Block 483 and new confirmed message 43 - Note right of Complex Relayer: can't deliver message 44, Source Chain Block 483 is not relayed - Complex Relayer ->> Complex Relayer: asks on-demand Finality Relayer to relay Source Chain Block 483 - - Source Chain ->> Complex Relayer: Read Finality Proof of Block 483 - Complex Relayer ->> Target Chain: Submit Finality Proof of Block 483 - Target Chain ->> Target Chain: Import and Finalize Block 63 - Note left of Target Chain: Finalized: 63, Source Finalized: 483, Received Messages: { rewarded: 42, messages-relayer-account: [43] } - - Source Chain ->> Complex Relayer: Read Proof of Message 44 and Proof of Message 43 reward at Block 483 - Complex Relayer ->> Target Chain: Submit Proof of Message 44 and Proof of Message 43 reward at Block 483 - Target Chain ->> Target Chain: Import and Finalize Block 64 - Note left of Target Chain: Finalized: 64, Source Finalized: 483, Received Messages: { rewarded: 43, messages-relayer-account: [44] }--> -
- - - - diff --git a/docs/grandpa-finality-relay.html b/docs/grandpa-finality-relay.html deleted file mode 100644 index 4136621b1a4bf6ebc0f9f675dc900fd12457bbae..0000000000000000000000000000000000000000 --- a/docs/grandpa-finality-relay.html +++ /dev/null @@ -1,47 +0,0 @@ - - - - - - GRANDPA Finality Relay - - -

GRANDPA Finality Relay

-

- Source Chain is running GRANDPA Finality Gadget. Bridge GRANDPA finality pallet is deployed at - Target Chain runtime. Relayer is configured to relay Source Chain finality to Target Chain. -

-
- sequenceDiagram - participant Source Chain - participant Relayer - participant Target Chain - Note left of Source Chain: Best: 500, Finalized: 480, Authorities Set Index: 42 - Note right of Target Chain: Uninitialized - - Source Chain ->> Relayer: Read Initialization Data - Relayer ->> Target Chain: Initialize Bridge GRANDPA Finality Pallet - Note right of Target Chain: Finalized: 480, Authorities Set Index: 42 - - Source Chain ->> Source Chain: Import Block 501 - Source Chain ->> Source Chain: Import Block 502 - Source Chain ->> Source Chain: Finalize Block 495 - Source Chain ->> Relayer: Read Finality Proof of Block 495 - Relayer ->> Target Chain: Finality Proof of Block 495 - Note right of Target Chain: Finalized: 495, Authorities Set Index: 42 - - Source Chain ->> Source Chain: Import Block 503 that changes Authorities Set to 43 - Source Chain ->> Source Chain: Finalize Block 500 - Note left of Relayer: Relayer Misses Finality Notification for Block 500 - - Source Chain ->> Source Chain: Import Block 504 - Source Chain ->> Source Chain: Finalize Mandatory Block 503 - Source Chain ->> Source Chain: Finalize Block 504 - Source Chain ->> Relayer: Read Finality Proof of Mandatory Block 503 - Relayer ->> Target Chain: Finality Proof of Block 503 - Note right of Target Chain: Finalized: 503, Authorities Set Index: 43 -
- - - - diff --git a/docs/high-level-overview.md b/docs/high-level-overview.md deleted file mode 100644 index d6d6fb3f0996dd15d4fc2987deacf79e4ecd4e5f..0000000000000000000000000000000000000000 --- a/docs/high-level-overview.md +++ /dev/null @@ -1,184 +0,0 @@ -# High-Level Bridge Documentation - -This document gives a brief, abstract description of main components that may be found in this repository. If you want -to see how we're using them to build Rococo <> Westend (Kusama <> Polkadot) bridge, please refer to the [Polkadot <> -Kusama Bridge](./polkadot-kusama-bridge-overview.md). - -## Purpose - -This repo contains all components required to build a trustless connection between standalone Substrate chains, that are -using GRANDPA finality, their parachains or any combination of those. On top of this connection, we offer a messaging -pallet that provides means to organize messages exchange. - -On top of that layered infrastructure, anyone may build their own bridge applications - e.g. [XCM -messaging](./polkadot-kusama-bridge-overview.md), [encoded calls -messaging](https://github.com/paritytech/parity-bridges-common/releases/tag/encoded-calls-messaging) and so on. - -## Terminology - -Even though we support (and require) two-way bridging, the documentation will generally talk about a one-sided -interaction. That's to say, we will only talk about syncing finality proofs and messages from a _source_ chain to a -_target_ chain. This is because the two-sided interaction is really just the one-sided interaction with the source and -target chains switched. - -The bridge has both on-chain (pallets) and offchain (relayers) components. - -## On-chain components - -On-chain bridge components are pallets that are deployed at the chain runtime. Finality pallets require deployment at -the target chain, while messages pallet needs to be deployed at both, source and target chains. 
- -### Bridge GRANDPA Finality Pallet - -A GRANDPA light client of the source chain built into the target chain's runtime. It provides a "source of truth" about -the source chain headers which have been finalized. This is useful for higher level applications. - -The pallet tracks current GRANDPA authorities set and only accepts finality proofs (GRANDPA justifications), generated -by the current authorities set. The GRANDPA protocol itself requires current authorities set to generate explicit -justification for the header that enacts next authorities set. Such headers and their finality proofs are called -mandatory in the pallet and relayer pays no fee for such headers submission. - -The pallet does not require all headers to be imported or provided. The relayer itself chooses which headers he wants to -submit (with the exception of mandatory headers). - -More: [pallet level documentation and code](../modules/grandpa/). - -### Bridge Parachains Finality Pallet - -Parachains are not supposed to have their own finality, so we can't use bridge GRANDPA pallet to verify their finality -proofs. Instead, they rely on their relay chain finality. The parachain header is considered final, when it is accepted -by the [`paras` -pallet](https://github.com/paritytech/polkadot/tree/1a034bd6de0e76721d19aed02a538bcef0787260/runtime/parachains/src/paras) -at its relay chain. Obviously, the relay chain block, where it is accepted, must also be finalized by the relay chain -GRANDPA gadget. - -That said, the bridge parachains pallet accepts storage proof of one or several parachain heads, inserted to the -[`Heads`](https://github.com/paritytech/polkadot/blob/1a034bd6de0e76721d19aed02a538bcef0787260/runtime/parachains/src/paras/mod.rs#L642) -map of the [`paras` -pallet](https://github.com/paritytech/polkadot/tree/1a034bd6de0e76721d19aed02a538bcef0787260/runtime/parachains/src/paras). 
-To verify this storage proof, the pallet uses relay chain header, imported earlier by the bridge GRANDPA pallet. - -The pallet may track multiple parachains at once and those parachains may use different primitives. So the parachain -header decoding never happens at the pallet level. For maintaining the headers order, the pallet uses relay chain header -number. - -More: [pallet level documentation and code](../modules/parachains/). - -### Bridge Messages Pallet - -The pallet is responsible for queuing messages at the source chain and receiving the messages proofs at the target -chain. The messages are sent to the particular _lane_, where they are guaranteed to be received in the same order they -are sent. The pallet supports many lanes. - -The lane has two ends. Outbound lane end is storing number of messages that have been sent and the number of messages -that have been received. Inbound lane end stores the number of messages that have been received and also a map that maps -messages to relayers that have delivered those messages to the target chain. - -The pallet has three main entrypoints: -- the `send_message` may be used by the other runtime pallets to send the messages; -- the `receive_messages_proof` is responsible for parsing the messages proof and handing messages over to the dispatch -code; -- the `receive_messages_delivery_proof` is responsible for parsing the messages delivery proof and rewarding relayers -that have delivered the message. - -Many things are abstracted by the pallet: -- the message itself may mean anything, the pallet doesn't care about its content; -- the message dispatch happens during delivery, but it is decoupled from the pallet code; -- the messages proof and messages delivery proof are verified outside of the pallet; -- the relayers incentivization scheme is defined outside of the pallet. - -Outside of the messaging pallet, we have a set of adapters, where messages and delivery proofs are regular storage -proofs. 
The proofs are generated at the bridged chain and require bridged chain finality. So messages pallet, in this -case, depends on one of the finality pallets. The messages are XCM messages and we are using XCM executor to dispatch -them on receival. You may find more info in [Polkadot <> Kusama Bridge](./polkadot-kusama-bridge-overview.md) document. - -More: [pallet level documentation and code](../modules/messages/). - -### Bridge Relayers Pallet - -The pallet is quite simple. It just registers relayer rewards and has an entrypoint to collect them. When the rewards -are registered and the reward amount is configured outside of the pallet. - -More: [pallet level documentation and code](../modules/relayers/). - -## Offchain Components - -Offchain bridge components are separate processes, called relayers. Relayers are connected both to the source chain and -target chain nodes. Relayers are reading state of the source chain, compare it to the state of the target chain and, if -state at target chain needs to be updated, submits target chain transaction. - -### GRANDPA Finality Relay - -The task of relay is to submit source chain GRANDPA justifications and their corresponding headers to the Bridge GRANDPA -Finality Pallet, deployed at the target chain. For that, the relay subscribes to the source chain GRANDPA justifications -stream and submits every new justification it sees to the target chain GRANDPA light client. In addition, relay is -searching for mandatory headers and submits their justifications - without that the pallet will be unable to move -forward. - -More: [GRANDPA Finality Relay Sequence Diagram](./grandpa-finality-relay.html), [pallet level documentation and -code](../relays/finality/). - -### Parachains Finality Relay - -The relay connects to the source _relay_ chain and the target chain nodes. It doesn't need to connect to the tracked -parachain nodes. 
The relay looks at the -[`Heads`](https://github.com/paritytech/polkadot/blob/1a034bd6de0e76721d19aed02a538bcef0787260/runtime/parachains/src/paras/mod.rs#L642) -map of the [`paras` -pallet](https://github.com/paritytech/polkadot/tree/1a034bd6de0e76721d19aed02a538bcef0787260/runtime/parachains/src/paras) -in source chain, and compares the value with the best parachain head, stored in the bridge parachains pallet at the -target chain. If new parachain head appears at the relay chain block `B`, the relay process **waits** until header `B` -or one of its ancestors appears at the target chain. Once it is available, the storage proof of the map entry is -generated and is submitted to the target chain. - -As its on-chain component (which requires bridge GRANDPA pallet to be deployed nearby), the parachains finality relay -requires GRANDPA finality relay to be running in parallel. Without it, the header `B` or any of its children's finality -at source won't be relayed at target, and target chain won't be able to verify generated storage proof. - -More: [Parachains Finality Relay Sequence Diagram](./parachains-finality-relay.html), [code](../relays/parachains/). - -### Messages Relay - -Messages relay is actually two relays that are running in a single process: messages delivery relay and delivery -confirmation relay. Even though they are more complex and have many caveats, the overall algorithm is the same as in -other relays. - -Message delivery relay connects to the source chain and looks at the outbound lane end, waiting until new messages are -queued there. Once they appear at the source block `B`, the relay start waiting for the block `B` or its descendant -appear at the target chain. Then the messages storage proof is generated and submitted to the bridge messages pallet at -the target chain. 
In addition, the transaction may include the storage proof of the outbound lane state - that proves -that relayer rewards have been paid and this data (map of relay accounts to the delivered messages) may be pruned from -the inbound lane state at the target chain. - -Delivery confirmation relay connects to the target chain and starts watching the inbound lane end. When new messages are -delivered to the target chain, the corresponding _source chain account_ is inserted to the map in the inbound lane data. -Relay detects that, say, at the target chain block `B` and waits until that block or its descendant appears at the -source chain. Once that happens, the relay crafts a storage proof of that data and sends it to the messages pallet, -deployed at the source chain. - -As you can see, the messages relay also requires finality relay to be operating in parallel. Since messages relay -submits transactions to both source and target chains, it requires both _source-to-target_ and _target-to-source_ -finality relays. They can be GRANDPA finality relays or GRANDPA+parachains finality relays, depending on the type of -connected chain. - -More: [Messages Relay Sequence Diagram](./messages-relay.html), [pallet level documentation and -code](../relays/messages/). - -### Complex Relay - -Every relay transaction has its cost. The only transaction, that is "free" to relayer is when the mandatory GRANDPA -header is submitted. The relay that feeds the bridge with every relay chain and/or parachain head it sees, will have to -pay a (quite large) cost. And if no messages are sent through the bridge, that is just waste of money. - -We have a special relay mode, called _complex relay_, where relay mostly sleeps and only submits transactions that are -required for the messages/confirmations delivery. This mode starts two message relays (in both directions). All required -finality relays are also started in a special _on-demand_ mode. 
In this mode they do not submit any headers without -special request. As always, the only exception is when GRANDPA finality relay sees the mandatory header - it is -submitted without such request. - -The message relays are watching their lanes and when, at some block `B`, they see new messages/confirmations to be -delivered, they are asking on-demand relays to relay this block `B`. On-demand relays does that and then message relay -may perform its job. If on-demand relay is a parachain finality relay, it also runs its own on-demand GRANDPA relay, -which is used to relay required relay chain headers. - -More: [Complex Relay Sequence Diagram](./complex-relay.html), -[code](../relays/bin-substrate/src/cli/relay_headers_and_messages/). diff --git a/docs/messages-relay.html b/docs/messages-relay.html deleted file mode 100644 index c4dab9901e03bc704d4e5241d343331b76731c2c..0000000000000000000000000000000000000000 --- a/docs/messages-relay.html +++ /dev/null @@ -1,78 +0,0 @@ - - - - - - Messages Relay - - -

Messages Relay

-

- Both Source Chain and Target Chains have Bridge Messages pallets deployed. They also have required - finality pallets deployed - we don't care about finality type here - they can be either Bridge GRANDPA, - or Bridge Parachains finality pallets, or any combination of those. -

-

- Finality Relayer represents two actual relayers - one relays Source Chain Finality to Target Chain. - And another one relays Target Chain Finality to Source Chain. -

-
- sequenceDiagram - participant Source Chain - participant Finality Relayer - participant Messages Relayer - participant Target Chain - - Note right of Source Chain: Finalized: 480, Target Finalized: 50, Sent Messages: 42, Confirmed Messages: 42 - Note left of Target Chain: Finalized: 60, Source Finalized: 420, Received Messages: 42 - - Source Chain ->> Source Chain: someone Sends Message 43 - Source Chain ->> Source Chain: Import and Finalize Block 481 - - Source Chain ->> Messages Relayer: notes new outbound message 43 at Source Chain Block 481 - Note right of Messages Relayer: can't deliver message 43, Source Chain Block 481 is not relayed - - Source Chain ->> Finality Relayer: Read Finality Proof of Block 481 - Finality Relayer ->> Target Chain: Submit Finality Proof of Block 481 - Target Chain ->> Target Chain: Import and Finalize Block 61 - Note left of Target Chain: Finalized: 61, Source Finalized: 481, Received Messages: 42 - - Source Chain ->> Messages Relayer: Read Proof of Message 43 at Block 481 - Messages Relayer ->> Target Chain: Submit Proof of Message 43 at Block 481 - Target Chain ->> Target Chain: Import and Finalize Block 62 - Note left of Target Chain: Finalized: 62, Source Finalized: 481, Received Messages: { rewarded: 42, messages-relayer-account: [43] } - - Target Chain ->> Messages Relayer: notes new unrewarded relayer at Target Chain Block 62 - Note right of Messages Relayer: can't relay delivery confirmations because Target Chain Block 62 is not relayed - - Target Chain ->> Finality Relayer: Read Finality Proof of Block 62 - Finality Relayer ->> Source Chain: Submit Finality Proof of Block 62 - Source Chain ->> Source Chain: Import and Finalize Block 482 - Note right of Source Chain: Finalized: 482, Target Finalized: 62, Confirmed Messages: 42 - - Target Chain ->> Messages Relayer: Read Proof of Message 43 Delivery at Block 62 - Messages Relayer ->> Source Chain: Submit Proof of Message 43 Delivery at Block 612 - Source Chain ->> Source 
Chain: rewards messages-relayer-account for delivering message [43] - Source Chain ->> Source Chain: prune delivered message 43 from runtime storage - Note right of Source Chain: Finalized: 482, Target Finalized: 61, Confirmed Messages: 43 - - Source Chain ->> Source Chain: someone Sends Message 44 - Source Chain ->> Source Chain: Import and Finalize Block 483 - - Source Chain ->> Messages Relayer: notes new outbound message 44 at Source Chain Block 483 and new confirmed message 43 - Note right of Messages Relayer: can't deliver message 44, Source Chain Block 483 is not relayed - - Source Chain ->> Finality Relayer: Read Finality Proof of Block 483 - Finality Relayer ->> Target Chain: Submit Finality Proof of Block 483 - Target Chain ->> Target Chain: Import and Finalize Block 63 - Note left of Target Chain: Finalized: 63, Source Finalized: 483, Received Messages: { rewarded: 42, messages-relayer-account: [43] } - - Source Chain ->> Messages Relayer: Read Proof of Message 44 and Proof of Message 43 reward at Block 483 - Messages Relayer ->> Target Chain: Submit Proof of Message 44 and Proof of Message 43 reward at Block 483 - Target Chain ->> Target Chain: Import and Finalize Block 64 - Note left of Target Chain: Finalized: 64, Source Finalized: 483, Received Messages: { rewarded: 43, messages-relayer-account: [44] } -
- - - - diff --git a/docs/parachains-finality-relay.html b/docs/parachains-finality-relay.html deleted file mode 100644 index 4fc1392b87dea73cd49cff8fc6ccfe2125d5994a..0000000000000000000000000000000000000000 --- a/docs/parachains-finality-relay.html +++ /dev/null @@ -1,55 +0,0 @@ - - - - - - Parachains Finality Relay - - -

Parachains Finality Relay

-

- Source Relay Chain is running GRANDPA Finality Gadget. Source Parachain is a parachain of the Source - Relay Chain. Bridge GRANDPA finality pallet is deployed at Target Chain runtime and is "connected" - to the Source Relay Chain. Bridge Parachains finality pallet is deployed at Target Chain and is - configured to track the Source Parachain. GRANDPA Relayer is configured to relay Source Relay Chain - finality to Target Chain. Parachains Relayer is configured to relay Source Parachain headers finality - to Target Chain. -

-
- sequenceDiagram - participant Source Parachain - participant Source Relay Chain - participant GRANDPA Relayer - participant Parachains Relayer - participant Target Chain - - Note left of Source Parachain: Best: 125 - Note left of Source Relay Chain: Finalized: 500, Best Parachain at Finalized: 120 - Note right of Target Chain: Best Relay: 480, Best Parachain: 110 - - Source Parachain ->> Source Parachain: Import Block 126 - Source Parachain ->> Source Relay Chain: Receives the Parachain block 126 - - Source Relay Chain ->> Source Relay Chain: Import block 501 - Source Relay Chain ->> Source Relay Chain: Finalize block 501 - Note left of Source Relay Chain: Finalized: 501, Best Parachain at Finalized: 126 - - Source Relay Chain ->> Parachains Relayer: notes new Source Parachain Block 126 - Note left of Parachains Relayer: can't relay Source Parachain Block 126, because it requires at least Source Relay Block 501 at Target Chain - - Source Relay Chain ->> Source Relay Chain: Import block 502 - Source Relay Chain ->> Source Relay Chain: Finalize block 502 - - Source Relay Chain ->> GRANDPA Relayer: read GRANDPA Finality Proof of Block 502 - GRANDPA Relayer ->> Target Chain: submit GRANDPA Finality Proof of Block 502 - Note right of Target Chain: Best Relay: 502, Best Parachain: 110 - - Target Chain ->> Parachains Relayer: notes finalized Source Relay Block 502 at Target Chain - Source Relay Chain ->> Parachains Relayer: read Parachain Finality Proof at Relay Block 502 - Parachains Relayer ->> Target Chain: submit Parachain Finality Proof at Relay Block 502 - Note right of Target Chain: Best Relay: 502, Best Parachain: 126 -
- - - - diff --git a/docs/polkadot-kusama-bridge-overview.md b/docs/polkadot-kusama-bridge-overview.md deleted file mode 100644 index 08036f0b0722b869786ae3d0abfc6ae7ea7c2c18..0000000000000000000000000000000000000000 --- a/docs/polkadot-kusama-bridge-overview.md +++ /dev/null @@ -1,129 +0,0 @@ -# Polkadot <> Kusama Bridge Overview - -This document describes how we use all components, described in the [High-Level Bridge -Documentation](./high-level-overview.md), to build the XCM bridge between Kusama and Polkadot. In this case, our -components merely work as a XCM transport (like XCMP/UMP/HRMP), between chains that are not a part of the same consensus -system. - -The overall architecture may be seen in [this diagram](./polkadot-kusama-bridge.html). - -## Bridge Hubs - -All operations at relay chain are expensive. Ideally all non-mandatory transactions must happen on parachains. That's -why we are planning to have two parachains - Polkadot Bridge Hub under Polkadot consensus and Kusama Bridge Hub under -Kusama consensus. - -The Bridge Hub will have all required bridge pallets in its runtime. We hope that later, other teams will be able to use -our bridge hubs too and have their pallets there. - -The Bridge Hub will use the base token of the ecosystem - KSM at Kusama Bridge Hub and DOT at Polkadot Bridge Hub. The -runtime will have minimal set of non-bridge pallets, so there's not much you can do directly on bridge hubs. - -## Connecting Parachains - -You won't be able to directly use bridge hub transactions to send XCM messages over the bridge. Instead, you'll need to -use other parachains transactions, which will use HRMP to deliver messages to the Bridge Hub. The Bridge Hub will just -queue these messages in its outbound lane, which is dedicated to deliver messages between two parachains. - -Our first planned bridge will connect the Polkadot and Kusama Asset Hubs. 
A bridge between those two parachains would -allow Asset Hub Polkadot accounts to hold wrapped KSM tokens and Asset Hub Kusama accounts to hold wrapped DOT tokens. - -For that bridge (pair of parachains under different consensus systems) we'll be using the lane 00000000. Later, when -other parachains will join the bridge, they will be using other lanes for their messages. - -## Running Relayers - -We are planning to run our own complex relayer for the lane 00000000. The relayer will relay Kusama/Polkadot GRANDPA -justifications to the bridge hubs at the other side. It'll also relay finalized Kusama Bridge Hub and Polkadot Bridge -Hub heads. This will only happen when messages will be queued at hubs. So most of time relayer will be idle. - -There's no any active relayer sets, or something like that. Anyone may start its own relayer and relay queued messages. -We are not against that and, as always, appreciate any community efforts. Of course, running relayer has the cost. Apart -from paying for the CPU and network, the relayer pays for transactions at both sides of the bridge. We have a mechanism -for rewarding relayers. - -### Compensating the Cost of Message Delivery Transactions - -One part of our rewarding scheme is that the cost of message delivery, for honest relayer, is zero. The honest relayer -is the relayer, which is following our rules: - -- we do not reward relayers for submitting GRANDPA finality transactions. The only exception is submitting mandatory - headers (headers which are changing the GRANDPA authorities set) - the cost of such transaction is zero. The relayer - will pay the full cost for submitting all other headers; - -- we do not reward relayers for submitting parachain finality transactions. The relayer will pay the full cost for - submitting parachain finality transactions; - -- we compensate the cost of message delivery transactions that have actually delivered the messages. 
So if your - transaction has claimed to deliver messages `[42, 43, 44]`, but, because of some reasons, has actually delivered - messages `[42, 43]`, the transaction will be free for relayer. If it has not delivered any messages, then the relayer - pays the full cost of the transaction; - -- we compensate the cost of message delivery and all required finality calls, if they are part of the same - [`frame_utility::batch_all`](https://github.com/paritytech/substrate/blob/891d6a5c870ab88521183facafc811a203bb6541/frame/utility/src/lib.rs#L326) - transaction. Of course, the calls inside the batch must be linked - e.g. the submitted parachain head must be used to - prove messages. Relay header must be used to prove parachain head finality. If one of calls fails, or if they are not - linked together, the relayer pays the full transaction cost. - -Please keep in mind that the fee of "zero-cost" transactions is still withdrawn from the relayer account. But the -compensation is registered in the `pallet_bridge_relayers::RelayerRewards` map at the target bridge hub. The relayer may -later claim all its rewards later, using the `pallet_bridge_relayers::claim_rewards` call. - -*A side note*: why we don't simply set the cost of useful transactions to zero? That's because the bridge has its cost. -If we won't take any fees, it would mean that the sender is not obliged to pay for its messages. And Bridge Hub -collators (and, maybe, "treasury") are not receiving any payment for including transactions. More about this later, in -the [Who is Rewarding Relayers](#who-is-rewarding-relayers) section. - -### Message Delivery Confirmation Rewards - -In addition to the "zero-cost" message delivery transactions, the relayer is also rewarded for: - -- delivering every message. The reward is registered during delivery confirmation transaction at the Source Bridge Hub.; - -- submitting delivery confirmation transaction. The relayer may submit delivery confirmation that e.g. 
confirms delivery - of four messages, of which the only one (or zero) messages is actually delivered by this relayer. It receives some fee - for confirming messages, delivered by other relayers. - -Both rewards may be claimed using the `pallet_bridge_relayers::claim_rewards` call at the Source Bridge Hub. - -### Who is Rewarding Relayers - -Obviously, there should be someone who is paying relayer rewards. We want bridge transactions to have a cost, so we -can't use fees for rewards. Instead, the parachains using the bridge, use sovereign accounts on both sides of the bridge -to cover relayer rewards. - -Bridged Parachains will have sovereign accounts at bridge hubs. For example, the Kusama Asset Hub will have an account -at the Polkadot Bridge Hub. The Polkadot Asset Hub will have an account at the Kusama Bridge Hub. The sovereign accounts -are used as a source of funds when the relayer is calling the `pallet_bridge_relayers::claim_rewards`. - -Since messages lane is only used by the pair of parachains, there's no collision between different bridges. E.g. Kusama -Asset Hub will only reward relayers that are delivering messages from Kusama Asset Hub. The Kusama Asset Hub sovereign -account is not used to cover rewards of bridging with some other Polkadot Parachain. - -### Multiple Relayers and Rewards - -Our goal is to incentivize running honest relayers. But we have no relayers sets, so at any time anyone may submit -message delivery transaction, hoping that the cost of this transaction will be compensated. So what if some message is -currently queued and two relayers are submitting two identical message delivery transactions at once? Without any -special means, the cost of first included transaction will be compensated and the cost of the other one won't. A honest, -but unlucky relayer will lose some money. In addition, we'll waste some portion of block size and weight, which may be -used by other useful transactions. 
- -To solve the problem, we have two signed extensions ([generate_bridge_reject_obsolete_headers_and_messages! -{}](../bin/runtime-common/src/lib.rs) and -[RefundRelayerForMessagesFromParachain](../bin/runtime-common/src/refund_relayer_extension.rs)), that are preventing -bridge transactions with obsolete data from including into the block. We are rejecting following transactions: - -- transactions, that are submitting the GRANDPA justification for the best finalized header, or one of its ancestors; - -- transactions, that are submitting the proof of the current best parachain head, or one of its ancestors; - -- transactions, that are delivering already delivered messages. If at least one of messages is not yet delivered, the - transaction is not rejected; - -- transactions, that are confirming delivery of already confirmed messages. If at least one of confirmations is new, the - transaction is not rejected; - -- [`frame_utility::batch_all`](https://github.com/paritytech/substrate/blob/891d6a5c870ab88521183facafc811a203bb6541/frame/utility/src/lib.rs#L326) - transactions, that have both finality and message delivery calls. All restrictions from the [Compensating the Cost of - Message Delivery Transactions](#compensating-the-cost-of-message-delivery-transactions) are applied. diff --git a/docs/polkadot-kusama-bridge.html b/docs/polkadot-kusama-bridge.html deleted file mode 100644 index bf248adb571670e9b9e670b811c6810020bbde75..0000000000000000000000000000000000000000 --- a/docs/polkadot-kusama-bridge.html +++ /dev/null @@ -1,67 +0,0 @@ - - - - - - Polkadot <> Kusama Bridge - - -

Polkadot <> Kusama Bridge

-

- Our bridge connects two parachains - Kusama Bridge Hub and Polkadot Bridge Hub. Messages that - are sent over bridge have XCM format and we are using existing architecture to dispatch them. - Since both Polkadot, Kusama and their parachains already have means to exchange XCM messages - within the same consensus system (HRMP, VMP, ...), it means that we are able to connect all those - chains with our bridge. -

-

- In our architecture, the lane that is used to relay messages over the bridge is determined by - the XCM source and destinations. So e.g. bridge between Asset Hubs Polkadot and Kusama (and opposite direction) - will use the lane 00000000, bridge between some other Polkadot Parachain and some other Kusama Parachain - will use the lane 00000001 and so on. -

-
- flowchart LR - subgraph Polkadot Consensus - polkadot(((Polkadot))) - asset_hub_polkadot(((Polkadot Asset Hub))) - polkadot_bh(((Polkadot Bridge Hub))) - - polkadot---asset_hub_polkadot - polkadot---polkadot_bh - - asset_hub_polkadot-->|Send Message Using HRMP|polkadot_bh - - polkadot_bh-->|Send Message Using HRMP|asset_hub_polkadot - asset_hub_polkadot-->|Dispatch the Message|asset_hub_polkadot - end - subgraph Kusama Consensus - kusama_bh(((Kusama Bridge Hub))) - asset_hub_kusama(((Kusama Asset Hub))) - kusama(((Kusama))) - - kusama---asset_hub_kusama - kusama---kusama_bh - - kusama_bh-->|Send Message Using HRMP|asset_hub_kusama - asset_hub_kusama-->|Dispatch the Message|asset_hub_kusama - - asset_hub_kusama-->|Send Message Using HRMP|kusama_bh - end - - polkadot_bh<===>|Message is relayed to the Bridged Chain using lane 00000000|kusama_bh - - linkStyle 2 stroke:red - linkStyle 7 stroke:red - linkStyle 8 stroke:red - - linkStyle 3 stroke:green - linkStyle 4 stroke:green - linkStyle 9 stroke:green -
- - - \ No newline at end of file diff --git a/docs/running-relayer.md b/docs/running-relayer.md deleted file mode 100644 index 710810a476e4df5e4b80fde31f9576be5ad26391..0000000000000000000000000000000000000000 --- a/docs/running-relayer.md +++ /dev/null @@ -1,343 +0,0 @@ -# Running your own bridge relayer - -:warning: :construction: Please read the [Disclaimer](#disclaimer) section first :construction: :warning: - -## Disclaimer - -There are several things you should know before running your own relayer: - -- initial bridge version (we call it bridges v1) supports any number of relayers, but **there's no guaranteed -compensation** for running a relayer and/or submitting valid bridge transactions. Most probably you'll end up -spending more funds than getting from rewards - please accept this fact; - -- even if your relayer has managed to submit a valid bridge transaction that has been included into the bridge -hub block, there's no guarantee that you will be able to claim your compensation for that transaction. That's -because compensations are paid from the account, controlled by relay chain governance and it could have no funds -to compensate your useful actions. We'll be working on a proper process to resupply it on-time, but we can't -provide any guarantee until that process is well established. - -## A Brief Introduction into Relayers and our Compensations Scheme - -Omitting details, relayer is an offchain process that is connected to both bridged chains. It looks at the -outbound bridge messages queue and submits message delivery transactions to the target chain. There's a lot -of details behind that simple phrase - you could find more info in the -[High-Level Bridge Overview](./high-level-overview.md) document. - -Reward that is paid to relayer has two parts. The first part static and is controlled by the governance. -It is rather small initially - e.g. you need to deliver `10_000` Kusama -> Polkadot messages to gain single -KSM token. 
- -The other reward part is dynamic. So to deliver an XCM message from one BridgeHub to another, we'll need to -submit two transactions on different chains. Every transaction has its cost, which is: - -- dynamic, because e.g. message size can change and/or fee factor of the target chain may change; - -- quite large, because those transactions are quite heavy (mostly in terms of size, not weight). - -We are compensating the cost of **valid**, **minimal** and **useful** bridge-related transactions to -relayer, that has submitted such transaction. Valid here means that the transaction doesn't fail. Minimal -means that all data within transaction call is actually required for the transaction to succeed. Useful -means that all supplied data in transaction is new and yet unknown to the target chain. - -We have implemented a relayer that is able to craft such transactions. The rest of document contains a detailed -information on how to deploy this software on your own node. - -## Relayers Concurrency - -As it has been said above, we are not compensating cost of transactions that are not **useful**. For -example, if message `100` has already been delivered from Kusama Bridge Hub to Polkadot Bridge Hub, then another -transaction that delivers the same message `100` won't be **useful**. Hence, no compensation to relayer that -has submitted that second transaction. - -But what if there are several relayers running? They are noticing the same queued message `100` and -simultaneously submit identical message delivery transactions. You may expect that there'll be one lucky -relayer, whose transaction would win the "race" and which will receive the compensation and reward. And -there'll be several other relayers, losing some funds on their unuseful transactions. - -But actually, we have a solution that invalidates transactions of "unlucky" relayers before they are -included into the block. So at least you may be sure that you won't waste your funds on duplicate transactions. - -
-Some details? - -All **unuseful** transactions are rejected by our -[transaction extension](https://github.com/paritytech/polkadot-sdk/blob/master/bridges/bin/runtime-common/src/refund_relayer_extension.rs), -which also handles transaction fee compensations. You may find more info on unuseful (aka obsolete) transactions -by lurking in the code. - -We also have the WiP prototype of relayers coordination protocol, where relayers will get some guarantee -that their transactions will be prioritized over other relayers transactions at their assigned slots. -That is planned for the future version of bridge and the progress is -[tracked here](https://github.com/paritytech/parity-bridges-common/issues/2486). - -
- -## Prerequisites - -Let's focus on the bridge between Polkadot and Kusama Bridge Hubs. Let's also assume that we want to start -a relayer that "serves" an initial lane [`0x00000001`](https://github.com/polkadot-fellows/runtimes/blob/9ce1bbbbcd7843b3c76ba4d43c036bc311959e9f/system-parachains/bridge-hubs/bridge-hub-kusama/src/bridge_to_polkadot_config.rs#L54). - -
-Lane? - -Think of lane as a queue of messages that need to be delivered to the other/bridged chain. The lane is -bidirectional, meaning that there are four "endpoints". Two "outbound" endpoints (one at every chain), contain -messages that need to be delivered to the bridged chain. Two "inbound" are accepting messages from the bridged -chain and also remember the relayer, who has delivered message(s) to reward it later. - -
- -The same steps may be performed for other lanes and bridges as well - you'll just need to change several parameters. - -So to start your relayer instance, you'll need to prepare: - -- an address of ws/wss RPC endpoint of the Kusama relay chain; - -- an address of ws/wss RPC endpoint of the Polkadot relay chain; - -- an address of ws/wss RPC endpoint of the Kusama Bridge Hub chain; - -- an address of ws/wss RPC endpoint of the Polkadot Bridge Hub chain; - -- an account on Kusama Bridge Hub; - -- an account on Polkadot Bridge Hub. - -For RPC endpoints, you could start your own nodes, or use some public community nodes. Nodes are not meant to be -archive or provide access to insecure RPC calls. - -To create an account on Bridge Hubs, you could use XCM teleport functionality. E.g. if you have an account on -the relay chain, you could use the `teleportAssets` call of `xcmPallet` and send asset -`V3 { id: Concrete(0, Here), Fungible: }` to beneficiary `V3(0, X1(AccountId32()))` -on destination `V3(0, X1(Parachain(1002)))`. To estimate amounts you need, please refer to the [Costs](#costs) -section of the document. - -## Registering your Relayer Account (Optional, But Please Read) - -Bridge transactions are quite heavy and expensive. We want to minimize block space that can be occupied by -invalid bridge transactions and prioritize valid transactions over invalid. That is achieved by **optional** -relayer registration. Transactions, signed by relayers with active registration, gain huge priority boost. -In exchange, such relayers may be slashed if they submit **invalid** or **non-minimal** transaction. - -Transactions, signed by relayers **without** active registration, on the other hand, receive no priority -boost. It means that if there is active registered relayer, most likely all transactions from unregistered -will be counted as **unuseful**, not included into the block and unregistered relayer won't get any reward -for his operations. 
- -Before registering, you should know several things about your funds: - -- to register, you need to hold significant amount of funds on your relayer account. As of now, it is - [100 KSM](https://github.com/polkadot-fellows/runtimes/blob/9ce1bbbbcd7843b3c76ba4d43c036bc311959e9f/system-parachains/bridge-hubs/bridge-hub-kusama/src/bridge_to_polkadot_config.rs#L71C14-L71C43) - for registration on Kusama Bridge Hub and - [500 DOT](https://github.com/polkadot-fellows/runtimes/blob/9ce1bbbbcd7843b3c76ba4d43c036bc311959e9f/system-parachains/bridge-hubs/bridge-hub-polkadot/src/bridge_to_kusama_config.rs#L71C14-L71C43) - for registration on Polkadot Bridge Hub; - -- when you are registered, those funds are reserved on relayer account and you can't transfer them. - -The registration itself, has three states: active, inactive or expired. Initially, it is active, meaning that all -your transactions that are **validated** on top of block, where it is active get priority boost. Registration -becomes expired when the block with the number you have specified during registration is "mined". It is the -`validTill` parameter of the `register` call (see below). After that `validTill` block, you may unregister and get -your reserved funds back. There's also an intermediate point between those blocks - it is the `validTill - LEASE`, -where `LEASE` is the the chain constant, controlled by the governance. Initially it is set to `300` blocks. -All your transactions, **validated** between the `validTill - LEASE` and `validTill` blocks do not get the -priority boost. Also, it is forbidden to specify `validTill` such that the `validTill - currentBlock` is less -than the `LEASE`. - -
-Example? - -| Bridge Hub Block | Registration State | Comment | -| ----------------- | ------------------ | ------------------------------------------------------ | -| 100 | Active | You have submitted a tx with the `register(1000)` call | -| 101 | Active | Your message delivery transactions are boosted | -| 102 | Active | Your message delivery transactions are boosted | -| ... | Active | Your message delivery transactions are boosted | -| 700 | Inactive | Your message delivery transactions are not boosted | -| 701 | Inactive | Your message delivery transactions are not boosted | -| ... | Inactive | Your message delivery transactions are not boosted | -| 1000 | Expired | Your may submit a tx with the `deregister` call | - -
- -So once you have enough funds on your account and have selected the `validTill` parameter value, you -could use the Polkadot JS apps to submit an extrinsic. If you want priority boost for your transactions -on the Kusama Bridge Hub, open the -[Polkadot JS Apps](https://polkadot.js.org/apps/?rpc=wss%3A%2F%2Fkusama-bridge-hub-rpc.polkadot.io#/extrinsics) -and submit the `register` extrinsic from the `bridgeRelayers` pallet: - -![Register Extrinsic](./bridge-relayers-register.png) - -To deregister, submit the simple `deregister` extrinsic when registration is expired: - -![Deregister Extrinsic](./bridge-relayers-deregister.png) - -At any time, you can prolong your registration by calling the `register` with the larger `validTill`. - -## Costs - -Your relayer account (on both Bridge Hubs) must hold enough funds to be able to pay costs of bridge -transactions. If your relayer behaves correctly, those costs will be compensated and you will be -able to claim it later. - -**IMPORTANT**: you may add tip to your bridge transactions to boost their priority. But our -compensation mechanism never refunds transaction tip, so all tip tokens will be lost. - -
-Types of bridge transactions - -There are two types of bridge transactions: - -- message delivery transaction brings queued message(s) from one Bridge Hub to another. We record - the fact that this specific (your) relayer has delivered those messages; - -- message confirmation transaction confirms that some message have been delivered and also brings - back information on how many messages (your) relayer has delivered. We use this information later - to register delivery rewards on the source chain. - -Several messages/confirmations may be included in a single bridge transaction. Apart from this -data, bridge transaction may include finality and storage proofs, required to prove authenticity of -this data. - -
- -To deliver and get reward for a single message, the relayer needs to submit two transactions. One -at the source Bridge Hub and one at the target Bridge Hub. Below are costs for Polkadot <> Kusama -messages (as of today): - -- to deliver a single Polkadot -> Kusama message, you would need to pay around `0.06 KSM` at Kusama - Bridge Hub and around `1.62 DOT` at Polkadot Bridge Hub; - -- to deliver a single Kusama -> Polkadot message, you would need to pay around `1.70 DOT` at Polkadot - Bridge Hub and around `0.05 KSM` at Kusama Bridge Hub. - -Those values are not constants - they depend on call weights (that may change from release to release), -on transaction sizes (that depends on message size and chain state) and congestion factor. In any -case - it is your duty to make sure that the relayer has enough funds to pay transaction fees. - -## Claiming your Compensations and Rewards - -Hopefully you have successfully delivered some messages and now can claim your compensation and reward. -This requires submitting several transactions. But first, let's check that you actually have something to -claim. For that, let's check the state of the pallet that tracks all rewards. - -To check your rewards at the Kusama Bridge Hub, go to the -[Polkadot JS Apps](https://polkadot.js.org/apps/?rpc=wss%3A%2F%2Fkusama-bridge-hub-rpc.polkadot.io#/chainstate) -targeting Kusama Bridge Hub, select the `bridgeRelayers` pallet, choose `relayerRewards` map and -your relayer account. Then: - -- set the `laneId` to `0x00000001` - -- set the `bridgedChainId` to `bhpd`; - -- check the both variants of the `owner` field: `ThisChain` is used to pay for message delivery transactions - and `BridgedChain` is used to pay for message confirmation transactions. - -If check shows that you have some rewards, you can craft the claim transaction, with similar parameters. 
-For that, go to `Extrinsics` tab of the -[Polkadot JS Apps](https://polkadot.js.org/apps/?rpc=wss%3A%2F%2Fkusama-bridge-hub-rpc.polkadot.io#/extrinsics) -and submit the following transaction (make sure to change `owner` before): - -![Claim Rewards Extrinsic](./bridge-relayers-claim-rewards.png) - -To claim rewards on Polkadot Bridge Hub you can follow the same process. The only difference is that you -need to set value of the `bridgedChainId` to `bhks`. - -## Starting your Relayer - -### Starting your Rococo <> Westend Relayer - -You may find the relayer image reference in the -[Releases](https://github.com/paritytech/parity-bridges-common/releases) -of this repository. Make sure to check supported (bundled) versions -of release there. For Rococo <> Westend bridge, normally you may use the -latest published release. The release notes always contain the docker -image reference and source files, required to build relayer manually. - -Once you have the docker image, update variables and run the following script: -```sh -export DOCKER_IMAGE= - -export ROCOCO_HOST= -export ROCOCO_PORT= -# or set it to '--rococo-secure' if wss is used above -export ROCOCO_IS_SECURE= -export BRIDGE_HUB_ROCOCO_HOST= -export BRIDGE_HUB_ROCOCO_PORT= -# or set it to '--bridge-hub-rococo-secure' if wss is used above -export BRIDGE_HUB_ROCOCO_IS_SECURE= -export BRIDGE_HUB_ROCOCO_KEY_FILE= - -export WESTEND_HOST= -export WESTEND_PORT= -# or set it to '--westend-secure' if wss is used above -export WESTEND_IS_SECURE= -export BRIDGE_HUB_WESTEND_HOST= -export BRIDGE_HUB_WESTEND_PORT= -# or set it to '--bridge-hub-westend-secure ' if wss is used above -export BRIDGE_HUB_WESTEND_IS_SECURE= -export BRIDGE_HUB_WESTEND_KEY_FILE= - -# you can get extended relay logs (e.g. 
for debugging issues) by passing `-e RUST_LOG=bridge=trace` -# argument to the `docker` binary -docker run \ - -v $BRIDGE_HUB_ROCOCO_KEY_FILE:/bhr.key \ - -v $BRIDGE_HUB_WESTEND_KEY_FILE:/bhw.key \ - $DOCKER_IMAGE \ - relay-headers-and-messages bridge-hub-rococo-bridge-hub-westend \ - --rococo-host $ROCOCO_HOST \ - --rococo-port $ROCOCO_PORT \ - $ROCOCO_IS_SECURE \ - --rococo-version-mode Auto \ - --bridge-hub-rococo-host $BRIDGE_HUB_ROCOCO_HOST \ - --bridge-hub-rococo-port $BRIDGE_HUB_ROCOCO_PORT \ - $BRIDGE_HUB_ROCOCO_IS_SECURE \ - --bridge-hub-rococo-version-mode Auto \ - --bridge-hub-rococo-signer-file /bhr.key \ - --bridge-hub-rococo-transactions-mortality 16 \ - --westend-host $WESTEND_HOST \ - --westend-port $WESTEND_PORT \ - $WESTEND_IS_SECURE \ - --westend-version-mode Auto \ - --bridge-hub-westend-host $BRIDGE_HUB_WESTEND_HOST \ - --bridge-hub-westend-port $BRIDGE_HUB_WESTEND_PORT \ - $BRIDGE_HUB_WESTEND_IS_SECURE \ - --bridge-hub-westend-version-mode Auto \ - --bridge-hub-westend-signer-file /bhw.key \ - --bridge-hub-westend-transactions-mortality 16 \ - --lane 00000002 -``` - -### Starting your Polkadot <> Kusama Relayer - -*Work in progress, coming soon* - -### Watching your relayer state - -Our relayer provides some Prometheus metrics that you may convert into some fancy Grafana dashboards -and alerts. By default, metrics are exposed at port `9616`. To expose endpoint to the localhost, change -the docker command by adding following two lines: - -```sh -docker run \ - .. - -p 127.0.0.1:9616:9616 \ # tell Docker to bind container port 9616 to host port 9616 - # and listen for connections on the host' localhost interface - .. - $DOCKER_IMAGE \ - relay-headers-and-messages bridge-hub-rococo-bridge-hub-westend \ - --prometheus-host 0.0.0.0 \ # tell `substrate-relay` binary to accept Prometheus endpoint - # connections from everywhere - .. 
-``` - -You can find more info on configuring Prometheus and Grafana in the -[Monitor your node](https://wiki.polkadot.network/docs/maintain-guides-how-to-monitor-your-node) -guide from Polkadot wiki. - -We have our own set of Grafana dashboards and alerts. You may use them for inspiration. -Please find them in this folder: - -- for Rococo <> Westend bridge: [rococo-westend](https://github.com/paritytech/parity-bridges-common/tree/master/deployments/bridges/rococo-westend). - -- for Polkadot <> Kusama bridge: *work in progress, coming soon* diff --git a/modules/beefy/Cargo.toml b/modules/beefy/Cargo.toml deleted file mode 100644 index 30c91feb56da64b612dd3393262e8d587687d7cd..0000000000000000000000000000000000000000 --- a/modules/beefy/Cargo.toml +++ /dev/null @@ -1,55 +0,0 @@ -[package] -name = "pallet-bridge-beefy" -version = "0.1.0" -description = "Module implementing BEEFY on-chain light client used for bridging consensus of substrate-based chains." -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } -log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { optional = true, workspace = true } - -# Bridge Dependencies - -bp-beefy = { path = "../../primitives/beefy", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -frame-system = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", 
branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } - -[dev-dependencies] -sp-consensus-beefy = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -mmr-lib = { package = "ckb-merkle-mountain-range", version = "0.3.2" } -pallet-beefy-mmr = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -pallet-mmr = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -rand = "0.8" -sp-io = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -bp-test-utils = { path = "../../primitives/test-utils" } - -[features] -default = [ "std" ] -std = [ - "bp-beefy/std", - "bp-runtime/std", - "codec/std", - "frame-support/std", - "frame-system/std", - "log/std", - "scale-info/std", - "serde", - "sp-core/std", - "sp-runtime/std", - "sp-std/std", -] -try-runtime = [ "frame-support/try-runtime", "frame-system/try-runtime" ] diff --git a/modules/beefy/src/lib.rs b/modules/beefy/src/lib.rs deleted file mode 100644 index 27c83921021bb4299b18cbf2d3216427f8c89ccc..0000000000000000000000000000000000000000 --- a/modules/beefy/src/lib.rs +++ /dev/null @@ -1,651 +0,0 @@ -// Copyright 2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! 
BEEFY bridge pallet. -//! -//! This pallet is an on-chain BEEFY light client for Substrate-based chains that are using the -//! following pallets bundle: `pallet-mmr`, `pallet-beefy` and `pallet-beefy-mmr`. -//! -//! The pallet is able to verify MMR leaf proofs and BEEFY commitments, so it has access -//! to the following data of the bridged chain: -//! -//! - header hashes -//! - changes of BEEFY authorities -//! - extra data of MMR leafs -//! -//! Given the header hash, other pallets are able to verify header-based proofs -//! (e.g. storage proofs, transaction inclusion proofs, etc.). - -#![warn(missing_docs)] -#![cfg_attr(not(feature = "std"), no_std)] - -use bp_beefy::{ChainWithBeefy, InitializationData}; -use sp_std::{boxed::Box, prelude::*}; - -// Re-export in crate namespace for `construct_runtime!` -pub use pallet::*; - -mod utils; - -#[cfg(test)] -mod mock; -#[cfg(test)] -mod mock_chain; - -/// The target that will be used when publishing logs related to this pallet. -pub const LOG_TARGET: &str = "runtime::bridge-beefy"; - -/// Configured bridged chain. -pub type BridgedChain = >::BridgedChain; -/// Block number, used by configured bridged chain. -pub type BridgedBlockNumber = bp_runtime::BlockNumberOf>; -/// Block hash, used by configured bridged chain. -pub type BridgedBlockHash = bp_runtime::HashOf>; - -/// Pallet initialization data. -pub type InitializationDataOf = - InitializationData, bp_beefy::MmrHashOf>>; -/// BEEFY commitment hasher, used by configured bridged chain. -pub type BridgedBeefyCommitmentHasher = bp_beefy::BeefyCommitmentHasher>; -/// BEEFY validator id, used by configured bridged chain. -pub type BridgedBeefyAuthorityId = bp_beefy::BeefyAuthorityIdOf>; -/// BEEFY validator set, used by configured bridged chain. -pub type BridgedBeefyAuthoritySet = bp_beefy::BeefyAuthoritySetOf>; -/// BEEFY authority set, used by configured bridged chain. 
-pub type BridgedBeefyAuthoritySetInfo = bp_beefy::BeefyAuthoritySetInfoOf>; -/// BEEFY signed commitment, used by configured bridged chain. -pub type BridgedBeefySignedCommitment = bp_beefy::BeefySignedCommitmentOf>; -/// MMR hashing algorithm, used by configured bridged chain. -pub type BridgedMmrHashing = bp_beefy::MmrHashingOf>; -/// MMR hashing output type of `BridgedMmrHashing`. -pub type BridgedMmrHash = bp_beefy::MmrHashOf>; -/// The type of the MMR leaf extra data used by the configured bridged chain. -pub type BridgedBeefyMmrLeafExtra = bp_beefy::BeefyMmrLeafExtraOf>; -/// BEEFY MMR proof type used by the pallet -pub type BridgedMmrProof = bp_beefy::MmrProofOf>; -/// MMR leaf type, used by configured bridged chain. -pub type BridgedBeefyMmrLeaf = bp_beefy::BeefyMmrLeafOf>; -/// Imported commitment data, stored by the pallet. -pub type ImportedCommitment = bp_beefy::ImportedCommitment< - BridgedBlockNumber, - BridgedBlockHash, - BridgedMmrHash, ->; - -/// Some high level info about the imported commitments. -#[derive(codec::Encode, codec::Decode, scale_info::TypeInfo)] -pub struct ImportedCommitmentsInfoData { - /// Best known block number, provided in a BEEFY commitment. However this is not - /// the best proven block. The best proven block is this block's parent. - best_block_number: BlockNumber, - /// The head of the `ImportedBlockNumbers` ring buffer. - next_block_number_index: u32, -} - -#[frame_support::pallet(dev_mode)] -pub mod pallet { - use super::*; - use bp_runtime::{BasicOperatingMode, OwnedBridgeModule}; - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - - #[pallet::config] - pub trait Config: frame_system::Config { - /// The upper bound on the number of requests allowed by the pallet. - /// - /// A request refers to an action which writes a header to storage. - /// - /// Once this bound is reached the pallet will reject all commitments - /// until the request count has decreased. 
- #[pallet::constant] - type MaxRequests: Get; - - /// Maximal number of imported commitments to keep in the storage. - /// - /// The setting is there to prevent growing the on-chain state indefinitely. Note - /// the setting does not relate to block numbers - we will simply keep as much items - /// in the storage, so it doesn't guarantee any fixed timeframe for imported commitments. - #[pallet::constant] - type CommitmentsToKeep: Get; - - /// The chain we are bridging to here. - type BridgedChain: ChainWithBeefy; - } - - #[pallet::pallet] - #[pallet::without_storage_info] - pub struct Pallet(PhantomData<(T, I)>); - - #[pallet::hooks] - impl, I: 'static> Hooks> for Pallet { - fn on_initialize(_n: BlockNumberFor) -> frame_support::weights::Weight { - >::mutate(|count| *count = count.saturating_sub(1)); - - Weight::from_parts(0, 0) - .saturating_add(T::DbWeight::get().reads(1)) - .saturating_add(T::DbWeight::get().writes(1)) - } - } - - impl, I: 'static> OwnedBridgeModule for Pallet { - const LOG_TARGET: &'static str = LOG_TARGET; - type OwnerStorage = PalletOwner; - type OperatingMode = BasicOperatingMode; - type OperatingModeStorage = PalletOperatingMode; - } - - #[pallet::call] - impl, I: 'static> Pallet - where - BridgedMmrHashing: 'static + Send + Sync, - { - /// Initialize pallet with BEEFY authority set and best known finalized block number. - #[pallet::call_index(0)] - #[pallet::weight((T::DbWeight::get().reads_writes(2, 3), DispatchClass::Operational))] - pub fn initialize( - origin: OriginFor, - init_data: InitializationDataOf, - ) -> DispatchResult { - Self::ensure_owner_or_root(origin)?; - - let is_initialized = >::exists(); - ensure!(!is_initialized, >::AlreadyInitialized); - - log::info!(target: LOG_TARGET, "Initializing bridge BEEFY pallet: {:?}", init_data); - Ok(initialize::(init_data)?) - } - - /// Change `PalletOwner`. - /// - /// May only be called either by root, or by `PalletOwner`. 
- #[pallet::call_index(1)] - #[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] - pub fn set_owner(origin: OriginFor, new_owner: Option) -> DispatchResult { - >::set_owner(origin, new_owner) - } - - /// Halt or resume all pallet operations. - /// - /// May only be called either by root, or by `PalletOwner`. - #[pallet::call_index(2)] - #[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] - pub fn set_operating_mode( - origin: OriginFor, - operating_mode: BasicOperatingMode, - ) -> DispatchResult { - >::set_operating_mode(origin, operating_mode) - } - - /// Submit a commitment generated by BEEFY authority set. - /// - /// It will use the underlying storage pallet to fetch information about the current - /// authority set and best finalized block number in order to verify that the commitment - /// is valid. - /// - /// If successful in verification, it will update the underlying storage with the data - /// provided in the newly submitted commitment. - #[pallet::call_index(3)] - #[pallet::weight(0)] - pub fn submit_commitment( - origin: OriginFor, - commitment: BridgedBeefySignedCommitment, - validator_set: BridgedBeefyAuthoritySet, - mmr_leaf: Box>, - mmr_proof: BridgedMmrProof, - ) -> DispatchResult - where - BridgedBeefySignedCommitment: Clone, - { - Self::ensure_not_halted().map_err(Error::::BridgeModule)?; - ensure_signed(origin)?; - - ensure!(Self::request_count() < T::MaxRequests::get(), >::TooManyRequests); - - // Ensure that the commitment is for a better block. - let commitments_info = - ImportedCommitmentsInfo::::get().ok_or(Error::::NotInitialized)?; - ensure!( - commitment.commitment.block_number > commitments_info.best_block_number, - Error::::OldCommitment - ); - - // Verify commitment and mmr leaf. 
- let current_authority_set_info = CurrentAuthoritySetInfo::::get(); - let mmr_root = utils::verify_commitment::( - &commitment, - ¤t_authority_set_info, - &validator_set, - )?; - utils::verify_beefy_mmr_leaf::(&mmr_leaf, mmr_proof, mmr_root)?; - - // Update request count. - RequestCount::::mutate(|count| *count += 1); - // Update authority set if needed. - if mmr_leaf.beefy_next_authority_set.id > current_authority_set_info.id { - CurrentAuthoritySetInfo::::put(mmr_leaf.beefy_next_authority_set); - } - - // Import commitment. - let block_number_index = commitments_info.next_block_number_index; - let to_prune = ImportedBlockNumbers::::try_get(block_number_index); - ImportedCommitments::::insert( - commitment.commitment.block_number, - ImportedCommitment:: { - parent_number_and_hash: mmr_leaf.parent_number_and_hash, - mmr_root, - }, - ); - ImportedBlockNumbers::::insert( - block_number_index, - commitment.commitment.block_number, - ); - ImportedCommitmentsInfo::::put(ImportedCommitmentsInfoData { - best_block_number: commitment.commitment.block_number, - next_block_number_index: (block_number_index + 1) % T::CommitmentsToKeep::get(), - }); - if let Ok(old_block_number) = to_prune { - log::debug!( - target: LOG_TARGET, - "Pruning commitment for old block: {:?}.", - old_block_number - ); - ImportedCommitments::::remove(old_block_number); - } - - log::info!( - target: LOG_TARGET, - "Successfully imported commitment for block {:?}", - commitment.commitment.block_number, - ); - - Ok(()) - } - } - - /// The current number of requests which have written to storage. - /// - /// If the `RequestCount` hits `MaxRequests`, no more calls will be allowed to the pallet until - /// the request capacity is increased. - /// - /// The `RequestCount` is decreased by one at the beginning of every block. This is to ensure - /// that the pallet can always make progress. 
- #[pallet::storage] - #[pallet::getter(fn request_count)] - pub type RequestCount, I: 'static = ()> = StorageValue<_, u32, ValueQuery>; - - /// High level info about the imported commitments. - /// - /// Contains the following info: - /// - best known block number of the bridged chain, finalized by BEEFY - /// - the head of the `ImportedBlockNumbers` ring buffer - #[pallet::storage] - pub type ImportedCommitmentsInfo, I: 'static = ()> = - StorageValue<_, ImportedCommitmentsInfoData>>; - - /// A ring buffer containing the block numbers of the commitments that we have imported, - /// ordered by the insertion time. - #[pallet::storage] - pub(super) type ImportedBlockNumbers, I: 'static = ()> = - StorageMap<_, Identity, u32, BridgedBlockNumber>; - - /// All the commitments that we have imported and haven't been pruned yet. - #[pallet::storage] - pub type ImportedCommitments, I: 'static = ()> = - StorageMap<_, Blake2_128Concat, BridgedBlockNumber, ImportedCommitment>; - - /// The current BEEFY authority set at the bridged chain. - #[pallet::storage] - pub type CurrentAuthoritySetInfo, I: 'static = ()> = - StorageValue<_, BridgedBeefyAuthoritySetInfo, ValueQuery>; - - /// Optional pallet owner. - /// - /// Pallet owner has the right to halt all pallet operations and then resume it. If it is - /// `None`, then there are no direct ways to halt/resume pallet operations, but other - /// runtime methods may still be used to do that (i.e. `democracy::referendum` to update halt - /// flag directly or calling `halt_operations`). - #[pallet::storage] - pub type PalletOwner, I: 'static = ()> = - StorageValue<_, T::AccountId, OptionQuery>; - - /// The current operating mode of the pallet. - /// - /// Depending on the mode either all, or no transactions will be allowed. 
- #[pallet::storage] - pub type PalletOperatingMode, I: 'static = ()> = - StorageValue<_, BasicOperatingMode, ValueQuery>; - - #[pallet::genesis_config] - #[derive(frame_support::DefaultNoBound)] - pub struct GenesisConfig, I: 'static = ()> { - /// Optional module owner account. - pub owner: Option, - /// Optional module initialization data. - pub init_data: Option>, - } - - #[pallet::genesis_build] - impl, I: 'static> BuildGenesisConfig for GenesisConfig { - fn build(&self) { - if let Some(ref owner) = self.owner { - >::put(owner); - } - - if let Some(init_data) = self.init_data.clone() { - initialize::(init_data) - .expect("invalid initialization data of BEEFY bridge pallet"); - } else { - // Since the bridge hasn't been initialized we shouldn't allow anyone to perform - // transactions. - >::put(BasicOperatingMode::Halted); - } - } - } - - #[pallet::error] - pub enum Error { - /// The pallet has not been initialized yet. - NotInitialized, - /// The pallet has already been initialized. - AlreadyInitialized, - /// Invalid initial authority set. - InvalidInitialAuthoritySet, - /// There are too many requests for the current window to handle. - TooManyRequests, - /// The imported commitment is older than the best commitment known to the pallet. - OldCommitment, - /// The commitment is signed by unknown validator set. - InvalidCommitmentValidatorSetId, - /// The id of the provided validator set is invalid. - InvalidValidatorSetId, - /// The number of signatures in the commitment is invalid. - InvalidCommitmentSignaturesLen, - /// The number of validator ids provided is invalid. - InvalidValidatorSetLen, - /// There aren't enough correct signatures in the commitment to finalize the block. - NotEnoughCorrectSignatures, - /// MMR root is missing from the commitment. - MmrRootMissingFromCommitment, - /// MMR proof verification has failed. - MmrProofVerificationFailed, - /// The validators are not matching the merkle tree root of the authority set. 
- InvalidValidatorSetRoot, - /// Error generated by the `OwnedBridgeModule` trait. - BridgeModule(bp_runtime::OwnedBridgeModuleError), - } - - /// Initialize pallet with given parameters. - pub(super) fn initialize, I: 'static>( - init_data: InitializationDataOf, - ) -> Result<(), Error> { - if init_data.authority_set.len == 0 { - return Err(Error::::InvalidInitialAuthoritySet) - } - CurrentAuthoritySetInfo::::put(init_data.authority_set); - - >::put(init_data.operating_mode); - ImportedCommitmentsInfo::::put(ImportedCommitmentsInfoData { - best_block_number: init_data.best_block_number, - next_block_number_index: 0, - }); - - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use bp_runtime::{BasicOperatingMode, OwnedBridgeModuleError}; - use bp_test_utils::generate_owned_bridge_module_tests; - use frame_support::{assert_noop, assert_ok, traits::Get}; - use mock::*; - use mock_chain::*; - use sp_consensus_beefy::mmr::BeefyAuthoritySet; - use sp_runtime::DispatchError; - - fn next_block() { - use frame_support::traits::OnInitialize; - - let current_number = frame_system::Pallet::::block_number(); - frame_system::Pallet::::set_block_number(current_number + 1); - let _ = Pallet::::on_initialize(current_number); - } - - fn import_header_chain(headers: Vec) { - for header in headers { - if header.commitment.is_some() { - assert_ok!(import_commitment(header)); - } - } - } - - #[test] - fn fails_to_initialize_if_already_initialized() { - run_test_with_initialize(32, || { - assert_noop!( - Pallet::::initialize( - RuntimeOrigin::root(), - InitializationData { - operating_mode: BasicOperatingMode::Normal, - best_block_number: 0, - authority_set: BeefyAuthoritySet { - id: 0, - len: 1, - keyset_commitment: [0u8; 32].into() - } - } - ), - Error::::AlreadyInitialized, - ); - }); - } - - #[test] - fn fails_to_initialize_if_authority_set_is_empty() { - run_test(|| { - assert_noop!( - Pallet::::initialize( - RuntimeOrigin::root(), - InitializationData { - operating_mode: 
BasicOperatingMode::Normal, - best_block_number: 0, - authority_set: BeefyAuthoritySet { - id: 0, - len: 0, - keyset_commitment: [0u8; 32].into() - } - } - ), - Error::::InvalidInitialAuthoritySet, - ); - }); - } - - #[test] - fn fails_to_import_commitment_if_halted() { - run_test_with_initialize(1, || { - assert_ok!(Pallet::::set_operating_mode( - RuntimeOrigin::root(), - BasicOperatingMode::Halted - )); - assert_noop!( - import_commitment(ChainBuilder::new(1).append_finalized_header().to_header()), - Error::::BridgeModule(OwnedBridgeModuleError::Halted), - ); - }) - } - - #[test] - fn fails_to_import_commitment_if_too_many_requests() { - run_test_with_initialize(1, || { - let max_requests = <::MaxRequests as Get>::get() as u64; - let mut chain = ChainBuilder::new(1); - for _ in 0..max_requests + 2 { - chain = chain.append_finalized_header(); - } - - // import `max_request` headers - for i in 0..max_requests { - assert_ok!(import_commitment(chain.header(i + 1))); - } - - // try to import next header: it fails because we are no longer accepting commitments - assert_noop!( - import_commitment(chain.header(max_requests + 1)), - Error::::TooManyRequests, - ); - - // when next block is "started", we allow import of next header - next_block(); - assert_ok!(import_commitment(chain.header(max_requests + 1))); - - // but we can't import two headers until next block and so on - assert_noop!( - import_commitment(chain.header(max_requests + 2)), - Error::::TooManyRequests, - ); - }) - } - - #[test] - fn fails_to_import_commitment_if_not_initialized() { - run_test(|| { - assert_noop!( - import_commitment(ChainBuilder::new(1).append_finalized_header().to_header()), - Error::::NotInitialized, - ); - }) - } - - #[test] - fn submit_commitment_works_with_long_chain_with_handoffs() { - run_test_with_initialize(3, || { - let chain = ChainBuilder::new(3) - .append_finalized_header() - .append_default_headers(16) // 2..17 - .append_finalized_header() // 18 - .append_default_headers(16) 
// 19..34 - .append_handoff_header(9) // 35 - .append_default_headers(8) // 36..43 - .append_finalized_header() // 44 - .append_default_headers(8) // 45..52 - .append_handoff_header(17) // 53 - .append_default_headers(4) // 54..57 - .append_finalized_header() // 58 - .append_default_headers(4); // 59..63 - import_header_chain(chain.to_chain()); - - assert_eq!( - ImportedCommitmentsInfo::::get().unwrap().best_block_number, - 58 - ); - assert_eq!(CurrentAuthoritySetInfo::::get().id, 2); - assert_eq!(CurrentAuthoritySetInfo::::get().len, 17); - - let imported_commitment = ImportedCommitments::::get(58).unwrap(); - assert_eq!( - imported_commitment, - bp_beefy::ImportedCommitment { - parent_number_and_hash: (57, chain.header(57).header.hash()), - mmr_root: chain.header(58).mmr_root, - }, - ); - }) - } - - #[test] - fn commitment_pruning_works() { - run_test_with_initialize(3, || { - let commitments_to_keep = >::CommitmentsToKeep::get(); - let commitments_to_import: Vec = ChainBuilder::new(3) - .append_finalized_headers(commitments_to_keep as usize + 2) - .to_chain(); - - // import exactly `CommitmentsToKeep` commitments - for index in 0..commitments_to_keep { - next_block(); - import_commitment(commitments_to_import[index as usize].clone()) - .expect("must succeed"); - assert_eq!( - ImportedCommitmentsInfo::::get().unwrap().next_block_number_index, - (index + 1) % commitments_to_keep - ); - } - - // ensure that all commitments are in the storage - assert_eq!( - ImportedCommitmentsInfo::::get().unwrap().best_block_number, - commitments_to_keep as TestBridgedBlockNumber - ); - assert_eq!( - ImportedCommitmentsInfo::::get().unwrap().next_block_number_index, - 0 - ); - for index in 0..commitments_to_keep { - assert!(ImportedCommitments::::get( - index as TestBridgedBlockNumber + 1 - ) - .is_some()); - assert_eq!( - ImportedBlockNumbers::::get(index), - Some(Into::into(index + 1)), - ); - } - - // import next commitment - next_block(); - 
import_commitment(commitments_to_import[commitments_to_keep as usize].clone()) - .expect("must succeed"); - assert_eq!( - ImportedCommitmentsInfo::::get().unwrap().next_block_number_index, - 1 - ); - assert!(ImportedCommitments::::get( - commitments_to_keep as TestBridgedBlockNumber + 1 - ) - .is_some()); - assert_eq!( - ImportedBlockNumbers::::get(0), - Some(Into::into(commitments_to_keep + 1)), - ); - // the side effect of the import is that the commitment#1 is pruned - assert!(ImportedCommitments::::get(1).is_none()); - - // import next commitment - next_block(); - import_commitment(commitments_to_import[commitments_to_keep as usize + 1].clone()) - .expect("must succeed"); - assert_eq!( - ImportedCommitmentsInfo::::get().unwrap().next_block_number_index, - 2 - ); - assert!(ImportedCommitments::::get( - commitments_to_keep as TestBridgedBlockNumber + 2 - ) - .is_some()); - assert_eq!( - ImportedBlockNumbers::::get(1), - Some(Into::into(commitments_to_keep + 2)), - ); - // the side effect of the import is that the commitment#2 is pruned - assert!(ImportedCommitments::::get(1).is_none()); - assert!(ImportedCommitments::::get(2).is_none()); - }); - } - - generate_owned_bridge_module_tests!(BasicOperatingMode::Normal, BasicOperatingMode::Halted); -} diff --git a/modules/beefy/src/mock.rs b/modules/beefy/src/mock.rs deleted file mode 100644 index c99566b6b06d1855319d614f4f4ddfbf2fb1918b..0000000000000000000000000000000000000000 --- a/modules/beefy/src/mock.rs +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate as beefy; -use crate::{ - utils::get_authorities_mmr_root, BridgedBeefyAuthoritySet, BridgedBeefyAuthoritySetInfo, - BridgedBeefyCommitmentHasher, BridgedBeefyMmrLeafExtra, BridgedBeefySignedCommitment, - BridgedMmrHash, BridgedMmrHashing, BridgedMmrProof, -}; - -use bp_beefy::{BeefyValidatorSignatureOf, ChainWithBeefy, Commitment, MmrDataOrHash}; -use bp_runtime::{BasicOperatingMode, Chain, ChainId}; -use codec::Encode; -use frame_support::{construct_runtime, derive_impl, weights::Weight}; -use sp_core::{sr25519::Signature, Pair}; -use sp_runtime::{ - testing::{Header, H256}, - traits::{BlakeTwo256, Hash}, -}; - -pub use sp_consensus_beefy::ecdsa_crypto::{AuthorityId as BeefyId, Pair as BeefyPair}; -use sp_core::crypto::Wraps; -use sp_runtime::traits::Keccak256; - -pub type TestAccountId = u64; -pub type TestBridgedBlockNumber = u64; -pub type TestBridgedBlockHash = H256; -pub type TestBridgedHeader = Header; -pub type TestBridgedAuthoritySetInfo = BridgedBeefyAuthoritySetInfo; -pub type TestBridgedValidatorSet = BridgedBeefyAuthoritySet; -pub type TestBridgedCommitment = BridgedBeefySignedCommitment; -pub type TestBridgedValidatorSignature = BeefyValidatorSignatureOf; -pub type TestBridgedCommitmentHasher = BridgedBeefyCommitmentHasher; -pub type TestBridgedMmrHashing = BridgedMmrHashing; -pub type TestBridgedMmrHash = BridgedMmrHash; -pub type TestBridgedBeefyMmrLeafExtra = BridgedBeefyMmrLeafExtra; -pub type TestBridgedMmrProof = BridgedMmrProof; -pub type TestBridgedRawMmrLeaf = sp_consensus_beefy::mmr::MmrLeaf< - TestBridgedBlockNumber, - TestBridgedBlockHash, - 
TestBridgedMmrHash, - TestBridgedBeefyMmrLeafExtra, ->; -pub type TestBridgedMmrNode = MmrDataOrHash; - -type Block = frame_system::mocking::MockBlock; - -construct_runtime! { - pub enum TestRuntime - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Beefy: beefy::{Pallet}, - } -} - -#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] -impl frame_system::Config for TestRuntime { - type Block = Block; -} - -impl beefy::Config for TestRuntime { - type MaxRequests = frame_support::traits::ConstU32<16>; - type BridgedChain = TestBridgedChain; - type CommitmentsToKeep = frame_support::traits::ConstU32<16>; -} - -#[derive(Debug)] -pub struct TestBridgedChain; - -impl Chain for TestBridgedChain { - const ID: ChainId = *b"tbch"; - - type BlockNumber = TestBridgedBlockNumber; - type Hash = H256; - type Hasher = BlakeTwo256; - type Header = sp_runtime::testing::Header; - - type AccountId = TestAccountId; - type Balance = u64; - type Nonce = u64; - type Signature = Signature; - - fn max_extrinsic_size() -> u32 { - unreachable!() - } - fn max_extrinsic_weight() -> Weight { - unreachable!() - } -} - -impl ChainWithBeefy for TestBridgedChain { - type CommitmentHasher = Keccak256; - type MmrHashing = Keccak256; - type MmrHash = ::Output; - type BeefyMmrLeafExtra = (); - type AuthorityId = BeefyId; - type AuthorityIdToMerkleLeaf = pallet_beefy_mmr::BeefyEcdsaToEthereum; -} - -/// Run test within test runtime. -pub fn run_test(test: impl FnOnce() -> T) -> T { - sp_io::TestExternalities::new(Default::default()).execute_with(test) -} - -/// Initialize pallet and run test. 
-pub fn run_test_with_initialize(initial_validators_count: u32, test: impl FnOnce() -> T) -> T { - run_test(|| { - let validators = validator_ids(0, initial_validators_count); - let authority_set = authority_set_info(0, &validators); - - crate::Pallet::::initialize( - RuntimeOrigin::root(), - bp_beefy::InitializationData { - operating_mode: BasicOperatingMode::Normal, - best_block_number: 0, - authority_set, - }, - ) - .expect("initialization data is correct"); - - test() - }) -} - -/// Import given commitment. -pub fn import_commitment( - header: crate::mock_chain::HeaderAndCommitment, -) -> sp_runtime::DispatchResult { - crate::Pallet::::submit_commitment( - RuntimeOrigin::signed(1), - header - .commitment - .expect("thou shall not call import_commitment on header without commitment"), - header.validator_set, - Box::new(header.leaf), - header.leaf_proof, - ) -} - -pub fn validator_pairs(index: u32, count: u32) -> Vec { - (index..index + count) - .map(|index| { - let mut seed = [1u8; 32]; - seed[0..8].copy_from_slice(&(index as u64).encode()); - BeefyPair::from_seed(&seed) - }) - .collect() -} - -/// Return identifiers of validators, starting at given index. -pub fn validator_ids(index: u32, count: u32) -> Vec { - validator_pairs(index, count).into_iter().map(|pair| pair.public()).collect() -} - -pub fn authority_set_info(id: u64, validators: &[BeefyId]) -> TestBridgedAuthoritySetInfo { - let merkle_root = get_authorities_mmr_root::(validators.iter()); - - TestBridgedAuthoritySetInfo { id, len: validators.len() as u32, keyset_commitment: merkle_root } -} - -/// Sign BEEFY commitment. 
-pub fn sign_commitment( - commitment: Commitment, - validator_pairs: &[BeefyPair], - signature_count: usize, -) -> TestBridgedCommitment { - let total_validators = validator_pairs.len(); - let random_validators = - rand::seq::index::sample(&mut rand::thread_rng(), total_validators, signature_count); - - let commitment_hash = TestBridgedCommitmentHasher::hash(&commitment.encode()); - let mut signatures = vec![None; total_validators]; - for validator_idx in random_validators.iter() { - let validator = &validator_pairs[validator_idx]; - signatures[validator_idx] = - Some(validator.as_inner_ref().sign_prehashed(commitment_hash.as_fixed_bytes()).into()); - } - - TestBridgedCommitment { commitment, signatures } -} diff --git a/modules/beefy/src/mock_chain.rs b/modules/beefy/src/mock_chain.rs deleted file mode 100644 index c4fa74915bfeb1c64ac1604ec632311e1dfd7cc8..0000000000000000000000000000000000000000 --- a/modules/beefy/src/mock_chain.rs +++ /dev/null @@ -1,299 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Utilities to build bridged chain and BEEFY+MMR structures. 
- -use crate::{ - mock::{ - sign_commitment, validator_pairs, BeefyPair, TestBridgedBlockNumber, TestBridgedCommitment, - TestBridgedHeader, TestBridgedMmrHash, TestBridgedMmrHashing, TestBridgedMmrNode, - TestBridgedMmrProof, TestBridgedRawMmrLeaf, TestBridgedValidatorSet, - TestBridgedValidatorSignature, TestRuntime, - }, - utils::get_authorities_mmr_root, -}; - -use bp_beefy::{BeefyPayload, Commitment, ValidatorSetId, MMR_ROOT_PAYLOAD_ID}; -use codec::Encode; -use pallet_mmr::NodeIndex; -use rand::Rng; -use sp_consensus_beefy::mmr::{BeefyNextAuthoritySet, MmrLeafVersion}; -use sp_core::Pair; -use sp_runtime::traits::{Hash, Header as HeaderT}; -use std::collections::HashMap; - -#[derive(Debug, Clone)] -pub struct HeaderAndCommitment { - pub header: TestBridgedHeader, - pub commitment: Option, - pub validator_set: TestBridgedValidatorSet, - pub leaf: TestBridgedRawMmrLeaf, - pub leaf_proof: TestBridgedMmrProof, - pub mmr_root: TestBridgedMmrHash, -} - -impl HeaderAndCommitment { - pub fn customize_signatures( - &mut self, - f: impl FnOnce(&mut Vec>), - ) { - if let Some(commitment) = &mut self.commitment { - f(&mut commitment.signatures); - } - } - - pub fn customize_commitment( - &mut self, - f: impl FnOnce(&mut Commitment), - validator_pairs: &[BeefyPair], - signature_count: usize, - ) { - if let Some(mut commitment) = self.commitment.take() { - f(&mut commitment.commitment); - self.commitment = - Some(sign_commitment(commitment.commitment, validator_pairs, signature_count)); - } - } -} - -pub struct ChainBuilder { - headers: Vec, - validator_set_id: ValidatorSetId, - validator_keys: Vec, - mmr: mmr_lib::MMR, -} - -struct BridgedMmrStorage { - nodes: HashMap, -} - -impl mmr_lib::MMRStore for BridgedMmrStorage { - fn get_elem(&self, pos: NodeIndex) -> mmr_lib::Result> { - Ok(self.nodes.get(&pos).cloned()) - } - - fn append(&mut self, pos: NodeIndex, elems: Vec) -> mmr_lib::Result<()> { - for (i, elem) in elems.into_iter().enumerate() { - self.nodes.insert(pos + i 
as NodeIndex, elem); - } - Ok(()) - } -} - -impl ChainBuilder { - /// Creates new chain builder with given validator set size. - pub fn new(initial_validators_count: u32) -> Self { - ChainBuilder { - headers: Vec::new(), - validator_set_id: 0, - validator_keys: validator_pairs(0, initial_validators_count), - mmr: mmr_lib::MMR::new(0, BridgedMmrStorage { nodes: HashMap::new() }), - } - } - - /// Get header with given number. - pub fn header(&self, number: TestBridgedBlockNumber) -> HeaderAndCommitment { - self.headers[number as usize - 1].clone() - } - - /// Returns single built header. - pub fn to_header(&self) -> HeaderAndCommitment { - assert_eq!(self.headers.len(), 1); - self.headers[0].clone() - } - - /// Returns built chain. - pub fn to_chain(&self) -> Vec { - self.headers.clone() - } - - /// Appends header, that has been finalized by BEEFY (so it has a linked signed commitment). - pub fn append_finalized_header(self) -> Self { - let next_validator_set_id = self.validator_set_id; - let next_validator_keys = self.validator_keys.clone(); - HeaderBuilder::with_chain(self, next_validator_set_id, next_validator_keys).finalize() - } - - /// Append multiple finalized headers at once. - pub fn append_finalized_headers(mut self, count: usize) -> Self { - for _ in 0..count { - self = self.append_finalized_header(); - } - self - } - - /// Appends header, that enacts new validator set. - /// - /// Such headers are explicitly finalized by BEEFY. - pub fn append_handoff_header(self, next_validators_len: u32) -> Self { - let new_validator_set_id = self.validator_set_id + 1; - let new_validator_pairs = - validator_pairs(rand::thread_rng().gen::() % (u32::MAX / 2), next_validators_len); - - HeaderBuilder::with_chain(self, new_validator_set_id, new_validator_pairs).finalize() - } - - /// Append several default header without commitment. 
- pub fn append_default_headers(mut self, count: usize) -> Self { - for _ in 0..count { - let next_validator_set_id = self.validator_set_id; - let next_validator_keys = self.validator_keys.clone(); - self = - HeaderBuilder::with_chain(self, next_validator_set_id, next_validator_keys).build() - } - self - } -} - -/// Custom header builder. -pub struct HeaderBuilder { - chain: ChainBuilder, - header: TestBridgedHeader, - leaf: TestBridgedRawMmrLeaf, - leaf_proof: Option, - next_validator_set_id: ValidatorSetId, - next_validator_keys: Vec, -} - -impl HeaderBuilder { - fn with_chain( - chain: ChainBuilder, - next_validator_set_id: ValidatorSetId, - next_validator_keys: Vec, - ) -> Self { - // we're starting with header#1, since header#0 is always finalized - let header_number = chain.headers.len() as TestBridgedBlockNumber + 1; - let header = TestBridgedHeader::new( - header_number, - Default::default(), - Default::default(), - chain.headers.last().map(|h| h.header.hash()).unwrap_or_default(), - Default::default(), - ); - - let next_validators = - next_validator_keys.iter().map(|pair| pair.public()).collect::>(); - let next_validators_mmr_root = - get_authorities_mmr_root::(next_validators.iter()); - let leaf = sp_consensus_beefy::mmr::MmrLeaf { - version: MmrLeafVersion::new(1, 0), - parent_number_and_hash: (header.number().saturating_sub(1), *header.parent_hash()), - beefy_next_authority_set: BeefyNextAuthoritySet { - id: next_validator_set_id, - len: next_validators.len() as u32, - keyset_commitment: next_validators_mmr_root, - }, - leaf_extra: (), - }; - - HeaderBuilder { - chain, - header, - leaf, - leaf_proof: None, - next_validator_keys, - next_validator_set_id, - } - } - - /// Customize generated proof of header MMR leaf. - /// - /// Can only be called once. 
- pub fn customize_proof( - mut self, - f: impl FnOnce(TestBridgedMmrProof) -> TestBridgedMmrProof, - ) -> Self { - assert!(self.leaf_proof.is_none()); - - let leaf_hash = TestBridgedMmrHashing::hash(&self.leaf.encode()); - let node = TestBridgedMmrNode::Hash(leaf_hash); - let leaf_position = self.chain.mmr.push(node).unwrap(); - - let proof = self.chain.mmr.gen_proof(vec![leaf_position]).unwrap(); - // genesis has no leaf => leaf index is header number minus 1 - let leaf_index = *self.header.number() - 1; - let leaf_count = *self.header.number(); - self.leaf_proof = Some(f(TestBridgedMmrProof { - leaf_indices: vec![leaf_index], - leaf_count, - items: proof.proof_items().iter().map(|i| i.hash()).collect(), - })); - - self - } - - /// Build header without commitment. - pub fn build(mut self) -> ChainBuilder { - if self.leaf_proof.is_none() { - self = self.customize_proof(|proof| proof); - } - - let validators = - self.chain.validator_keys.iter().map(|pair| pair.public()).collect::>(); - self.chain.headers.push(HeaderAndCommitment { - header: self.header, - commitment: None, - validator_set: TestBridgedValidatorSet::new(validators, self.chain.validator_set_id) - .unwrap(), - leaf: self.leaf, - leaf_proof: self.leaf_proof.expect("guaranteed by the customize_proof call above; qed"), - mmr_root: self.chain.mmr.get_root().unwrap().hash(), - }); - - self.chain.validator_set_id = self.next_validator_set_id; - self.chain.validator_keys = self.next_validator_keys; - - self.chain - } - - /// Build header with commitment. 
- pub fn finalize(self) -> ChainBuilder { - let validator_count = self.chain.validator_keys.len(); - let current_validator_set_id = self.chain.validator_set_id; - let current_validator_set_keys = self.chain.validator_keys.clone(); - let mut chain = self.build(); - - let last_header = chain.headers.last_mut().expect("added by append_header; qed"); - last_header.commitment = Some(sign_commitment( - Commitment { - payload: BeefyPayload::from_single_entry( - MMR_ROOT_PAYLOAD_ID, - chain.mmr.get_root().unwrap().hash().encode(), - ), - block_number: *last_header.header.number(), - validator_set_id: current_validator_set_id, - }, - ¤t_validator_set_keys, - validator_count * 2 / 3 + 1, - )); - - chain - } -} - -/// Default Merging & Hashing behavior for MMR. -pub struct BridgedMmrHashMerge; - -impl mmr_lib::Merge for BridgedMmrHashMerge { - type Item = TestBridgedMmrNode; - - fn merge(left: &Self::Item, right: &Self::Item) -> Self::Item { - let mut concat = left.hash().as_ref().to_vec(); - concat.extend_from_slice(right.hash().as_ref()); - - TestBridgedMmrNode::Hash(TestBridgedMmrHashing::hash(&concat)) - } -} diff --git a/modules/beefy/src/utils.rs b/modules/beefy/src/utils.rs deleted file mode 100644 index ce7a116308d1626fd400e2ffa79e44828aea63f2..0000000000000000000000000000000000000000 --- a/modules/beefy/src/utils.rs +++ /dev/null @@ -1,361 +0,0 @@ -use crate::{ - BridgedBeefyAuthorityId, BridgedBeefyAuthoritySet, BridgedBeefyAuthoritySetInfo, - BridgedBeefyMmrLeaf, BridgedBeefySignedCommitment, BridgedChain, BridgedMmrHash, - BridgedMmrHashing, BridgedMmrProof, Config, Error, LOG_TARGET, -}; -use bp_beefy::{merkle_root, verify_mmr_leaves_proof, BeefyAuthorityId, MmrDataOrHash}; -use codec::Encode; -use frame_support::ensure; -use sp_runtime::traits::{Convert, Hash}; -use sp_std::{vec, vec::Vec}; - -type BridgedMmrDataOrHash = MmrDataOrHash, BridgedBeefyMmrLeaf>; -/// A way to encode validator id to the BEEFY merkle tree leaf. 
-type BridgedBeefyAuthorityIdToMerkleLeaf = - bp_beefy::BeefyAuthorityIdToMerkleLeafOf>; - -/// Get the MMR root for a collection of validators. -pub(crate) fn get_authorities_mmr_root< - 'a, - T: Config, - I: 'static, - V: Iterator>, ->( - authorities: V, -) -> BridgedMmrHash { - let merkle_leafs = authorities - .cloned() - .map(BridgedBeefyAuthorityIdToMerkleLeaf::::convert) - .collect::>(); - merkle_root::, _>(merkle_leafs) -} - -fn verify_authority_set, I: 'static>( - authority_set_info: &BridgedBeefyAuthoritySetInfo, - authority_set: &BridgedBeefyAuthoritySet, -) -> Result<(), Error> { - ensure!(authority_set.id() == authority_set_info.id, Error::::InvalidValidatorSetId); - ensure!( - authority_set.len() == authority_set_info.len as usize, - Error::::InvalidValidatorSetLen - ); - - // Ensure that the authority set that signed the commitment is the expected one. - let root = get_authorities_mmr_root::(authority_set.validators().iter()); - ensure!(root == authority_set_info.keyset_commitment, Error::::InvalidValidatorSetRoot); - - Ok(()) -} - -/// Number of correct signatures, required from given validators set to accept signed -/// commitment. -/// -/// We're using 'conservative' approach here, where signatures of `2/3+1` validators are -/// required.. -pub(crate) fn signatures_required(validators_len: usize) -> usize { - validators_len - validators_len.saturating_sub(1) / 3 -} - -fn verify_signatures, I: 'static>( - commitment: &BridgedBeefySignedCommitment, - authority_set: &BridgedBeefyAuthoritySet, -) -> Result<(), Error> { - ensure!( - commitment.signatures.len() == authority_set.len(), - Error::::InvalidCommitmentSignaturesLen - ); - - // Ensure that the commitment was signed by enough authorities. 
- let msg = commitment.commitment.encode(); - let mut missing_signatures = signatures_required(authority_set.len()); - for (idx, (authority, maybe_sig)) in - authority_set.validators().iter().zip(commitment.signatures.iter()).enumerate() - { - if let Some(sig) = maybe_sig { - if authority.verify(sig, &msg) { - missing_signatures = missing_signatures.saturating_sub(1); - if missing_signatures == 0 { - break - } - } else { - log::debug!( - target: LOG_TARGET, - "Signed commitment contains incorrect signature of validator {} ({:?}): {:?}", - idx, - authority, - sig, - ); - } - } - } - ensure!(missing_signatures == 0, Error::::NotEnoughCorrectSignatures); - - Ok(()) -} - -/// Extract MMR root from commitment payload. -fn extract_mmr_root, I: 'static>( - commitment: &BridgedBeefySignedCommitment, -) -> Result, Error> { - commitment - .commitment - .payload - .get_decoded(&bp_beefy::MMR_ROOT_PAYLOAD_ID) - .ok_or(Error::MmrRootMissingFromCommitment) -} - -pub(crate) fn verify_commitment, I: 'static>( - commitment: &BridgedBeefySignedCommitment, - authority_set_info: &BridgedBeefyAuthoritySetInfo, - authority_set: &BridgedBeefyAuthoritySet, -) -> Result, Error> { - // Ensure that the commitment is signed by the best known BEEFY validator set. - ensure!( - commitment.commitment.validator_set_id == authority_set_info.id, - Error::::InvalidCommitmentValidatorSetId - ); - ensure!( - commitment.signatures.len() == authority_set_info.len as usize, - Error::::InvalidCommitmentSignaturesLen - ); - - verify_authority_set(authority_set_info, authority_set)?; - verify_signatures(commitment, authority_set)?; - - extract_mmr_root(commitment) -} - -/// Verify MMR proof of given leaf. 
-pub(crate) fn verify_beefy_mmr_leaf, I: 'static>( - mmr_leaf: &BridgedBeefyMmrLeaf, - mmr_proof: BridgedMmrProof, - mmr_root: BridgedMmrHash, -) -> Result<(), Error> { - let mmr_proof_leaf_count = mmr_proof.leaf_count; - let mmr_proof_length = mmr_proof.items.len(); - - // Verify the mmr proof for the provided leaf. - let mmr_leaf_hash = BridgedMmrHashing::::hash(&mmr_leaf.encode()); - verify_mmr_leaves_proof( - mmr_root, - vec![BridgedMmrDataOrHash::::Hash(mmr_leaf_hash)], - mmr_proof, - ) - .map_err(|e| { - log::error!( - target: LOG_TARGET, - "MMR proof of leaf {:?} (root: {:?}, leaf count: {}, len: {}) \ - verification has failed with error: {:?}", - mmr_leaf_hash, - mmr_root, - mmr_proof_leaf_count, - mmr_proof_length, - e, - ); - - Error::::MmrProofVerificationFailed - }) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{mock::*, mock_chain::*, *}; - use bp_beefy::{BeefyPayload, MMR_ROOT_PAYLOAD_ID}; - use frame_support::{assert_noop, assert_ok}; - use sp_consensus_beefy::ValidatorSet; - - #[test] - fn submit_commitment_checks_metadata() { - run_test_with_initialize(8, || { - // Fails if `commitment.commitment.validator_set_id` differs. - let mut header = ChainBuilder::new(8).append_finalized_header().to_header(); - header.customize_commitment( - |commitment| { - commitment.validator_set_id += 1; - }, - &validator_pairs(0, 8), - 6, - ); - assert_noop!( - import_commitment(header), - Error::::InvalidCommitmentValidatorSetId, - ); - - // Fails if `commitment.signatures.len()` differs. - let mut header = ChainBuilder::new(8).append_finalized_header().to_header(); - header.customize_signatures(|signatures| { - signatures.pop(); - }); - assert_noop!( - import_commitment(header), - Error::::InvalidCommitmentSignaturesLen, - ); - }); - } - - #[test] - fn submit_commitment_checks_validator_set() { - run_test_with_initialize(8, || { - // Fails if `ValidatorSet::id` differs. 
- let mut header = ChainBuilder::new(8).append_finalized_header().to_header(); - header.validator_set = ValidatorSet::new(validator_ids(0, 8), 1).unwrap(); - assert_noop!( - import_commitment(header), - Error::::InvalidValidatorSetId, - ); - - // Fails if `ValidatorSet::len()` differs. - let mut header = ChainBuilder::new(8).append_finalized_header().to_header(); - header.validator_set = ValidatorSet::new(validator_ids(0, 5), 0).unwrap(); - assert_noop!( - import_commitment(header), - Error::::InvalidValidatorSetLen, - ); - - // Fails if the validators differ. - let mut header = ChainBuilder::new(8).append_finalized_header().to_header(); - header.validator_set = ValidatorSet::new(validator_ids(3, 8), 0).unwrap(); - assert_noop!( - import_commitment(header), - Error::::InvalidValidatorSetRoot, - ); - }); - } - - #[test] - fn submit_commitment_checks_signatures() { - run_test_with_initialize(20, || { - // Fails when there aren't enough signatures. - let mut header = ChainBuilder::new(20).append_finalized_header().to_header(); - header.customize_signatures(|signatures| { - let first_signature_idx = signatures.iter().position(Option::is_some).unwrap(); - signatures[first_signature_idx] = None; - }); - assert_noop!( - import_commitment(header), - Error::::NotEnoughCorrectSignatures, - ); - - // Fails when there aren't enough correct signatures. - let mut header = ChainBuilder::new(20).append_finalized_header().to_header(); - header.customize_signatures(|signatures| { - let first_signature_idx = signatures.iter().position(Option::is_some).unwrap(); - let last_signature_idx = signatures.len() - - signatures.iter().rev().position(Option::is_some).unwrap() - - 1; - signatures[first_signature_idx] = signatures[last_signature_idx].clone(); - }); - assert_noop!( - import_commitment(header), - Error::::NotEnoughCorrectSignatures, - ); - - // Returns Ok(()) when there are enough signatures, even if some are incorrect. 
- let mut header = ChainBuilder::new(20).append_finalized_header().to_header(); - header.customize_signatures(|signatures| { - let first_signature_idx = signatures.iter().position(Option::is_some).unwrap(); - let first_missing_signature_idx = - signatures.iter().position(Option::is_none).unwrap(); - signatures[first_missing_signature_idx] = signatures[first_signature_idx].clone(); - }); - assert_ok!(import_commitment(header)); - }); - } - - #[test] - fn submit_commitment_checks_mmr_proof() { - run_test_with_initialize(1, || { - let validators = validator_pairs(0, 1); - - // Fails if leaf is not for parent. - let mut header = ChainBuilder::new(1).append_finalized_header().to_header(); - header.leaf.parent_number_and_hash.0 += 1; - assert_noop!( - import_commitment(header), - Error::::MmrProofVerificationFailed, - ); - - // Fails if mmr proof is incorrect. - let mut header = ChainBuilder::new(1).append_finalized_header().to_header(); - header.leaf_proof.leaf_indices[0] += 1; - assert_noop!( - import_commitment(header), - Error::::MmrProofVerificationFailed, - ); - - // Fails if mmr root is incorrect. - let mut header = ChainBuilder::new(1).append_finalized_header().to_header(); - // Replace MMR root with zeroes. - header.customize_commitment( - |commitment| { - commitment.payload = - BeefyPayload::from_single_entry(MMR_ROOT_PAYLOAD_ID, [0u8; 32].encode()); - }, - &validators, - 1, - ); - assert_noop!( - import_commitment(header), - Error::::MmrProofVerificationFailed, - ); - }); - } - - #[test] - fn submit_commitment_extracts_mmr_root() { - run_test_with_initialize(1, || { - let validators = validator_pairs(0, 1); - - // Fails if there is no mmr root in the payload. - let mut header = ChainBuilder::new(1).append_finalized_header().to_header(); - // Remove MMR root from the payload. 
- header.customize_commitment( - |commitment| { - commitment.payload = BeefyPayload::from_single_entry(*b"xy", vec![]); - }, - &validators, - 1, - ); - assert_noop!( - import_commitment(header), - Error::::MmrRootMissingFromCommitment, - ); - - // Fails if mmr root can't be decoded. - let mut header = ChainBuilder::new(1).append_finalized_header().to_header(); - // MMR root is a 32-byte array and we have replaced it with single byte - header.customize_commitment( - |commitment| { - commitment.payload = - BeefyPayload::from_single_entry(MMR_ROOT_PAYLOAD_ID, vec![42]); - }, - &validators, - 1, - ); - assert_noop!( - import_commitment(header), - Error::::MmrRootMissingFromCommitment, - ); - }); - } - - #[test] - fn submit_commitment_stores_valid_data() { - run_test_with_initialize(20, || { - let header = ChainBuilder::new(20).append_handoff_header(30).to_header(); - assert_ok!(import_commitment(header.clone())); - - assert_eq!(ImportedCommitmentsInfo::::get().unwrap().best_block_number, 1); - assert_eq!(CurrentAuthoritySetInfo::::get().id, 1); - assert_eq!(CurrentAuthoritySetInfo::::get().len, 30); - assert_eq!( - ImportedCommitments::::get(1).unwrap(), - bp_beefy::ImportedCommitment { - parent_number_and_hash: (0, [0; 32].into()), - mmr_root: header.mmr_root, - }, - ); - }); - } -} diff --git a/modules/grandpa/Cargo.toml b/modules/grandpa/Cargo.toml deleted file mode 100644 index 1a5bfeff16e9db3a63d2acea3ec644be877891a3..0000000000000000000000000000000000000000 --- a/modules/grandpa/Cargo.toml +++ /dev/null @@ -1,71 +0,0 @@ -[package] -name = "pallet-bridge-grandpa" -version = "0.7.0" -description = "Module implementing GRANDPA on-chain light client used for bridging consensus of substrate-based chains." 
-authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } -finality-grandpa = { version = "0.16.2", default-features = false } -log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } - -# Bridge Dependencies - -bp-runtime = { path = "../../primitives/runtime", default-features = false } -bp-header-chain = { path = "../../primitives/header-chain", default-features = false } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -frame-system = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-consensus-grandpa = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false, features = ["serde"] } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false, features = ["serde"] } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-trie = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } - -# Optional Benchmarking Dependencies -bp-test-utils = { path = "../../primitives/test-utils", default-features = false, optional = true } -frame-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false, optional = true } - -[dev-dependencies] -sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-io = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } - -[features] -default = ["std"] -std = 
[ - "bp-header-chain/std", - "bp-runtime/std", - "bp-test-utils/std", - "codec/std", - "finality-grandpa/std", - "frame-benchmarking/std", - "frame-support/std", - "frame-system/std", - "log/std", - "scale-info/std", - "sp-consensus-grandpa/std", - "sp-runtime/std", - "sp-std/std", - "sp-trie/std", -] -runtime-benchmarks = [ - "bp-test-utils", - "frame-benchmarking/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", -] -try-runtime = [ - "frame-support/try-runtime", - "frame-system/try-runtime", - "sp-runtime/try-runtime", -] diff --git a/modules/grandpa/README.md b/modules/grandpa/README.md deleted file mode 100644 index 43ee5c316d1b76ec8fc94b0c3819b1340a6ce75c..0000000000000000000000000000000000000000 --- a/modules/grandpa/README.md +++ /dev/null @@ -1,101 +0,0 @@ -# Bridge GRANDPA Pallet - -The bridge GRANDPA pallet is a light client for the GRANDPA finality gadget, running at the bridged chain. -It may import headers and their GRANDPA finality proofs (justifications) of the bridged chain. Imported -headers then may be used to verify storage proofs by other pallets. This makes the bridge GRANDPA pallet -a basic pallet of all bridges with Substrate-based chains. It is used by all bridge types (bridge between -standalone chains, between parachains and any combination of those) and is used by other bridge pallets. -It is used by the parachains light client (bridge parachains pallet) and by messages pallet. - -## A Brief Introduction into GRANDPA Finality - -You can find detailed information on GRANDPA, by exploring its [repository](https://github.com/paritytech/finality-grandpa). -Here is the minimal reqiuired GRANDPA information to understand how pallet works. - -Any Substrate chain may use different block authorship algorithms (like BABE or Aura) to determine block producers and -generate blocks. 
This has nothing common with finality, though - the task of block authorship is to coordinate -blocks generation. Any block may be reverted (if there's a fork) if it is not finalized. The finality solution -for (standalone) Substrate-based chains is the GRANDPA finality gadget. If some block is finalized by the gadget, it -can't be reverted. - -In GRANDPA, there are validators, identified by their public keys. They select some generated block and produce -signatures on this block hash. If there are enough (more than `2 / 3 * N`, where `N` is number of validators) -signatures, then the block is considered finalized. The set of signatures for the block is called justification. -Anyone who knows the public keys of validators is able to verify GRANDPA justification and that it is generated -for provided header. - -There are two main things in GRANDPA that help building light clients: - -- there's no need to import all headers of the bridged chain. Light client may import finalized headers or just - some of finalized headders that it consider useful. While the validators set stays the same, the client may - import any header that is finalized by this set; - -- when validators set changes, the GRANDPA gadget adds next set to the header. So light client doesn't need to - verify storage proofs when this happens - it only needs to look at the header and see if it changes the set. - Once set is changed, all following justifications are generated by the new set. Header that is changing the - set is called "mandatory" in the pallet. As the name says, the light client need to import all such headers - to be able to operate properly. - -## Pallet Operations - -The main entrypoint of the pallet is the `submit_finality_proof_ex` call. It has three arguments - the finalized -headers, associated GRANDPA justification and ID of the authority set that has generated this justification. 
The -call simply verifies the justification using current validators set and checks if header is better than the -previous best header. If both checks are passed, the header (only its useful fields) is inserted into the runtime -storage and may be used by other pallets to verify storage proofs. - -The submitter pays regular fee for submitting all headers, except for the mandatory header. Since it is -required for the pallet operations, submitting such header is free. So if you're ok with session-length -lags (meaning that there's exactly 1 mandatory header per session), the cost of pallet calls is zero. - -When the pallet sees mandatory header, it updates the validators set with the set from the header. All -following justifications (until next mandatory header) must be generated by this new set. - -## Pallet Initialization - -As the previous section states, there are two things that are mandatory for pallet operations: best finalized -header and the current validators set. Without it the pallet can't import any headers. But how to provide -initial values for these fields? There are two options. - -First option, while it is easier, doesn't work in all cases. It is to start chain with initial header and -validators set specified in the chain specification. This won't work, however, if we want to add bridge -to already started chain. - -For the latter case we have the `initialize` call. It accepts the initial header and initial validators set. -The call may be called by the governance, root or by the pallet owner (if it is set). - -## Non-Essential Functionality - -There may be a special account in every runtime where the bridge GRANDPA module is deployed. This -account, named 'module owner', is like a module-level sudo account - he's able to halt and -resume all module operations without requiring runtime upgrade. 
Calls that are related to this -account are: - -- `fn set_owner()`: current module owner may call it to transfer "ownership" to another account; - -- `fn set_operating_mode()`: the module owner (or sudo account) may call this function to stop all - module operations. After this call, all finality proofs will be rejected until further `set_operating_mode` call'. - This call may be used when something extraordinary happens with the bridge; - -- `fn initialize()`: module owner may call this function to initialize the bridge. - -If pallet owner is not defined, the governance may be used to make those calls. - -## Signed Extension to Reject Obsolete Headers - -It'd be better for anyone (for chain and for submitters) to reject all transactions that are submitting -already known headers to the pallet. This way, we leave block space to other useful transactions and -we don't charge concurrent submitters for their honest actions. - -To deal with that, we have a [signed extension](./src/call_ext) that may be added to the runtime. -It does exactly what is required - rejects all transactions with already known headers. The submitter -pays nothing for such transactions - they're simply removed from the transaction pool, when the block -is built. - -You may also take a look at the [`generate_bridge_reject_obsolete_headers_and_messages`](../../bin/runtime-common/src/lib.rs) -macro that bundles several similar signed extensions in a single one. - -## GRANDPA Finality Relay - -We have an offchain actor, who is watching for GRANDPA justifications and submits them to the bridged chain. -It is the finality relay - you may look at the [crate level documentation and the code](../../relays/finality/). 
diff --git a/modules/grandpa/src/benchmarking.rs b/modules/grandpa/src/benchmarking.rs deleted file mode 100644 index 11033373ce478fa9fefb613a1377449bb77daf1d..0000000000000000000000000000000000000000 --- a/modules/grandpa/src/benchmarking.rs +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Benchmarks for the GRANDPA Pallet. -//! -//! The main dispatchable for the GRANDPA pallet is `submit_finality_proof_ex`. Our benchmarks -//! are based around `submit_finality_proof`, though - from weight PoV they are the same calls. -//! There are to main factors which affect finality proof verification: -//! -//! 1. The number of `votes-ancestries` in the justification -//! 2. The number of `pre-commits` in the justification -//! -//! Vote ancestries are the headers between (`finality_target`, `head_of_chain`], where -//! `header_of_chain` is a descendant of `finality_target`. -//! -//! Pre-commits are messages which are signed by validators at the head of the chain they think is -//! the best. -//! -//! Consider the following: -//! -//! / B <- C' -//! A <- B <- C -//! -//! The common ancestor of both forks is block A, so this is what GRANDPA will finalize. In order to -//! 
verify this we will have vote ancestries of `[B, C, B', C']` and pre-commits `[C, C']`. -//! -//! Note that the worst case scenario here would be a justification where each validator has it's -//! own fork which is `SESSION_LENGTH` blocks long. - -use crate::*; - -use bp_header_chain::justification::required_justification_precommits; -use bp_runtime::BasicOperatingMode; -use bp_test_utils::{ - accounts, make_justification_for_header, JustificationGeneratorParams, TEST_GRANDPA_ROUND, - TEST_GRANDPA_SET_ID, -}; -use frame_benchmarking::{benchmarks_instance_pallet, whitelisted_caller}; -use frame_system::RawOrigin; -use sp_consensus_grandpa::AuthorityId; -use sp_runtime::traits::{One, Zero}; -use sp_std::vec::Vec; - -/// The maximum number of vote ancestries to include in a justification. -/// -/// In practice this would be limited by the session length (number of blocks a single authority set -/// can produce) of a given chain. -const MAX_VOTE_ANCESTRIES: u32 = 1000; - -// `1..MAX_VOTE_ANCESTRIES` is too large && benchmarks are running for almost 40m (steps=50, -// repeat=20) on a decent laptop, which is too much. Since we're building linear function here, -// let's just select some limited subrange for benchmarking. -const MAX_VOTE_ANCESTRIES_RANGE_BEGIN: u32 = MAX_VOTE_ANCESTRIES / 20; -const MAX_VOTE_ANCESTRIES_RANGE_END: u32 = - MAX_VOTE_ANCESTRIES_RANGE_BEGIN + MAX_VOTE_ANCESTRIES_RANGE_BEGIN; - -// the same with validators - if there are too much validators, let's run benchmarks on subrange -fn precommits_range_end, I: 'static>() -> u32 { - let max_bridged_authorities = T::BridgedChain::MAX_AUTHORITIES_COUNT; - if max_bridged_authorities > 128 { - sp_std::cmp::max(128, max_bridged_authorities / 5) - } else { - max_bridged_authorities - }; - required_justification_precommits(max_bridged_authorities) -} - -/// Prepare header and its justification to submit using `submit_finality_proof`. 
-fn prepare_benchmark_data, I: 'static>( - precommits: u32, - ancestors: u32, -) -> (BridgedHeader, GrandpaJustification>) { - // going from precommits to total authorities count - let total_authorities_count = (3 * precommits - 1) / 2; - - let authority_list = accounts(total_authorities_count as u16) - .iter() - .map(|id| (AuthorityId::from(*id), 1)) - .collect::>(); - - let genesis_header: BridgedHeader = bp_test_utils::test_header(Zero::zero()); - let genesis_hash = genesis_header.hash(); - let init_data = InitializationData { - header: Box::new(genesis_header), - authority_list, - set_id: TEST_GRANDPA_SET_ID, - operating_mode: BasicOperatingMode::Normal, - }; - - bootstrap_bridge::(init_data); - assert!(>::contains_key(genesis_hash)); - - let header: BridgedHeader = bp_test_utils::test_header(One::one()); - let params = JustificationGeneratorParams { - header: header.clone(), - round: TEST_GRANDPA_ROUND, - set_id: TEST_GRANDPA_SET_ID, - authorities: accounts(precommits as u16).iter().map(|k| (*k, 1)).collect::>(), - ancestors, - forks: 1, - }; - let justification = make_justification_for_header(params); - (header, justification) -} - -benchmarks_instance_pallet! { - // This is the "gold standard" benchmark for this extrinsic, and it's what should be used to - // annotate the weight in the pallet. - submit_finality_proof { - let p in 1 .. 
precommits_range_end::(); - let v in MAX_VOTE_ANCESTRIES_RANGE_BEGIN..MAX_VOTE_ANCESTRIES_RANGE_END; - let caller: T::AccountId = whitelisted_caller(); - let (header, justification) = prepare_benchmark_data::(p, v); - }: submit_finality_proof(RawOrigin::Signed(caller), Box::new(header), justification) - verify { - let genesis_header: BridgedHeader = bp_test_utils::test_header(Zero::zero()); - let header: BridgedHeader = bp_test_utils::test_header(One::one()); - let expected_hash = header.hash(); - - // check that the header#1 has been inserted - assert_eq!(>::get().unwrap().1, expected_hash); - assert!(>::contains_key(expected_hash)); - - // check that the header#0 has been pruned - assert!(!>::contains_key(genesis_header.hash())); - } - - impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::TestRuntime) -} diff --git a/modules/grandpa/src/call_ext.rs b/modules/grandpa/src/call_ext.rs deleted file mode 100644 index 969063ddd21f124aca41df6d3cde253c47081fc6..0000000000000000000000000000000000000000 --- a/modules/grandpa/src/call_ext.rs +++ /dev/null @@ -1,718 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -use crate::{ - weights::WeightInfo, BestFinalized, BridgedBlockNumber, BridgedHeader, Config, - CurrentAuthoritySet, Error, FreeHeadersRemaining, Pallet, -}; -use bp_header_chain::{ - justification::GrandpaJustification, max_expected_submit_finality_proof_arguments_size, - ChainWithGrandpa, GrandpaConsensusLogReader, -}; -use bp_runtime::{BlockNumberOf, Chain, OwnedBridgeModule}; -use codec::Encode; -use frame_support::{ - dispatch::CallableCallFor, - traits::{Get, IsSubType}, - weights::Weight, -}; -use sp_consensus_grandpa::SetId; -use sp_runtime::{ - traits::{CheckedSub, Header, Zero}, - transaction_validity::{InvalidTransaction, TransactionValidityError}, - RuntimeDebug, SaturatedConversion, -}; - -/// Info about a `SubmitParachainHeads` call which tries to update a single parachain. -#[derive(Copy, Clone, PartialEq, RuntimeDebug)] -pub struct SubmitFinalityProofInfo { - /// Number of the finality target. - pub block_number: N, - /// An identifier of the validators set that has signed the submitted justification. - /// It might be `None` if deprecated version of the `submit_finality_proof` is used. - pub current_set_id: Option, - /// If `true`, then the call proves new **mandatory** header. - pub is_mandatory: bool, - /// If `true`, then the call must be free (assuming that everything else is valid) to - /// be treated as valid. - pub is_free_execution_expected: bool, - /// Extra weight that we assume is included in the call. - /// - /// We have some assumptions about headers and justifications of the bridged chain. - /// We know that if our assumptions are correct, then the call must not have the - /// weight above some limit. The fee paid for weight above that limit, is never refunded. - pub extra_weight: Weight, - /// Extra size (in bytes) that we assume are included in the call. - /// - /// We have some assumptions about headers and justifications of the bridged chain. 
- /// We know that if our assumptions are correct, then the call must not have the - /// weight above some limit. The fee paid for bytes above that limit, is never refunded. - pub extra_size: u32, -} - -/// Verified `SubmitFinalityProofInfo`. -#[derive(Copy, Clone, PartialEq, RuntimeDebug)] -pub struct VerifiedSubmitFinalityProofInfo { - /// Base call information. - pub base: SubmitFinalityProofInfo, - /// A difference between bundled bridged header and best bridged header known to us - /// before the call. - pub improved_by: N, -} - -impl SubmitFinalityProofInfo { - /// Returns `true` if call size/weight is below our estimations for regular calls. - pub fn fits_limits(&self) -> bool { - self.extra_weight.is_zero() && self.extra_size.is_zero() - } -} - -/// Helper struct that provides methods for working with the `SubmitFinalityProof` call. -pub struct SubmitFinalityProofHelper, I: 'static> { - _phantom_data: sp_std::marker::PhantomData<(T, I)>, -} - -impl, I: 'static> SubmitFinalityProofHelper { - /// Returns `true` if we may fit more free headers into the current block. If `false` is - /// returned, the call will be paid even if `is_free_execution_expected` has been set - /// to `true`. - pub fn can_import_anything_for_free() -> bool { - // `unwrap_or(u32::MAX)` means that if `FreeHeadersRemaining` is `None`, we may accept - // this header for free. That is a small cheat - is is `None` if executed outside of - // transaction (e.g. during block initialization). Normal relayer would never submit - // such calls, but if he did, that is not our problem. During normal transactions, - // the `FreeHeadersRemaining` is always `Some(_)`. 
- let free_headers_remaining = FreeHeadersRemaining::::get().unwrap_or(u32::MAX); - if free_headers_remaining == 0 { - return false - } - - true - } - - /// Check that the: (1) GRANDPA head provided by the `SubmitFinalityProof` is better than the - /// best one we know (2) if `current_set_id` matches the current authority set id, if specified - /// and (3) whether transaction MAY be free for the submitter if `is_free_execution_expected` - /// is `true`. - /// - /// Returns number of headers between the current best finalized header, known to the pallet - /// and the bundled header. - pub fn check_obsolete_from_extension( - call_info: &SubmitFinalityProofInfo>, - ) -> Result, Error> { - // do basic checks first - let improved_by = Self::check_obsolete(call_info.block_number, call_info.current_set_id)?; - - // if submitter has NOT specified that it wants free execution, then we are done - if !call_info.is_free_execution_expected { - return Ok(improved_by); - } - - // else - if we can not accept more free headers, "reject" the transaction - if !Self::can_import_anything_for_free() { - log::trace!( - target: crate::LOG_TARGET, - "Cannot accept free {:?} header {:?}. No more free slots remaining", - T::BridgedChain::ID, - call_info.block_number, - ); - - return Err(Error::::FreeHeadersLimitExceded); - } - - // ensure that the `improved_by` is larger than the configured free interval - if !call_info.is_mandatory { - if let Some(free_headers_interval) = T::FreeHeadersInterval::get() { - if improved_by < free_headers_interval.into() { - log::trace!( - target: crate::LOG_TARGET, - "Cannot accept free {:?} header {:?}. 
Too small difference \ - between submitted headers: {:?} vs {}", - T::BridgedChain::ID, - call_info.block_number, - improved_by, - free_headers_interval, - ); - - return Err(Error::::BelowFreeHeaderInterval); - } - } - } - - // we do not check whether the header matches free submission criteria here - it is the - // relayer responsibility to check that - - Ok(improved_by) - } - - /// Check that the GRANDPA head provided by the `SubmitFinalityProof` is better than the best - /// one we know. Additionally, checks if `current_set_id` matches the current authority set - /// id, if specified. This method is called by the call code and the transaction extension, - /// so it does not check the free execution. - /// - /// Returns number of headers between the current best finalized header, known to the pallet - /// and the bundled header. - pub fn check_obsolete( - finality_target: BlockNumberOf, - current_set_id: Option, - ) -> Result, Error> { - let best_finalized = BestFinalized::::get().ok_or_else(|| { - log::trace!( - target: crate::LOG_TARGET, - "Cannot finalize header {:?} because pallet is not yet initialized", - finality_target, - ); - >::NotInitialized - })?; - - let improved_by = match finality_target.checked_sub(&best_finalized.number()) { - Some(improved_by) if improved_by > Zero::zero() => improved_by, - _ => { - log::trace!( - target: crate::LOG_TARGET, - "Cannot finalize obsolete header: bundled {:?}, best {:?}", - finality_target, - best_finalized, - ); - - return Err(Error::::OldHeader) - }, - }; - - if let Some(current_set_id) = current_set_id { - let actual_set_id = >::get().set_id; - if current_set_id != actual_set_id { - log::trace!( - target: crate::LOG_TARGET, - "Cannot finalize header signed by unknown authority set: bundled {:?}, best {:?}", - current_set_id, - actual_set_id, - ); - - return Err(Error::::InvalidAuthoritySetId) - } - } - - Ok(improved_by) - } - - /// Check if the `SubmitFinalityProof` was successfully executed. 
- pub fn was_successful(finality_target: BlockNumberOf) -> bool { - match BestFinalized::::get() { - Some(best_finalized) => best_finalized.number() == finality_target, - None => false, - } - } -} - -/// Trait representing a call that is a sub type of this pallet's call. -pub trait CallSubType, I: 'static>: - IsSubType, T>> -{ - /// Extract finality proof info from a runtime call. - fn submit_finality_proof_info( - &self, - ) -> Option>> { - if let Some(crate::Call::::submit_finality_proof { finality_target, justification }) = - self.is_sub_type() - { - return Some(submit_finality_proof_info_from_args::( - finality_target, - justification, - None, - false, - )) - } else if let Some(crate::Call::::submit_finality_proof_ex { - finality_target, - justification, - current_set_id, - is_free_execution_expected, - }) = self.is_sub_type() - { - return Some(submit_finality_proof_info_from_args::( - finality_target, - justification, - Some(*current_set_id), - *is_free_execution_expected, - )) - } - - None - } - - /// Validate Grandpa headers in order to avoid "mining" transactions that provide outdated - /// bridged chain headers. Without this validation, even honest relayers may lose their funds - /// if there are multiple relays running and submitting the same information. - /// - /// Returns `Ok(None)` if the call is not the `submit_finality_proof` call of our pallet. - /// Returns `Ok(Some(_))` if the call is the `submit_finality_proof` call of our pallet and - /// we believe the call brings header that improves the pallet state. - /// Returns `Err(_)` if the call is the `submit_finality_proof` call of our pallet and we - /// believe that the call will fail. 
- fn check_obsolete_submit_finality_proof( - &self, - ) -> Result< - Option>>, - TransactionValidityError, - > - where - Self: Sized, - { - let call_info = match self.submit_finality_proof_info() { - Some(finality_proof) => finality_proof, - _ => return Ok(None), - }; - - if Pallet::::ensure_not_halted().is_err() { - return Err(InvalidTransaction::Call.into()) - } - - let result = SubmitFinalityProofHelper::::check_obsolete_from_extension(&call_info); - match result { - Ok(improved_by) => - Ok(Some(VerifiedSubmitFinalityProofInfo { base: call_info, improved_by })), - Err(Error::::OldHeader) => Err(InvalidTransaction::Stale.into()), - Err(_) => Err(InvalidTransaction::Call.into()), - } - } -} - -impl, I: 'static> CallSubType for T::RuntimeCall where - T::RuntimeCall: IsSubType, T>> -{ -} - -/// Extract finality proof info from the submitted header and justification. -pub(crate) fn submit_finality_proof_info_from_args, I: 'static>( - finality_target: &BridgedHeader, - justification: &GrandpaJustification>, - current_set_id: Option, - is_free_execution_expected: bool, -) -> SubmitFinalityProofInfo> { - let block_number = *finality_target.number(); - - // the `submit_finality_proof` call will reject justifications with invalid, duplicate, - // unknown and extra signatures. It'll also reject justifications with less than necessary - // signatures. So we do not care about extra weight because of additional signatures here. - let precommits_len = justification.commit.precommits.len().saturated_into(); - let required_precommits = precommits_len; - - // We do care about extra weight because of more-than-expected headers in the votes - // ancestries. But we have problems computing extra weight for additional headers (weight of - // additional header is too small, so that our benchmarks aren't detecting that). So if there - // are more than expected headers in votes ancestries, we will treat the whole call weight - // as an extra weight. 
- let votes_ancestries_len = justification.votes_ancestries.len().saturated_into(); - let extra_weight = - if votes_ancestries_len > T::BridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY { - T::WeightInfo::submit_finality_proof(precommits_len, votes_ancestries_len) - } else { - Weight::zero() - }; - - // check if the `finality_target` is a mandatory header. If so, we are ready to refund larger - // size - let is_mandatory_finality_target = - GrandpaConsensusLogReader::>::find_scheduled_change( - finality_target.digest(), - ) - .is_some(); - - // we can estimate extra call size easily, without any additional significant overhead - let actual_call_size: u32 = finality_target - .encoded_size() - .saturating_add(justification.encoded_size()) - .saturated_into(); - let max_expected_call_size = max_expected_submit_finality_proof_arguments_size::( - is_mandatory_finality_target, - required_precommits, - ); - let extra_size = actual_call_size.saturating_sub(max_expected_call_size); - - SubmitFinalityProofInfo { - block_number, - current_set_id, - is_mandatory: is_mandatory_finality_target, - is_free_execution_expected, - extra_weight, - extra_size, - } -} - -#[cfg(test)] -mod tests { - use crate::{ - call_ext::CallSubType, - mock::{ - run_test, test_header, FreeHeadersInterval, RuntimeCall, TestBridgedChain, TestNumber, - TestRuntime, - }, - BestFinalized, Config, CurrentAuthoritySet, FreeHeadersRemaining, PalletOperatingMode, - StoredAuthoritySet, SubmitFinalityProofInfo, WeightInfo, - }; - use bp_header_chain::ChainWithGrandpa; - use bp_runtime::{BasicOperatingMode, HeaderId}; - use bp_test_utils::{ - make_default_justification, make_justification_for_header, JustificationGeneratorParams, - TEST_GRANDPA_SET_ID, - }; - use codec::Encode; - use frame_support::weights::Weight; - use sp_runtime::{testing::DigestItem, traits::Header as _, SaturatedConversion}; - - fn validate_block_submit(num: TestNumber) -> bool { - let bridge_grandpa_call = 
crate::Call::::submit_finality_proof_ex { - finality_target: Box::new(test_header(num)), - justification: make_default_justification(&test_header(num)), - // not initialized => zero - current_set_id: 0, - is_free_execution_expected: false, - }; - RuntimeCall::check_obsolete_submit_finality_proof(&RuntimeCall::Grandpa( - bridge_grandpa_call, - )) - .is_ok() - } - - fn sync_to_header_10() { - let header10_hash = sp_core::H256::default(); - BestFinalized::::put(HeaderId(10, header10_hash)); - } - - #[test] - fn extension_rejects_obsolete_header() { - run_test(|| { - // when current best finalized is #10 and we're trying to import header#5 => tx is - // rejected - sync_to_header_10(); - assert!(!validate_block_submit(5)); - }); - } - - #[test] - fn extension_rejects_same_header() { - run_test(|| { - // when current best finalized is #10 and we're trying to import header#10 => tx is - // rejected - sync_to_header_10(); - assert!(!validate_block_submit(10)); - }); - } - - #[test] - fn extension_rejects_new_header_if_pallet_is_halted() { - run_test(|| { - // when pallet is halted => tx is rejected - sync_to_header_10(); - PalletOperatingMode::::put(BasicOperatingMode::Halted); - - assert!(!validate_block_submit(15)); - }); - } - - #[test] - fn extension_rejects_new_header_if_set_id_is_invalid() { - run_test(|| { - // when set id is different from the passed one => tx is rejected - sync_to_header_10(); - let next_set = StoredAuthoritySet::::try_new(vec![], 0x42).unwrap(); - CurrentAuthoritySet::::put(next_set); - - assert!(!validate_block_submit(15)); - }); - } - - #[test] - fn extension_rejects_new_header_if_free_execution_is_requested_and_free_submissions_are_not_accepted( - ) { - run_test(|| { - let bridge_grandpa_call = crate::Call::::submit_finality_proof_ex { - finality_target: Box::new(test_header(10 + FreeHeadersInterval::get() as u64)), - justification: make_default_justification(&test_header( - 10 + FreeHeadersInterval::get() as u64, - )), - current_set_id: 0, - 
is_free_execution_expected: true, - }; - sync_to_header_10(); - - // when we can accept free headers => Ok - FreeHeadersRemaining::::put(2); - assert!(RuntimeCall::check_obsolete_submit_finality_proof(&RuntimeCall::Grandpa( - bridge_grandpa_call.clone(), - ),) - .is_ok()); - - // when we can NOT accept free headers => Err - FreeHeadersRemaining::::put(0); - assert!(RuntimeCall::check_obsolete_submit_finality_proof(&RuntimeCall::Grandpa( - bridge_grandpa_call.clone(), - ),) - .is_err()); - - // when called outside of transaction => Ok - FreeHeadersRemaining::::kill(); - assert!(RuntimeCall::check_obsolete_submit_finality_proof(&RuntimeCall::Grandpa( - bridge_grandpa_call, - ),) - .is_ok()); - }) - } - - #[test] - fn extension_rejects_new_header_if_free_execution_is_requested_and_improved_by_is_below_expected( - ) { - run_test(|| { - let bridge_grandpa_call = crate::Call::::submit_finality_proof_ex { - finality_target: Box::new(test_header(100)), - justification: make_default_justification(&test_header(100)), - current_set_id: 0, - is_free_execution_expected: true, - }; - sync_to_header_10(); - - // when `improved_by` is less than the free interval - BestFinalized::::put(HeaderId( - 100 - FreeHeadersInterval::get() as u64 + 1, - sp_core::H256::default(), - )); - assert!(RuntimeCall::check_obsolete_submit_finality_proof(&RuntimeCall::Grandpa( - bridge_grandpa_call.clone(), - ),) - .is_err()); - - // when `improved_by` is equal to the free interval - BestFinalized::::put(HeaderId( - 100 - FreeHeadersInterval::get() as u64, - sp_core::H256::default(), - )); - assert!(RuntimeCall::check_obsolete_submit_finality_proof(&RuntimeCall::Grandpa( - bridge_grandpa_call.clone(), - ),) - .is_ok()); - - // when `improved_by` is larger than the free interval - BestFinalized::::put(HeaderId( - 100 - FreeHeadersInterval::get() as u64 - 1, - sp_core::H256::default(), - )); - assert!(RuntimeCall::check_obsolete_submit_finality_proof(&RuntimeCall::Grandpa( - bridge_grandpa_call.clone(), 
- ),) - .is_ok()); - - // when `improved_by` is less than the free interval BUT it is a mandatory header - let mut mandatory_header = test_header(100); - let consensus_log = sp_consensus_grandpa::ConsensusLog::::ScheduledChange( - sp_consensus_grandpa::ScheduledChange { - next_authorities: bp_test_utils::authority_list(), - delay: 0, - }, - ); - mandatory_header.digest = sp_runtime::Digest { - logs: vec![DigestItem::Consensus( - sp_consensus_grandpa::GRANDPA_ENGINE_ID, - consensus_log.encode(), - )], - }; - let justification = make_justification_for_header(JustificationGeneratorParams { - header: mandatory_header.clone(), - set_id: 1, - ..Default::default() - }); - let bridge_grandpa_call = crate::Call::::submit_finality_proof_ex { - finality_target: Box::new(mandatory_header), - justification, - current_set_id: 0, - is_free_execution_expected: true, - }; - BestFinalized::::put(HeaderId( - 100 - FreeHeadersInterval::get() as u64 + 1, - sp_core::H256::default(), - )); - assert!(RuntimeCall::check_obsolete_submit_finality_proof(&RuntimeCall::Grandpa( - bridge_grandpa_call.clone(), - ),) - .is_ok()); - }) - } - - #[test] - fn extension_accepts_new_header() { - run_test(|| { - // when current best finalized is #10 and we're trying to import header#15 => tx is - // accepted - sync_to_header_10(); - assert!(validate_block_submit(15)); - }); - } - - #[test] - fn submit_finality_proof_info_is_parsed() { - // when `submit_finality_proof` is used, `current_set_id` is set to `None` - let deprecated_call = - RuntimeCall::Grandpa(crate::Call::::submit_finality_proof { - finality_target: Box::new(test_header(42)), - justification: make_default_justification(&test_header(42)), - }); - assert_eq!( - deprecated_call.submit_finality_proof_info(), - Some(SubmitFinalityProofInfo { - block_number: 42, - current_set_id: None, - extra_weight: Weight::zero(), - extra_size: 0, - is_mandatory: false, - is_free_execution_expected: false, - }) - ); - - // when `submit_finality_proof_ex` is 
used, `current_set_id` is set to `Some` - let deprecated_call = - RuntimeCall::Grandpa(crate::Call::::submit_finality_proof_ex { - finality_target: Box::new(test_header(42)), - justification: make_default_justification(&test_header(42)), - current_set_id: 777, - is_free_execution_expected: false, - }); - assert_eq!( - deprecated_call.submit_finality_proof_info(), - Some(SubmitFinalityProofInfo { - block_number: 42, - current_set_id: Some(777), - extra_weight: Weight::zero(), - extra_size: 0, - is_mandatory: false, - is_free_execution_expected: false, - }) - ); - } - - #[test] - fn extension_returns_correct_extra_size_if_call_arguments_are_too_large() { - // when call arguments are below our limit => no refund - let small_finality_target = test_header(1); - let justification_params = JustificationGeneratorParams { - header: small_finality_target.clone(), - ..Default::default() - }; - let small_justification = make_justification_for_header(justification_params); - let small_call = RuntimeCall::Grandpa(crate::Call::submit_finality_proof_ex { - finality_target: Box::new(small_finality_target), - justification: small_justification, - current_set_id: TEST_GRANDPA_SET_ID, - is_free_execution_expected: false, - }); - assert_eq!(small_call.submit_finality_proof_info().unwrap().extra_size, 0); - - // when call arguments are too large => partial refund - let mut large_finality_target = test_header(1); - large_finality_target - .digest_mut() - .push(DigestItem::Other(vec![42u8; 1024 * 1024])); - let justification_params = JustificationGeneratorParams { - header: large_finality_target.clone(), - ..Default::default() - }; - let large_justification = make_justification_for_header(justification_params); - let large_call = RuntimeCall::Grandpa(crate::Call::submit_finality_proof_ex { - finality_target: Box::new(large_finality_target), - justification: large_justification, - current_set_id: TEST_GRANDPA_SET_ID, - is_free_execution_expected: false, - }); - 
assert_ne!(large_call.submit_finality_proof_info().unwrap().extra_size, 0); - } - - #[test] - fn extension_returns_correct_extra_weight_if_there_are_too_many_headers_in_votes_ancestry() { - let finality_target = test_header(1); - let mut justification_params = JustificationGeneratorParams { - header: finality_target.clone(), - ancestors: TestBridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY, - ..Default::default() - }; - - // when there are `REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY` headers => no refund - let justification = make_justification_for_header(justification_params.clone()); - let call = RuntimeCall::Grandpa(crate::Call::submit_finality_proof_ex { - finality_target: Box::new(finality_target.clone()), - justification, - current_set_id: TEST_GRANDPA_SET_ID, - is_free_execution_expected: false, - }); - assert_eq!(call.submit_finality_proof_info().unwrap().extra_weight, Weight::zero()); - - // when there are `REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY + 1` headers => full refund - justification_params.ancestors += 1; - let justification = make_justification_for_header(justification_params); - let call_weight = ::WeightInfo::submit_finality_proof( - justification.commit.precommits.len().saturated_into(), - justification.votes_ancestries.len().saturated_into(), - ); - let call = RuntimeCall::Grandpa(crate::Call::submit_finality_proof_ex { - finality_target: Box::new(finality_target), - justification, - current_set_id: TEST_GRANDPA_SET_ID, - is_free_execution_expected: false, - }); - assert_eq!(call.submit_finality_proof_info().unwrap().extra_weight, call_weight); - } - - #[test] - fn check_obsolete_submit_finality_proof_returns_correct_improved_by() { - run_test(|| { - fn make_call(number: u64) -> RuntimeCall { - RuntimeCall::Grandpa(crate::Call::::submit_finality_proof_ex { - finality_target: Box::new(test_header(number)), - justification: make_default_justification(&test_header(number)), - current_set_id: 0, - is_free_execution_expected: false, - 
}) - } - - sync_to_header_10(); - - // when the difference between headers is 1 - assert_eq!( - RuntimeCall::check_obsolete_submit_finality_proof(&make_call(11)) - .unwrap() - .unwrap() - .improved_by, - 1, - ); - - // when the difference between headers is 2 - assert_eq!( - RuntimeCall::check_obsolete_submit_finality_proof(&make_call(12)) - .unwrap() - .unwrap() - .improved_by, - 2, - ); - }) - } - - #[test] - fn check_obsolete_submit_finality_proof_ignores_other_calls() { - run_test(|| { - let call = - RuntimeCall::System(frame_system::Call::::remark { remark: vec![42] }); - - assert_eq!(RuntimeCall::check_obsolete_submit_finality_proof(&call), Ok(None)); - }) - } -} diff --git a/modules/grandpa/src/lib.rs b/modules/grandpa/src/lib.rs deleted file mode 100644 index b4f501fc7975abf2cf375f5b2f46be78495e4819..0000000000000000000000000000000000000000 --- a/modules/grandpa/src/lib.rs +++ /dev/null @@ -1,1703 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate GRANDPA Pallet -//! -//! This pallet is an on-chain GRANDPA light client for Substrate based chains. -//! -//! This pallet achieves this by trustlessly verifying GRANDPA finality proofs on-chain. Once -//! verified, finalized headers are stored in the pallet, thereby creating a sparse header chain. -//! 
This sparse header chain can be used as a source of truth for other higher-level applications. -//! -//! The pallet is responsible for tracking GRANDPA validator set hand-offs. We only import headers -//! with justifications signed by the current validator set we know of. The header is inspected for -//! a `ScheduledChanges` digest item, which is then used to update to next validator set. -//! -//! Since this pallet only tracks finalized headers it does not deal with forks. Forks can only -//! occur if the GRANDPA validator set on the bridged chain is either colluding or there is a severe -//! bug causing resulting in an equivocation. Such events are outside the scope of this pallet. -//! Shall the fork occur on the bridged chain governance intervention will be required to -//! re-initialize the bridge and track the right fork. - -#![warn(missing_docs)] -#![cfg_attr(not(feature = "std"), no_std)] - -pub use storage_types::StoredAuthoritySet; - -use bp_header_chain::{ - justification::GrandpaJustification, AuthoritySet, ChainWithGrandpa, GrandpaConsensusLogReader, - HeaderChain, InitializationData, StoredHeaderData, StoredHeaderDataBuilder, - StoredHeaderGrandpaInfo, -}; -use bp_runtime::{BlockNumberOf, HashOf, HasherOf, HeaderId, HeaderOf, OwnedBridgeModule}; -use frame_support::{dispatch::PostDispatchInfo, ensure, DefaultNoBound}; -use sp_consensus_grandpa::SetId; -use sp_runtime::{ - traits::{Header as HeaderT, Zero}, - SaturatedConversion, -}; -use sp_std::{boxed::Box, convert::TryInto, prelude::*}; - -mod call_ext; -#[cfg(test)] -mod mock; -mod storage_types; - -/// Module, containing weights for this pallet. -pub mod weights; -pub mod weights_ext; - -#[cfg(feature = "runtime-benchmarks")] -pub mod benchmarking; - -// Re-export in crate namespace for `construct_runtime!` -pub use call_ext::*; -pub use pallet::*; -pub use weights::WeightInfo; -pub use weights_ext::WeightInfoExt; - -/// The target that will be used when publishing logs related to this pallet. 
-pub const LOG_TARGET: &str = "runtime::bridge-grandpa"; - -/// Bridged chain from the pallet configuration. -pub type BridgedChain = >::BridgedChain; -/// Block number of the bridged chain. -pub type BridgedBlockNumber = BlockNumberOf<>::BridgedChain>; -/// Block hash of the bridged chain. -pub type BridgedBlockHash = HashOf<>::BridgedChain>; -/// Block id of the bridged chain. -pub type BridgedBlockId = HeaderId, BridgedBlockNumber>; -/// Hasher of the bridged chain. -pub type BridgedBlockHasher = HasherOf<>::BridgedChain>; -/// Header of the bridged chain. -pub type BridgedHeader = HeaderOf<>::BridgedChain>; -/// Header data of the bridged chain that is stored at this chain by this pallet. -pub type BridgedStoredHeaderData = - StoredHeaderData, BridgedBlockHash>; - -#[frame_support::pallet] -pub mod pallet { - use super::*; - use bp_runtime::BasicOperatingMode; - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - - #[pallet::config] - pub trait Config: frame_system::Config { - /// The overarching event type. - type RuntimeEvent: From> - + IsType<::RuntimeEvent>; - - /// The chain we are bridging to here. - type BridgedChain: ChainWithGrandpa; - - /// Maximal number of "free" header transactions per block. - /// - /// To be able to track the bridged chain, the pallet requires all headers that are - /// changing GRANDPA authorities set at the bridged chain (we call them mandatory). - /// So it is a common good deed to submit mandatory headers to the pallet. - /// - /// The pallet may be configured (see `[Self::FreeHeadersInterval]`) to import some - /// non-mandatory headers for free as well. It also may be treated as a common good - /// deed, because it may help to reduce bridge fees - this cost may be deducted from - /// bridge fees, paid by message senders. - /// - /// However, if the bridged chain gets compromised, its validators may generate as many - /// "free" headers as they want. 
And they may fill the whole block (at this chain) for - /// free. This constants limits number of calls that we may refund in a single block. - /// All calls above this limit are accepted, but are not refunded. - #[pallet::constant] - type MaxFreeHeadersPerBlock: Get; - - /// The distance between bridged chain headers, that may be submitted for free. The - /// first free header is header number zero, the next one is header number - /// `FreeHeadersInterval::get()` or any of its descendant if that header has not - /// bee submitted. In other words, interval between free headers should be at least - /// `FreeHeadersInterval`. - #[pallet::constant] - type FreeHeadersInterval: Get>; - - /// Maximal number of finalized headers to keep in the storage. - /// - /// The setting is there to prevent growing the on-chain state indefinitely. Note - /// the setting does not relate to block numbers - we will simply keep as much items - /// in the storage, so it doesn't guarantee any fixed timeframe for finality headers. - /// - /// Incautious change of this constant may lead to orphan entries in the runtime storage. - #[pallet::constant] - type HeadersToKeep: Get; - - /// Weights gathered through benchmarking. - type WeightInfo: WeightInfoExt; - } - - #[pallet::pallet] - pub struct Pallet(PhantomData<(T, I)>); - - #[pallet::hooks] - impl, I: 'static> Hooks> for Pallet { - fn on_initialize(_n: BlockNumberFor) -> Weight { - FreeHeadersRemaining::::put(T::MaxFreeHeadersPerBlock::get()); - Weight::zero() - } - - fn on_finalize(_n: BlockNumberFor) { - FreeHeadersRemaining::::kill(); - } - } - - impl, I: 'static> OwnedBridgeModule for Pallet { - const LOG_TARGET: &'static str = LOG_TARGET; - type OwnerStorage = PalletOwner; - type OperatingMode = BasicOperatingMode; - type OperatingModeStorage = PalletOperatingMode; - } - - #[pallet::call] - impl, I: 'static> Pallet { - /// This call is deprecated and will be removed around May 2024. Use the - /// `submit_finality_proof_ex` instead. 
Semantically, this call is an equivalent of the - /// `submit_finality_proof_ex` call without current authority set id check. - #[pallet::call_index(0)] - #[pallet::weight(T::WeightInfo::submit_finality_proof_weight( - justification.commit.precommits.len().saturated_into(), - justification.votes_ancestries.len().saturated_into(), - ))] - #[allow(deprecated)] - #[deprecated( - note = "`submit_finality_proof` will be removed in May 2024. Use `submit_finality_proof_ex` instead." - )] - pub fn submit_finality_proof( - origin: OriginFor, - finality_target: Box>, - justification: GrandpaJustification>, - ) -> DispatchResultWithPostInfo { - Self::submit_finality_proof_ex( - origin, - finality_target, - justification, - // the `submit_finality_proof_ex` also reads this value, but it is done from the - // cache, so we don't treat it as an additional db access - >::get().set_id, - // cannot enforce free execution using this call - false, - ) - } - - /// Bootstrap the bridge pallet with an initial header and authority set from which to sync. - /// - /// The initial configuration provided does not need to be the genesis header of the bridged - /// chain, it can be any arbitrary header. You can also provide the next scheduled set - /// change if it is already know. - /// - /// This function is only allowed to be called from a trusted origin and writes to storage - /// with practically no checks in terms of the validity of the data. It is important that - /// you ensure that valid data is being passed in. 
- #[pallet::call_index(1)] - #[pallet::weight((T::DbWeight::get().reads_writes(2, 5), DispatchClass::Operational))] - pub fn initialize( - origin: OriginFor, - init_data: super::InitializationData>, - ) -> DispatchResultWithPostInfo { - Self::ensure_owner_or_root(origin)?; - - let init_allowed = !>::exists(); - ensure!(init_allowed, >::AlreadyInitialized); - initialize_bridge::(init_data.clone())?; - - log::info!( - target: LOG_TARGET, - "Pallet has been initialized with the following parameters: {:?}", - init_data - ); - - Ok(().into()) - } - - /// Change `PalletOwner`. - /// - /// May only be called either by root, or by `PalletOwner`. - #[pallet::call_index(2)] - #[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] - pub fn set_owner(origin: OriginFor, new_owner: Option) -> DispatchResult { - >::set_owner(origin, new_owner) - } - - /// Halt or resume all pallet operations. - /// - /// May only be called either by root, or by `PalletOwner`. - #[pallet::call_index(3)] - #[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] - pub fn set_operating_mode( - origin: OriginFor, - operating_mode: BasicOperatingMode, - ) -> DispatchResult { - >::set_operating_mode(origin, operating_mode) - } - - /// Verify a target header is finalized according to the given finality proof. The proof - /// is assumed to be signed by GRANDPA authorities set with `current_set_id` id. - /// - /// It will use the underlying storage pallet to fetch information about the current - /// authorities and best finalized header in order to verify that the header is finalized. - /// - /// If successful in verification, it will write the target header to the underlying storage - /// pallet. 
- /// - /// The call fails if: - /// - /// - the pallet is halted; - /// - /// - the pallet knows better header than the `finality_target`; - /// - /// - the id of best GRANDPA authority set, known to the pallet is not equal to the - /// `current_set_id`; - /// - /// - verification is not optimized or invalid; - /// - /// - header contains forced authorities set change or change with non-zero delay. - /// - /// The `is_free_execution_expected` parameter is not really used inside the call. It is - /// used by the transaction extension, which should be registered at the runtime level. If - /// this parameter is `true`, the transaction will be treated as invalid, if the call won't - /// be executed for free. If transaction extension is not used by the runtime, this - /// parameter is not used at all. - #[pallet::call_index(4)] - #[pallet::weight(T::WeightInfo::submit_finality_proof_weight( - justification.commit.precommits.len().saturated_into(), - justification.votes_ancestries.len().saturated_into(), - ))] - pub fn submit_finality_proof_ex( - origin: OriginFor, - finality_target: Box>, - justification: GrandpaJustification>, - current_set_id: sp_consensus_grandpa::SetId, - _is_free_execution_expected: bool, - ) -> DispatchResultWithPostInfo { - Self::ensure_not_halted().map_err(Error::::BridgeModule)?; - ensure_signed(origin)?; - - let (hash, number) = (finality_target.hash(), *finality_target.number()); - log::trace!( - target: LOG_TARGET, - "Going to try and finalize header {:?}", - finality_target - ); - - // it checks whether the `number` is better than the current best block number - // and whether the `current_set_id` matches the best known set id - let improved_by = - SubmitFinalityProofHelper::::check_obsolete(number, Some(current_set_id))?; - - let authority_set = >::get(); - let unused_proof_size = authority_set.unused_proof_size(); - let set_id = authority_set.set_id; - let authority_set: AuthoritySet = authority_set.into(); - 
verify_justification::(&justification, hash, number, authority_set)?; - - let maybe_new_authority_set = - try_enact_authority_change::(&finality_target, set_id)?; - let may_refund_call_fee = may_refund_call_fee::( - maybe_new_authority_set.is_some(), - &finality_target, - &justification, - current_set_id, - improved_by, - ); - if may_refund_call_fee { - on_free_header_imported::(); - } - insert_header::(*finality_target, hash); - - // mandatory header is a header that changes authorities set. The pallet can't go - // further without importing this header. So every bridge MUST import mandatory headers. - // - // We don't want to charge extra costs for mandatory operations. So relayer is not - // paying fee for mandatory headers import transactions. - // - // If size/weight of the call is exceeds our estimated limits, the relayer still needs - // to pay for the transaction. - let pays_fee = if may_refund_call_fee { Pays::No } else { Pays::Yes }; - - log::info!( - target: LOG_TARGET, - "Successfully imported finalized header with hash {:?}! Free: {}", - hash, - if may_refund_call_fee { "Yes" } else { "No" }, - ); - - // the proof size component of the call weight assumes that there are - // `MaxBridgedAuthorities` in the `CurrentAuthoritySet` (we use `MaxEncodedLen` - // estimation). 
But if their number is lower, then we may "refund" some `proof_size`, - // making proof smaller and leaving block space to other useful transactions - let pre_dispatch_weight = T::WeightInfo::submit_finality_proof( - justification.commit.precommits.len().saturated_into(), - justification.votes_ancestries.len().saturated_into(), - ); - let actual_weight = pre_dispatch_weight - .set_proof_size(pre_dispatch_weight.proof_size().saturating_sub(unused_proof_size)); - - Self::deposit_event(Event::UpdatedBestFinalizedHeader { - number, - hash, - grandpa_info: StoredHeaderGrandpaInfo { - finality_proof: justification, - new_verification_context: maybe_new_authority_set, - }, - }); - - Ok(PostDispatchInfo { actual_weight: Some(actual_weight), pays_fee }) - } - } - - /// Number of free header submissions that we may yet accept in the current block. - /// - /// If the `FreeHeadersRemaining` hits zero, all following mandatory headers in the - /// current block are accepted with fee (`Pays::Yes` is returned). - /// - /// The `FreeHeadersRemaining` is an ephemeral value that is set to - /// `MaxFreeHeadersPerBlock` at each block initialization and is killed on block - /// finalization. So it never ends up in the storage trie. - #[pallet::storage] - #[pallet::whitelist_storage] - #[pallet::getter(fn free_mandatory_headers_remaining)] - pub type FreeHeadersRemaining, I: 'static = ()> = - StorageValue<_, u32, OptionQuery>; - - /// Hash of the header used to bootstrap the pallet. - #[pallet::storage] - pub(super) type InitialHash, I: 'static = ()> = - StorageValue<_, BridgedBlockHash, ValueQuery>; - - /// Hash of the best finalized header. - #[pallet::storage] - #[pallet::getter(fn best_finalized)] - pub type BestFinalized, I: 'static = ()> = - StorageValue<_, BridgedBlockId, OptionQuery>; - - /// A ring buffer of imported hashes. Ordered by the insertion time. 
- #[pallet::storage] - pub(super) type ImportedHashes, I: 'static = ()> = StorageMap< - Hasher = Identity, - Key = u32, - Value = BridgedBlockHash, - QueryKind = OptionQuery, - OnEmpty = GetDefault, - MaxValues = MaybeHeadersToKeep, - >; - - /// Current ring buffer position. - #[pallet::storage] - pub(super) type ImportedHashesPointer, I: 'static = ()> = - StorageValue<_, u32, ValueQuery>; - - /// Relevant fields of imported headers. - #[pallet::storage] - pub type ImportedHeaders, I: 'static = ()> = StorageMap< - Hasher = Identity, - Key = BridgedBlockHash, - Value = BridgedStoredHeaderData, - QueryKind = OptionQuery, - OnEmpty = GetDefault, - MaxValues = MaybeHeadersToKeep, - >; - - /// The current GRANDPA Authority set. - #[pallet::storage] - pub type CurrentAuthoritySet, I: 'static = ()> = - StorageValue<_, StoredAuthoritySet, ValueQuery>; - - /// Optional pallet owner. - /// - /// Pallet owner has a right to halt all pallet operations and then resume it. If it is - /// `None`, then there are no direct ways to halt/resume pallet operations, but other - /// runtime methods may still be used to do that (i.e. democracy::referendum to update halt - /// flag directly or call the `halt_operations`). - #[pallet::storage] - pub type PalletOwner, I: 'static = ()> = - StorageValue<_, T::AccountId, OptionQuery>; - - /// The current operating mode of the pallet. - /// - /// Depending on the mode either all, or no transactions will be allowed. - #[pallet::storage] - pub type PalletOperatingMode, I: 'static = ()> = - StorageValue<_, BasicOperatingMode, ValueQuery>; - - #[pallet::genesis_config] - #[derive(DefaultNoBound)] - pub struct GenesisConfig, I: 'static = ()> { - /// Optional module owner account. - pub owner: Option, - /// Optional module initialization data. 
- pub init_data: Option>>, - } - - #[pallet::genesis_build] - impl, I: 'static> BuildGenesisConfig for GenesisConfig { - fn build(&self) { - if let Some(ref owner) = self.owner { - >::put(owner); - } - - if let Some(init_data) = self.init_data.clone() { - initialize_bridge::(init_data).expect("genesis config is correct; qed"); - } else { - // Since the bridge hasn't been initialized we shouldn't allow anyone to perform - // transactions. - >::put(BasicOperatingMode::Halted); - } - } - } - - #[pallet::event] - #[pallet::generate_deposit(pub(super) fn deposit_event)] - pub enum Event, I: 'static = ()> { - /// Best finalized chain header has been updated to the header with given number and hash. - UpdatedBestFinalizedHeader { - /// Number of the new best finalized header. - number: BridgedBlockNumber, - /// Hash of the new best finalized header. - hash: BridgedBlockHash, - /// The Grandpa info associated to the new best finalized header. - grandpa_info: StoredHeaderGrandpaInfo>, - }, - } - - #[pallet::error] - pub enum Error { - /// The given justification is invalid for the given header. - InvalidJustification, - /// The authority set from the underlying header chain is invalid. - InvalidAuthoritySet, - /// The header being imported is older than the best finalized header known to the pallet. - OldHeader, - /// The scheduled authority set change found in the header is unsupported by the pallet. - /// - /// This is the case for non-standard (e.g forced) authority set changes. - UnsupportedScheduledChange, - /// The pallet is not yet initialized. - NotInitialized, - /// The pallet has already been initialized. - AlreadyInitialized, - /// Too many authorities in the set. - TooManyAuthoritiesInSet, - /// Error generated by the `OwnedBridgeModule` trait. - BridgeModule(bp_runtime::OwnedBridgeModuleError), - /// The `current_set_id` argument of the `submit_finality_proof_ex` doesn't match - /// the id of the current set, known to the pallet. 
- InvalidAuthoritySetId, - /// The submitter wanted free execution, but we can't fit more free transactions - /// to the block. - FreeHeadersLimitExceded, - /// The submitter wanted free execution, but the difference between best known and - /// bundled header numbers is below the `FreeHeadersInterval`. - BelowFreeHeaderInterval, - } - - /// Called when new free header is imported. - pub fn on_free_header_imported, I: 'static>() { - FreeHeadersRemaining::::mutate(|count| { - *count = match *count { - // never set to `None` here - the signed extension assumes that it is `None` - // outside of block execution - i.e. when transaction is validatied from - // the transaction pool - Some(count) => Some(count.saturating_sub(1)), - None => None, - } - }); - } - - /// Return true if we may refund transaction cost to the submitter. In other words, - /// this transaction is considered as common good deed w.r.t to pallet configuration. - fn may_refund_call_fee, I: 'static>( - is_mandatory_header: bool, - finality_target: &BridgedHeader, - justification: &GrandpaJustification>, - current_set_id: SetId, - improved_by: BridgedBlockNumber, - ) -> bool { - // if we have refunded too much at this block => not refunding - if FreeHeadersRemaining::::get().unwrap_or(0) == 0 { - return false; - } - - // if size/weight of call is larger than expected => not refunding - let call_info = submit_finality_proof_info_from_args::( - &finality_target, - &justification, - Some(current_set_id), - // this function is called from the transaction body and we do not want - // to do MAY-be-free-executed checks here - they had to be done in the - // transaction extension before - false, - ); - if !call_info.fits_limits() { - return false; - } - - // if that's a mandatory header => refund - if is_mandatory_header { - return true; - } - - // if configuration allows free non-mandatory headers and the header - // matches criteria => refund - if let Some(free_headers_interval) = T::FreeHeadersInterval::get() 
{ - if improved_by >= free_headers_interval.into() { - return true; - } - } - - false - } - - /// Check the given header for a GRANDPA scheduled authority set change. If a change - /// is found it will be enacted immediately. - /// - /// This function does not support forced changes, or scheduled changes with delays - /// since these types of changes are indicative of abnormal behavior from GRANDPA. - /// - /// Returned value will indicate if a change was enacted or not. - pub(crate) fn try_enact_authority_change, I: 'static>( - header: &BridgedHeader, - current_set_id: sp_consensus_grandpa::SetId, - ) -> Result, DispatchError> { - // We don't support forced changes - at that point governance intervention is required. - ensure!( - GrandpaConsensusLogReader::>::find_forced_change( - header.digest() - ) - .is_none(), - >::UnsupportedScheduledChange - ); - - if let Some(change) = - GrandpaConsensusLogReader::>::find_scheduled_change( - header.digest(), - ) { - // GRANDPA only includes a `delay` for forced changes, so this isn't valid. - ensure!(change.delay == Zero::zero(), >::UnsupportedScheduledChange); - - // TODO [#788]: Stop manually increasing the `set_id` here. - let next_authorities = StoredAuthoritySet:: { - authorities: change - .next_authorities - .try_into() - .map_err(|_| Error::::TooManyAuthoritiesInSet)?, - set_id: current_set_id + 1, - }; - - // Since our header schedules a change and we know the delay is 0, it must also enact - // the change. - >::put(&next_authorities); - - log::info!( - target: LOG_TARGET, - "Transitioned from authority set {} to {}! New authorities are: {:?}", - current_set_id, - current_set_id + 1, - next_authorities, - ); - - return Ok(Some(next_authorities.into())) - }; - - Ok(None) - } - - /// Verify a GRANDPA justification (finality proof) for a given header. - /// - /// Will use the GRANDPA current authorities known to the pallet. 
- /// - /// If successful it returns the decoded GRANDPA justification so we can refund any weight which - /// was overcharged in the initial call. - pub(crate) fn verify_justification, I: 'static>( - justification: &GrandpaJustification>, - hash: BridgedBlockHash, - number: BridgedBlockNumber, - authority_set: bp_header_chain::AuthoritySet, - ) -> Result<(), sp_runtime::DispatchError> { - use bp_header_chain::justification::verify_justification; - - Ok(verify_justification::>( - (hash, number), - &authority_set.try_into().map_err(|_| >::InvalidAuthoritySet)?, - justification, - ) - .map_err(|e| { - log::error!( - target: LOG_TARGET, - "Received invalid justification for {:?}: {:?}", - hash, - e, - ); - >::InvalidJustification - })?) - } - - /// Import a previously verified header to the storage. - /// - /// Note this function solely takes care of updating the storage and pruning old entries, - /// but does not verify the validity of such import. - pub(crate) fn insert_header, I: 'static>( - header: BridgedHeader, - hash: BridgedBlockHash, - ) { - let index = >::get(); - let pruning = >::try_get(index); - >::put(HeaderId(*header.number(), hash)); - >::insert(hash, header.build()); - >::insert(index, hash); - - // Update ring buffer pointer and remove old header. - >::put((index + 1) % T::HeadersToKeep::get()); - if let Ok(hash) = pruning { - log::debug!(target: LOG_TARGET, "Pruning old header: {:?}.", hash); - >::remove(hash); - } - } - - /// Since this writes to storage with no real checks this should only be used in functions that - /// were called by a trusted origin. 
- pub(crate) fn initialize_bridge, I: 'static>( - init_params: super::InitializationData>, - ) -> Result<(), Error> { - let super::InitializationData { header, authority_list, set_id, operating_mode } = - init_params; - let authority_set_length = authority_list.len(); - let authority_set = StoredAuthoritySet::::try_new(authority_list, set_id) - .map_err(|e| { - log::error!( - target: LOG_TARGET, - "Failed to initialize bridge. Number of authorities in the set {} is larger than the configured value {}", - authority_set_length, - T::BridgedChain::MAX_AUTHORITIES_COUNT, - ); - - e - })?; - let initial_hash = header.hash(); - - >::put(initial_hash); - >::put(0); - insert_header::(*header, initial_hash); - - >::put(authority_set); - - >::put(operating_mode); - - Ok(()) - } - - /// Adapter for using `Config::HeadersToKeep` as `MaxValues` bound in our storage maps. - pub struct MaybeHeadersToKeep(PhantomData<(T, I)>); - - // this implementation is required to use the struct as `MaxValues` - impl, I: 'static> Get> for MaybeHeadersToKeep { - fn get() -> Option { - Some(T::HeadersToKeep::get()) - } - } - - /// Initialize pallet so that it is ready for inserting new header. - /// - /// The function makes sure that the new insertion will cause the pruning of some old header. - /// - /// Returns parent header for the new header. - #[cfg(feature = "runtime-benchmarks")] - pub(crate) fn bootstrap_bridge, I: 'static>( - init_params: super::InitializationData>, - ) -> BridgedHeader { - let start_header = init_params.header.clone(); - initialize_bridge::(init_params).expect("benchmarks are correct"); - - // the most obvious way to cause pruning during next insertion would be to insert - // `HeadersToKeep` headers. But it'll make our benchmarks slow. So we will just play with - // our pruning ring-buffer. 
- assert_eq!(ImportedHashesPointer::::get(), 1); - ImportedHashesPointer::::put(0); - - *start_header - } -} - -impl, I: 'static> Pallet -where - ::RuntimeEvent: TryInto>, -{ - /// Get the GRANDPA justifications accepted in the current block. - pub fn synced_headers_grandpa_info() -> Vec>> { - frame_system::Pallet::::read_events_no_consensus() - .filter_map(|event| { - if let Event::::UpdatedBestFinalizedHeader { grandpa_info, .. } = - event.event.try_into().ok()? - { - return Some(grandpa_info) - } - None - }) - .collect() - } -} - -/// Bridge GRANDPA pallet as header chain. -pub type GrandpaChainHeaders = Pallet; - -impl, I: 'static> HeaderChain> for GrandpaChainHeaders { - fn finalized_header_state_root( - header_hash: HashOf>, - ) -> Option>> { - ImportedHeaders::::get(header_hash).map(|h| h.state_root) - } -} - -/// (Re)initialize bridge with given header for using it in `pallet-bridge-messages` benchmarks. -#[cfg(feature = "runtime-benchmarks")] -pub fn initialize_for_benchmarks, I: 'static>(header: BridgedHeader) { - initialize_bridge::(InitializationData { - header: Box::new(header), - authority_list: sp_std::vec::Vec::new(), /* we don't verify any proofs in external - * benchmarks */ - set_id: 0, - operating_mode: bp_runtime::BasicOperatingMode::Normal, - }) - .expect("only used from benchmarks; benchmarks are correct; qed"); -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::{ - run_test, test_header, FreeHeadersInterval, RuntimeEvent as TestEvent, RuntimeOrigin, - System, TestBridgedChain, TestHeader, TestNumber, TestRuntime, MAX_BRIDGED_AUTHORITIES, - }; - use bp_header_chain::BridgeGrandpaCall; - use bp_runtime::BasicOperatingMode; - use bp_test_utils::{ - authority_list, generate_owned_bridge_module_tests, make_default_justification, - make_justification_for_header, JustificationGeneratorParams, ALICE, BOB, - TEST_GRANDPA_SET_ID, - }; - use codec::Encode; - use frame_support::{ - assert_err, assert_noop, assert_ok, - dispatch::{Pays, 
PostDispatchInfo}, - storage::generator::StorageValue, - }; - use frame_system::{EventRecord, Phase}; - use sp_consensus_grandpa::{ConsensusLog, GRANDPA_ENGINE_ID}; - use sp_core::Get; - use sp_runtime::{Digest, DigestItem, DispatchError}; - - fn initialize_substrate_bridge() { - System::set_block_number(1); - System::reset_events(); - - assert_ok!(init_with_origin(RuntimeOrigin::root())); - } - - fn init_with_origin( - origin: RuntimeOrigin, - ) -> Result< - InitializationData, - sp_runtime::DispatchErrorWithPostInfo, - > { - let genesis = test_header(0); - - let init_data = InitializationData { - header: Box::new(genesis), - authority_list: authority_list(), - set_id: TEST_GRANDPA_SET_ID, - operating_mode: BasicOperatingMode::Normal, - }; - - Pallet::::initialize(origin, init_data.clone()).map(|_| init_data) - } - - fn submit_finality_proof(header: u8) -> frame_support::dispatch::DispatchResultWithPostInfo { - let header = test_header(header.into()); - let justification = make_default_justification(&header); - - Pallet::::submit_finality_proof_ex( - RuntimeOrigin::signed(1), - Box::new(header), - justification, - TEST_GRANDPA_SET_ID, - false, - ) - } - - fn submit_finality_proof_with_set_id( - header: u8, - set_id: u64, - ) -> frame_support::dispatch::DispatchResultWithPostInfo { - let header = test_header(header.into()); - let justification = make_justification_for_header(JustificationGeneratorParams { - header: header.clone(), - set_id, - ..Default::default() - }); - - Pallet::::submit_finality_proof_ex( - RuntimeOrigin::signed(1), - Box::new(header), - justification, - set_id, - false, - ) - } - - fn submit_mandatory_finality_proof( - number: u8, - set_id: u64, - ) -> frame_support::dispatch::DispatchResultWithPostInfo { - let mut header = test_header(number.into()); - // to ease tests that are using `submit_mandatory_finality_proof`, we'll be using the - // same set for all sessions - let consensus_log = - 
ConsensusLog::::ScheduledChange(sp_consensus_grandpa::ScheduledChange { - next_authorities: authority_list(), - delay: 0, - }); - header.digest = - Digest { logs: vec![DigestItem::Consensus(GRANDPA_ENGINE_ID, consensus_log.encode())] }; - let justification = make_justification_for_header(JustificationGeneratorParams { - header: header.clone(), - set_id, - ..Default::default() - }); - - Pallet::::submit_finality_proof_ex( - RuntimeOrigin::signed(1), - Box::new(header), - justification, - set_id, - false, - ) - } - - fn next_block() { - use frame_support::traits::OnInitialize; - - let current_number = frame_system::Pallet::::block_number(); - frame_system::Pallet::::set_block_number(current_number + 1); - let _ = Pallet::::on_initialize(current_number); - } - - fn change_log(delay: u64) -> Digest { - let consensus_log = - ConsensusLog::::ScheduledChange(sp_consensus_grandpa::ScheduledChange { - next_authorities: vec![(ALICE.into(), 1), (BOB.into(), 1)], - delay, - }); - - Digest { logs: vec![DigestItem::Consensus(GRANDPA_ENGINE_ID, consensus_log.encode())] } - } - - fn forced_change_log(delay: u64) -> Digest { - let consensus_log = ConsensusLog::::ForcedChange( - delay, - sp_consensus_grandpa::ScheduledChange { - next_authorities: vec![(ALICE.into(), 1), (BOB.into(), 1)], - delay, - }, - ); - - Digest { logs: vec![DigestItem::Consensus(GRANDPA_ENGINE_ID, consensus_log.encode())] } - } - - fn many_authorities_log() -> Digest { - let consensus_log = - ConsensusLog::::ScheduledChange(sp_consensus_grandpa::ScheduledChange { - next_authorities: std::iter::repeat((ALICE.into(), 1)) - .take(MAX_BRIDGED_AUTHORITIES as usize + 1) - .collect(), - delay: 0, - }); - - Digest { logs: vec![DigestItem::Consensus(GRANDPA_ENGINE_ID, consensus_log.encode())] } - } - - #[test] - fn init_root_or_owner_origin_can_initialize_pallet() { - run_test(|| { - assert_noop!(init_with_origin(RuntimeOrigin::signed(1)), DispatchError::BadOrigin); - 
assert_ok!(init_with_origin(RuntimeOrigin::root())); - - // Reset storage so we can initialize the pallet again - BestFinalized::::kill(); - PalletOwner::::put(2); - assert_ok!(init_with_origin(RuntimeOrigin::signed(2))); - }) - } - - #[test] - fn init_storage_entries_are_correctly_initialized() { - run_test(|| { - assert_eq!(BestFinalized::::get(), None,); - assert_eq!(Pallet::::best_finalized(), None); - assert_eq!(PalletOperatingMode::::try_get(), Err(())); - - let init_data = init_with_origin(RuntimeOrigin::root()).unwrap(); - - assert!(>::contains_key(init_data.header.hash())); - assert_eq!(BestFinalized::::get().unwrap().1, init_data.header.hash()); - assert_eq!( - CurrentAuthoritySet::::get().authorities, - init_data.authority_list - ); - assert_eq!( - PalletOperatingMode::::try_get(), - Ok(BasicOperatingMode::Normal) - ); - }) - } - - #[test] - fn init_can_only_initialize_pallet_once() { - run_test(|| { - initialize_substrate_bridge(); - assert_noop!( - init_with_origin(RuntimeOrigin::root()), - >::AlreadyInitialized - ); - }) - } - - #[test] - fn init_fails_if_there_are_too_many_authorities_in_the_set() { - run_test(|| { - let genesis = test_header(0); - let init_data = InitializationData { - header: Box::new(genesis), - authority_list: std::iter::repeat(authority_list().remove(0)) - .take(MAX_BRIDGED_AUTHORITIES as usize + 1) - .collect(), - set_id: 1, - operating_mode: BasicOperatingMode::Normal, - }; - - assert_noop!( - Pallet::::initialize(RuntimeOrigin::root(), init_data), - Error::::TooManyAuthoritiesInSet, - ); - }); - } - - #[test] - fn pallet_rejects_transactions_if_halted() { - run_test(|| { - initialize_substrate_bridge(); - - assert_ok!(Pallet::::set_operating_mode( - RuntimeOrigin::root(), - BasicOperatingMode::Halted - )); - assert_noop!( - submit_finality_proof(1), - Error::::BridgeModule(bp_runtime::OwnedBridgeModuleError::Halted) - ); - - assert_ok!(Pallet::::set_operating_mode( - RuntimeOrigin::root(), - BasicOperatingMode::Normal - )); - 
assert_ok!(submit_finality_proof(1)); - }) - } - - #[test] - fn pallet_rejects_header_if_not_initialized_yet() { - run_test(|| { - assert_noop!(submit_finality_proof(1), Error::::NotInitialized); - }); - } - - #[test] - fn succesfully_imports_header_with_valid_finality() { - run_test(|| { - initialize_substrate_bridge(); - - let header_number = 1; - let header = test_header(header_number.into()); - let justification = make_default_justification(&header); - - let pre_dispatch_weight = ::WeightInfo::submit_finality_proof( - justification.commit.precommits.len().try_into().unwrap_or(u32::MAX), - justification.votes_ancestries.len().try_into().unwrap_or(u32::MAX), - ); - - let result = submit_finality_proof(header_number); - assert_ok!(result); - assert_eq!(result.unwrap().pays_fee, frame_support::dispatch::Pays::Yes); - // our test config assumes 2048 max authorities and we are just using couple - let pre_dispatch_proof_size = pre_dispatch_weight.proof_size(); - let actual_proof_size = result.unwrap().actual_weight.unwrap().proof_size(); - assert!(actual_proof_size > 0); - assert!( - actual_proof_size < pre_dispatch_proof_size, - "Actual proof size {actual_proof_size} must be less than the pre-dispatch {pre_dispatch_proof_size}", - ); - - let header = test_header(1); - assert_eq!(>::get().unwrap().1, header.hash()); - assert!(>::contains_key(header.hash())); - - assert_eq!( - System::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: TestEvent::Grandpa(Event::UpdatedBestFinalizedHeader { - number: *header.number(), - hash: header.hash(), - grandpa_info: StoredHeaderGrandpaInfo { - finality_proof: justification.clone(), - new_verification_context: None, - }, - }), - topics: vec![], - }], - ); - assert_eq!( - Pallet::::synced_headers_grandpa_info(), - vec![StoredHeaderGrandpaInfo { - finality_proof: justification, - new_verification_context: None - }] - ); - }) - } - - #[test] - fn rejects_justification_that_skips_authority_set_transition() { - 
run_test(|| { - initialize_substrate_bridge(); - - let header = test_header(1); - - let next_set_id = 2; - let params = JustificationGeneratorParams:: { - set_id: next_set_id, - ..Default::default() - }; - let justification = make_justification_for_header(params); - - assert_err!( - Pallet::::submit_finality_proof_ex( - RuntimeOrigin::signed(1), - Box::new(header.clone()), - justification.clone(), - TEST_GRANDPA_SET_ID, - false, - ), - >::InvalidJustification - ); - assert_err!( - Pallet::::submit_finality_proof_ex( - RuntimeOrigin::signed(1), - Box::new(header), - justification, - next_set_id, - false, - ), - >::InvalidAuthoritySetId - ); - }) - } - - #[test] - fn does_not_import_header_with_invalid_finality_proof() { - run_test(|| { - initialize_substrate_bridge(); - - let header = test_header(1); - let mut justification = make_default_justification(&header); - justification.round = 42; - - assert_err!( - Pallet::::submit_finality_proof_ex( - RuntimeOrigin::signed(1), - Box::new(header), - justification, - TEST_GRANDPA_SET_ID, - false, - ), - >::InvalidJustification - ); - }) - } - - #[test] - fn disallows_invalid_authority_set() { - run_test(|| { - let genesis = test_header(0); - - let invalid_authority_list = vec![(ALICE.into(), u64::MAX), (BOB.into(), u64::MAX)]; - let init_data = InitializationData { - header: Box::new(genesis), - authority_list: invalid_authority_list, - set_id: 1, - operating_mode: BasicOperatingMode::Normal, - }; - - assert_ok!(Pallet::::initialize(RuntimeOrigin::root(), init_data)); - - let header = test_header(1); - let justification = make_default_justification(&header); - - assert_err!( - Pallet::::submit_finality_proof_ex( - RuntimeOrigin::signed(1), - Box::new(header), - justification, - TEST_GRANDPA_SET_ID, - false, - ), - >::InvalidAuthoritySet - ); - }) - } - - #[test] - fn importing_header_ensures_that_chain_is_extended() { - run_test(|| { - initialize_substrate_bridge(); - - assert_ok!(submit_finality_proof(4)); - 
assert_err!(submit_finality_proof(3), Error::::OldHeader); - assert_ok!(submit_finality_proof(5)); - }) - } - - #[test] - fn importing_header_enacts_new_authority_set() { - run_test(|| { - initialize_substrate_bridge(); - - let next_set_id = 2; - let next_authorities = vec![(ALICE.into(), 1), (BOB.into(), 1)]; - - // Need to update the header digest to indicate that our header signals an authority set - // change. The change will be enacted when we import our header. - let mut header = test_header(2); - header.digest = change_log(0); - - // Create a valid justification for the header - let justification = make_default_justification(&header); - - // Let's import our test header - let result = Pallet::::submit_finality_proof_ex( - RuntimeOrigin::signed(1), - Box::new(header.clone()), - justification.clone(), - TEST_GRANDPA_SET_ID, - false, - ); - assert_ok!(result); - assert_eq!(result.unwrap().pays_fee, frame_support::dispatch::Pays::No); - - // Make sure that our header is the best finalized - assert_eq!(>::get().unwrap().1, header.hash()); - assert!(>::contains_key(header.hash())); - - // Make sure that the authority set actually changed upon importing our header - assert_eq!( - >::get(), - StoredAuthoritySet::::try_new(next_authorities, next_set_id) - .unwrap(), - ); - - // Here - assert_eq!( - System::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: TestEvent::Grandpa(Event::UpdatedBestFinalizedHeader { - number: *header.number(), - hash: header.hash(), - grandpa_info: StoredHeaderGrandpaInfo { - finality_proof: justification.clone(), - new_verification_context: Some( - >::get().into() - ), - }, - }), - topics: vec![], - }], - ); - assert_eq!( - Pallet::::synced_headers_grandpa_info(), - vec![StoredHeaderGrandpaInfo { - finality_proof: justification, - new_verification_context: Some( - >::get().into() - ), - }] - ); - }) - } - - #[test] - fn relayer_pays_tx_fee_when_submitting_huge_mandatory_header() { - run_test(|| { - 
initialize_substrate_bridge(); - - // let's prepare a huge authorities change header, which is definitely above size limits - let mut header = test_header(2); - header.digest = change_log(0); - header.digest.push(DigestItem::Other(vec![42u8; 1024 * 1024])); - let justification = make_default_justification(&header); - - // without large digest item ^^^ the relayer would have paid zero transaction fee - // (`Pays::No`) - let result = Pallet::::submit_finality_proof_ex( - RuntimeOrigin::signed(1), - Box::new(header.clone()), - justification, - TEST_GRANDPA_SET_ID, - false, - ); - assert_ok!(result); - assert_eq!(result.unwrap().pays_fee, frame_support::dispatch::Pays::Yes); - - // Make sure that our header is the best finalized - assert_eq!(>::get().unwrap().1, header.hash()); - assert!(>::contains_key(header.hash())); - }) - } - - #[test] - fn relayer_pays_tx_fee_when_submitting_justification_with_long_ancestry_votes() { - run_test(|| { - initialize_substrate_bridge(); - - // let's prepare a huge authorities change header, which is definitely above weight - // limits - let mut header = test_header(2); - header.digest = change_log(0); - let justification = make_justification_for_header(JustificationGeneratorParams { - header: header.clone(), - ancestors: TestBridgedChain::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY + 1, - ..Default::default() - }); - - // without many headers in votes ancestries ^^^ the relayer would have paid zero - // transaction fee (`Pays::No`) - let result = Pallet::::submit_finality_proof_ex( - RuntimeOrigin::signed(1), - Box::new(header.clone()), - justification, - TEST_GRANDPA_SET_ID, - false, - ); - assert_ok!(result); - assert_eq!(result.unwrap().pays_fee, frame_support::dispatch::Pays::Yes); - - // Make sure that our header is the best finalized - assert_eq!(>::get().unwrap().1, header.hash()); - assert!(>::contains_key(header.hash())); - }) - } - - #[test] - fn importing_header_rejects_header_with_scheduled_change_delay() { - run_test(|| { 
- initialize_substrate_bridge(); - - // Need to update the header digest to indicate that our header signals an authority set - // change. However, the change doesn't happen until the next block. - let mut header = test_header(2); - header.digest = change_log(1); - - // Create a valid justification for the header - let justification = make_default_justification(&header); - - // Should not be allowed to import this header - assert_err!( - Pallet::::submit_finality_proof_ex( - RuntimeOrigin::signed(1), - Box::new(header), - justification, - TEST_GRANDPA_SET_ID, - false, - ), - >::UnsupportedScheduledChange - ); - }) - } - - #[test] - fn importing_header_rejects_header_with_forced_changes() { - run_test(|| { - initialize_substrate_bridge(); - - // Need to update the header digest to indicate that it signals a forced authority set - // change. - let mut header = test_header(2); - header.digest = forced_change_log(0); - - // Create a valid justification for the header - let justification = make_default_justification(&header); - - // Should not be allowed to import this header - assert_err!( - Pallet::::submit_finality_proof_ex( - RuntimeOrigin::signed(1), - Box::new(header), - justification, - TEST_GRANDPA_SET_ID, - false, - ), - >::UnsupportedScheduledChange - ); - }) - } - - #[test] - fn importing_header_rejects_header_with_too_many_authorities() { - run_test(|| { - initialize_substrate_bridge(); - - // Need to update the header digest to indicate that our header signals an authority set - // change. However, the change doesn't happen until the next block. 
- let mut header = test_header(2); - header.digest = many_authorities_log(); - - // Create a valid justification for the header - let justification = make_default_justification(&header); - - // Should not be allowed to import this header - assert_err!( - Pallet::::submit_finality_proof_ex( - RuntimeOrigin::signed(1), - Box::new(header), - justification, - TEST_GRANDPA_SET_ID, - false, - ), - >::TooManyAuthoritiesInSet - ); - }); - } - - #[test] - fn parse_finalized_storage_proof_rejects_proof_on_unknown_header() { - run_test(|| { - assert_noop!( - Pallet::::storage_proof_checker(Default::default(), vec![],) - .map(|_| ()), - bp_header_chain::HeaderChainError::UnknownHeader, - ); - }); - } - - #[test] - fn parse_finalized_storage_accepts_valid_proof() { - run_test(|| { - let (state_root, storage_proof) = bp_runtime::craft_valid_storage_proof(); - - let mut header = test_header(2); - header.set_state_root(state_root); - - let hash = header.hash(); - >::put(HeaderId(2, hash)); - >::insert(hash, header.build()); - - assert_ok!( - Pallet::::storage_proof_checker(hash, storage_proof).map(|_| ()) - ); - }); - } - - #[test] - fn rate_limiter_disallows_free_imports_once_limit_is_hit_in_single_block() { - run_test(|| { - initialize_substrate_bridge(); - - let result = submit_mandatory_finality_proof(1, 1); - assert_eq!(result.expect("call failed").pays_fee, Pays::No); - - let result = submit_mandatory_finality_proof(2, 2); - assert_eq!(result.expect("call failed").pays_fee, Pays::No); - - let result = submit_mandatory_finality_proof(3, 3); - assert_eq!(result.expect("call failed").pays_fee, Pays::Yes); - }) - } - - #[test] - fn rate_limiter_invalid_requests_do_not_count_towards_request_count() { - run_test(|| { - let submit_invalid_request = || { - let mut header = test_header(1); - header.digest = change_log(0); - let mut invalid_justification = make_default_justification(&header); - invalid_justification.round = 42; - - Pallet::::submit_finality_proof_ex( - 
RuntimeOrigin::signed(1), - Box::new(header), - invalid_justification, - TEST_GRANDPA_SET_ID, - false, - ) - }; - - initialize_substrate_bridge(); - - for _ in 0..::MaxFreeHeadersPerBlock::get() + 1 { - assert_err!(submit_invalid_request(), >::InvalidJustification); - } - - // Can still submit free mandatory headers afterwards - let result = submit_mandatory_finality_proof(1, 1); - assert_eq!(result.expect("call failed").pays_fee, Pays::No); - - let result = submit_mandatory_finality_proof(2, 2); - assert_eq!(result.expect("call failed").pays_fee, Pays::No); - - let result = submit_mandatory_finality_proof(3, 3); - assert_eq!(result.expect("call failed").pays_fee, Pays::Yes); - }) - } - - #[test] - fn rate_limiter_allows_request_after_new_block_has_started() { - run_test(|| { - initialize_substrate_bridge(); - - let result = submit_mandatory_finality_proof(1, 1); - assert_eq!(result.expect("call failed").pays_fee, Pays::No); - - let result = submit_mandatory_finality_proof(2, 2); - assert_eq!(result.expect("call failed").pays_fee, Pays::No); - - let result = submit_mandatory_finality_proof(3, 3); - assert_eq!(result.expect("call failed").pays_fee, Pays::Yes); - - next_block(); - - let result = submit_mandatory_finality_proof(4, 4); - assert_eq!(result.expect("call failed").pays_fee, Pays::No); - - let result = submit_mandatory_finality_proof(5, 5); - assert_eq!(result.expect("call failed").pays_fee, Pays::No); - - let result = submit_mandatory_finality_proof(6, 6); - assert_eq!(result.expect("call failed").pays_fee, Pays::Yes); - }) - } - - #[test] - fn rate_limiter_ignores_non_mandatory_headers() { - run_test(|| { - initialize_substrate_bridge(); - - let result = submit_finality_proof(1); - assert_eq!(result.expect("call failed").pays_fee, Pays::Yes); - - let result = submit_mandatory_finality_proof(2, 1); - assert_eq!(result.expect("call failed").pays_fee, Pays::No); - - let result = submit_finality_proof_with_set_id(3, 2); - assert_eq!(result.expect("call 
failed").pays_fee, Pays::Yes); - - let result = submit_mandatory_finality_proof(4, 2); - assert_eq!(result.expect("call failed").pays_fee, Pays::No); - - let result = submit_finality_proof_with_set_id(5, 3); - assert_eq!(result.expect("call failed").pays_fee, Pays::Yes); - - let result = submit_mandatory_finality_proof(6, 3); - assert_eq!(result.expect("call failed").pays_fee, Pays::Yes); - }) - } - - #[test] - fn may_import_non_mandatory_header_for_free() { - run_test(|| { - initialize_substrate_bridge(); - - // set best finalized to `100` - const BEST: u8 = 12; - fn reset_best() { - BestFinalized::::set(Some(HeaderId( - BEST as _, - Default::default(), - ))); - } - - // non-mandatory header is imported with fee - reset_best(); - let non_free_header_number = BEST + FreeHeadersInterval::get() as u8 - 1; - let result = submit_finality_proof(non_free_header_number); - assert_eq!(result.unwrap().pays_fee, Pays::Yes); - - // non-mandatory free header is imported without fee - reset_best(); - let free_header_number = BEST + FreeHeadersInterval::get() as u8; - let result = submit_finality_proof(free_header_number); - assert_eq!(result.unwrap().pays_fee, Pays::No); - - // another non-mandatory free header is imported without fee - let free_header_number = BEST + FreeHeadersInterval::get() as u8 * 2; - let result = submit_finality_proof(free_header_number); - assert_eq!(result.unwrap().pays_fee, Pays::No); - - // now the rate limiter starts charging fees even for free headers - let free_header_number = BEST + FreeHeadersInterval::get() as u8 * 3; - let result = submit_finality_proof(free_header_number); - assert_eq!(result.unwrap().pays_fee, Pays::Yes); - - // check that we can import for free if `improved_by` is larger - // than the free interval - next_block(); - reset_best(); - let free_header_number = FreeHeadersInterval::get() as u8 + 42; - let result = submit_finality_proof(free_header_number); - assert_eq!(result.unwrap().pays_fee, Pays::No); - - // check that the 
rate limiter shares the counter between mandatory - // and free non-mandatory headers - next_block(); - reset_best(); - let free_header_number = BEST + FreeHeadersInterval::get() as u8 * 4; - let result = submit_finality_proof(free_header_number); - assert_eq!(result.unwrap().pays_fee, Pays::No); - let result = submit_mandatory_finality_proof(free_header_number + 1, 1); - assert_eq!(result.expect("call failed").pays_fee, Pays::No); - let result = submit_mandatory_finality_proof(free_header_number + 2, 2); - assert_eq!(result.expect("call failed").pays_fee, Pays::Yes); - }); - } - - #[test] - fn should_prune_headers_over_headers_to_keep_parameter() { - run_test(|| { - initialize_substrate_bridge(); - assert_ok!(submit_finality_proof(1)); - let first_header_hash = Pallet::::best_finalized().unwrap().hash(); - next_block(); - - assert_ok!(submit_finality_proof(2)); - next_block(); - assert_ok!(submit_finality_proof(3)); - next_block(); - assert_ok!(submit_finality_proof(4)); - next_block(); - assert_ok!(submit_finality_proof(5)); - next_block(); - - assert_ok!(submit_finality_proof(6)); - - assert!( - !ImportedHeaders::::contains_key(first_header_hash), - "First header should be pruned.", - ); - }) - } - - #[test] - fn storage_keys_computed_properly() { - assert_eq!( - PalletOperatingMode::::storage_value_final_key().to_vec(), - bp_header_chain::storage_keys::pallet_operating_mode_key("Grandpa").0, - ); - - assert_eq!( - CurrentAuthoritySet::::storage_value_final_key().to_vec(), - bp_header_chain::storage_keys::current_authority_set_key("Grandpa").0, - ); - - assert_eq!( - BestFinalized::::storage_value_final_key().to_vec(), - bp_header_chain::storage_keys::best_finalized_key("Grandpa").0, - ); - } - - #[test] - fn test_bridge_grandpa_call_is_correctly_defined() { - let header = test_header(0); - let init_data = InitializationData { - header: Box::new(header.clone()), - authority_list: authority_list(), - set_id: 1, - operating_mode: BasicOperatingMode::Normal, - }; - 
let justification = make_default_justification(&header); - - let direct_initialize_call = - Call::::initialize { init_data: init_data.clone() }; - let indirect_initialize_call = BridgeGrandpaCall::::initialize { init_data }; - assert_eq!(direct_initialize_call.encode(), indirect_initialize_call.encode()); - - let direct_submit_finality_proof_call = Call::::submit_finality_proof { - finality_target: Box::new(header.clone()), - justification: justification.clone(), - }; - let indirect_submit_finality_proof_call = - BridgeGrandpaCall::::submit_finality_proof { - finality_target: Box::new(header), - justification, - }; - assert_eq!( - direct_submit_finality_proof_call.encode(), - indirect_submit_finality_proof_call.encode() - ); - } - - generate_owned_bridge_module_tests!(BasicOperatingMode::Normal, BasicOperatingMode::Halted); - - #[test] - fn maybe_headers_to_keep_returns_correct_value() { - assert_eq!(MaybeHeadersToKeep::::get(), Some(mock::HeadersToKeep::get())); - } - - #[test] - fn submit_finality_proof_requires_signed_origin() { - run_test(|| { - initialize_substrate_bridge(); - - let header = test_header(1); - let justification = make_default_justification(&header); - - assert_noop!( - Pallet::::submit_finality_proof_ex( - RuntimeOrigin::root(), - Box::new(header), - justification, - TEST_GRANDPA_SET_ID, - false, - ), - DispatchError::BadOrigin, - ); - }) - } - - #[test] - fn on_free_header_imported_never_sets_to_none() { - run_test(|| { - FreeHeadersRemaining::::set(Some(2)); - on_free_header_imported::(); - assert_eq!(FreeHeadersRemaining::::get(), Some(1)); - on_free_header_imported::(); - assert_eq!(FreeHeadersRemaining::::get(), Some(0)); - on_free_header_imported::(); - assert_eq!(FreeHeadersRemaining::::get(), Some(0)); - }) - } -} diff --git a/modules/grandpa/src/mock.rs b/modules/grandpa/src/mock.rs deleted file mode 100644 index 78f414df146100dc3270e07d19c88746b39c7fd2..0000000000000000000000000000000000000000 --- a/modules/grandpa/src/mock.rs +++ 
/dev/null @@ -1,114 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -// From construct_runtime macro -#![allow(clippy::from_over_into)] - -use bp_header_chain::ChainWithGrandpa; -use bp_runtime::{Chain, ChainId}; -use frame_support::{ - construct_runtime, derive_impl, parameter_types, traits::Hooks, weights::Weight, -}; -use sp_core::sr25519::Signature; - -pub type AccountId = u64; -pub type TestHeader = sp_runtime::testing::Header; -pub type TestNumber = u64; - -type Block = frame_system::mocking::MockBlock; - -pub const MAX_BRIDGED_AUTHORITIES: u32 = 5; - -use crate as grandpa; - -construct_runtime! { - pub enum TestRuntime - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Grandpa: grandpa::{Pallet, Call, Event}, - } -} - -#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] -impl frame_system::Config for TestRuntime { - type Block = Block; -} - -parameter_types! 
{ - pub const MaxFreeHeadersPerBlock: u32 = 2; - pub const FreeHeadersInterval: u32 = 32; - pub const HeadersToKeep: u32 = 5; -} - -impl grandpa::Config for TestRuntime { - type RuntimeEvent = RuntimeEvent; - type BridgedChain = TestBridgedChain; - type MaxFreeHeadersPerBlock = MaxFreeHeadersPerBlock; - type FreeHeadersInterval = FreeHeadersInterval; - type HeadersToKeep = HeadersToKeep; - type WeightInfo = (); -} - -#[derive(Debug)] -pub struct TestBridgedChain; - -impl Chain for TestBridgedChain { - const ID: ChainId = *b"tbch"; - - type BlockNumber = frame_system::pallet_prelude::BlockNumberFor; - type Hash = ::Hash; - type Hasher = ::Hashing; - type Header = TestHeader; - - type AccountId = AccountId; - type Balance = u64; - type Nonce = u64; - type Signature = Signature; - - fn max_extrinsic_size() -> u32 { - unreachable!() - } - fn max_extrinsic_weight() -> Weight { - unreachable!() - } -} - -impl ChainWithGrandpa for TestBridgedChain { - const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = ""; - const MAX_AUTHORITIES_COUNT: u32 = MAX_BRIDGED_AUTHORITIES; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 8; - const MAX_MANDATORY_HEADER_SIZE: u32 = 256; - const AVERAGE_HEADER_SIZE: u32 = 64; -} - -/// Return test externalities to use in tests. -pub fn new_test_ext() -> sp_io::TestExternalities { - sp_io::TestExternalities::new(Default::default()) -} - -/// Return test within default test externalities context. -pub fn run_test(test: impl FnOnce() -> T) -> T { - new_test_ext().execute_with(|| { - let _ = Grandpa::on_initialize(0); - test() - }) -} - -/// Return test header with given number. 
-pub fn test_header(num: TestNumber) -> TestHeader { - // We wrap the call to avoid explicit type annotations in our tests - bp_test_utils::test_header(num) -} diff --git a/modules/grandpa/src/storage_types.rs b/modules/grandpa/src/storage_types.rs deleted file mode 100644 index 6d1a7882dd4996c600e8ae1cb149410b60af3bf4..0000000000000000000000000000000000000000 --- a/modules/grandpa/src/storage_types.rs +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Wrappers for public types that are implementing `MaxEncodedLen` - -use crate::{Config, Error}; - -use bp_header_chain::{AuthoritySet, ChainWithGrandpa}; -use codec::{Decode, Encode, MaxEncodedLen}; -use frame_support::{traits::Get, BoundedVec, CloneNoBound, RuntimeDebugNoBound}; -use scale_info::TypeInfo; -use sp_consensus_grandpa::{AuthorityId, AuthorityList, AuthorityWeight, SetId}; -use sp_std::marker::PhantomData; - -/// A bounded list of Grandpa authorities with associated weights. -pub type StoredAuthorityList = - BoundedVec<(AuthorityId, AuthorityWeight), MaxBridgedAuthorities>; - -/// Adapter for using `T::BridgedChain::MAX_BRIDGED_AUTHORITIES` in `BoundedVec`. 
-pub struct StoredAuthorityListLimit(PhantomData<(T, I)>); - -impl, I: 'static> Get for StoredAuthorityListLimit { - fn get() -> u32 { - T::BridgedChain::MAX_AUTHORITIES_COUNT - } -} - -/// A bounded GRANDPA Authority List and ID. -#[derive(CloneNoBound, Decode, Encode, Eq, TypeInfo, MaxEncodedLen, RuntimeDebugNoBound)] -#[scale_info(skip_type_params(T, I))] -pub struct StoredAuthoritySet, I: 'static> { - /// List of GRANDPA authorities for the current round. - pub authorities: StoredAuthorityList>, - /// Monotonic identifier of the current GRANDPA authority set. - pub set_id: SetId, -} - -impl, I: 'static> StoredAuthoritySet { - /// Try to create a new bounded GRANDPA Authority Set from unbounded list. - /// - /// Returns error if number of authorities in the provided list is too large. - pub fn try_new(authorities: AuthorityList, set_id: SetId) -> Result> { - Ok(Self { - authorities: TryFrom::try_from(authorities) - .map_err(|_| Error::TooManyAuthoritiesInSet)?, - set_id, - }) - } - - /// Returns number of bytes that may be subtracted from the PoV component of - /// `submit_finality_proof` call, because the actual authorities set is smaller than the maximal - /// configured. - /// - /// Maximal authorities set size is configured by the `MaxBridgedAuthorities` constant from - /// the pallet configuration. The PoV of the call includes the size of maximal authorities - /// count. If the actual size is smaller, we may subtract extra bytes from this component. - pub fn unused_proof_size(&self) -> u64 { - // we can only safely estimate bytes that are occupied by the authority data itself. 
We have - // no means here to compute PoV bytes, occupied by extra trie nodes or extra bytes in the - // whole set encoding - let single_authority_max_encoded_len = - <(AuthorityId, AuthorityWeight)>::max_encoded_len() as u64; - let extra_authorities = - T::BridgedChain::MAX_AUTHORITIES_COUNT.saturating_sub(self.authorities.len() as _); - single_authority_max_encoded_len.saturating_mul(extra_authorities as u64) - } -} - -impl, I: 'static> PartialEq for StoredAuthoritySet { - fn eq(&self, other: &Self) -> bool { - self.set_id == other.set_id && self.authorities == other.authorities - } -} - -impl, I: 'static> Default for StoredAuthoritySet { - fn default() -> Self { - StoredAuthoritySet { authorities: BoundedVec::default(), set_id: 0 } - } -} - -impl, I: 'static> From> for AuthoritySet { - fn from(t: StoredAuthoritySet) -> Self { - AuthoritySet { authorities: t.authorities.into(), set_id: t.set_id } - } -} - -#[cfg(test)] -mod tests { - use crate::mock::{TestRuntime, MAX_BRIDGED_AUTHORITIES}; - use bp_test_utils::authority_list; - - type StoredAuthoritySet = super::StoredAuthoritySet; - - #[test] - fn unused_proof_size_works() { - let authority_entry = authority_list().pop().unwrap(); - - // when we have exactly `MaxBridgedAuthorities` authorities - assert_eq!( - StoredAuthoritySet::try_new( - vec![authority_entry.clone(); MAX_BRIDGED_AUTHORITIES as usize], - 0, - ) - .unwrap() - .unused_proof_size(), - 0, - ); - - // when we have less than `MaxBridgedAuthorities` authorities - assert_eq!( - StoredAuthoritySet::try_new( - vec![authority_entry; MAX_BRIDGED_AUTHORITIES as usize - 1], - 0, - ) - .unwrap() - .unused_proof_size(), - 40, - ); - - // and we can't have more than `MaxBridgedAuthorities` authorities in the bounded vec, so - // no test for this case - } -} diff --git a/modules/grandpa/src/weights.rs b/modules/grandpa/src/weights.rs deleted file mode 100644 index a75e7b5a8e4ada8ce880a040492c904d8035642c..0000000000000000000000000000000000000000 --- 
a/modules/grandpa/src/weights.rs +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Autogenerated weights for pallet_bridge_grandpa -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-02, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `covid`, CPU: `11th Gen Intel(R) Core(TM) i7-11800H @ 2.30GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 - -// Executed Command: -// target/release/unknown-bridge-node -// benchmark -// pallet -// --chain=dev -// --steps=50 -// --repeat=20 -// --pallet=pallet_bridge_grandpa -// --extrinsic=* -// --execution=wasm -// --wasm-execution=Compiled -// --heap-pages=4096 -// --output=./modules/grandpa/src/weights.rs -// --template=./.maintain/bridge-weight-template.hbs - -#![allow(clippy::all)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{ - traits::Get, - weights::{constants::RocksDbWeight, Weight}, -}; -use sp_std::marker::PhantomData; - -/// Weight functions needed for pallet_bridge_grandpa. 
-pub trait WeightInfo { - fn submit_finality_proof(p: u32, v: u32) -> Weight; -} - -/// Weights for `pallet_bridge_grandpa` that are generated using one of the Bridge testnets. -/// -/// Those weights are test only and must never be used in production. -pub struct BridgeWeight(PhantomData); -impl WeightInfo for BridgeWeight { - /// Storage: BridgeUnknownGrandpa PalletOperatingMode (r:1 w:0) - /// - /// Proof: BridgeUnknownGrandpa PalletOperatingMode (max_values: Some(1), max_size: Some(1), - /// added: 496, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa RequestCount (r:1 w:1) - /// - /// Proof: BridgeUnknownGrandpa RequestCount (max_values: Some(1), max_size: Some(4), added: - /// 499, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa BestFinalized (r:1 w:1) - /// - /// Proof: BridgeUnknownGrandpa BestFinalized (max_values: Some(1), max_size: Some(36), added: - /// 531, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa CurrentAuthoritySet (r:1 w:0) - /// - /// Proof: BridgeUnknownGrandpa CurrentAuthoritySet (max_values: Some(1), max_size: Some(209), - /// added: 704, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa ImportedHashesPointer (r:1 w:1) - /// - /// Proof: BridgeUnknownGrandpa ImportedHashesPointer (max_values: Some(1), max_size: Some(4), - /// added: 499, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa ImportedHashes (r:1 w:1) - /// - /// Proof: BridgeUnknownGrandpa ImportedHashes (max_values: Some(14400), max_size: Some(36), - /// added: 2016, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:0 w:2) - /// - /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), - /// added: 2048, mode: MaxEncodedLen) - /// - /// The range of component `p` is `[1, 4]`. - /// - /// The range of component `v` is `[50, 100]`. 
- fn submit_finality_proof(p: u32, v: u32) -> Weight { - // Proof Size summary in bytes: - // Measured: `394 + p * (60 ±0)` - // Estimated: `4745` - // Minimum execution time: 228_072 nanoseconds. - Weight::from_parts(57_853_228, 4745) - // Standard Error: 149_421 - .saturating_add(Weight::from_parts(36_708_702, 0).saturating_mul(p.into())) - // Standard Error: 10_625 - .saturating_add(Weight::from_parts(1_469_032, 0).saturating_mul(v.into())) - .saturating_add(T::DbWeight::get().reads(6_u64)) - .saturating_add(T::DbWeight::get().writes(6_u64)) - } -} - -// For backwards compatibility and tests -impl WeightInfo for () { - /// Storage: BridgeUnknownGrandpa PalletOperatingMode (r:1 w:0) - /// - /// Proof: BridgeUnknownGrandpa PalletOperatingMode (max_values: Some(1), max_size: Some(1), - /// added: 496, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa RequestCount (r:1 w:1) - /// - /// Proof: BridgeUnknownGrandpa RequestCount (max_values: Some(1), max_size: Some(4), added: - /// 499, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa BestFinalized (r:1 w:1) - /// - /// Proof: BridgeUnknownGrandpa BestFinalized (max_values: Some(1), max_size: Some(36), added: - /// 531, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa CurrentAuthoritySet (r:1 w:0) - /// - /// Proof: BridgeUnknownGrandpa CurrentAuthoritySet (max_values: Some(1), max_size: Some(209), - /// added: 704, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa ImportedHashesPointer (r:1 w:1) - /// - /// Proof: BridgeUnknownGrandpa ImportedHashesPointer (max_values: Some(1), max_size: Some(4), - /// added: 499, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa ImportedHashes (r:1 w:1) - /// - /// Proof: BridgeUnknownGrandpa ImportedHashes (max_values: Some(14400), max_size: Some(36), - /// added: 2016, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:0 w:2) - /// - /// Proof: BridgeUnknownGrandpa ImportedHeaders 
(max_values: Some(14400), max_size: Some(68), - /// added: 2048, mode: MaxEncodedLen) - /// - /// The range of component `p` is `[1, 4]`. - /// - /// The range of component `v` is `[50, 100]`. - fn submit_finality_proof(p: u32, v: u32) -> Weight { - // Proof Size summary in bytes: - // Measured: `394 + p * (60 ±0)` - // Estimated: `4745` - // Minimum execution time: 228_072 nanoseconds. - Weight::from_parts(57_853_228, 4745) - // Standard Error: 149_421 - .saturating_add(Weight::from_parts(36_708_702, 0).saturating_mul(p.into())) - // Standard Error: 10_625 - .saturating_add(Weight::from_parts(1_469_032, 0).saturating_mul(v.into())) - .saturating_add(RocksDbWeight::get().reads(6_u64)) - .saturating_add(RocksDbWeight::get().writes(6_u64)) - } -} diff --git a/modules/messages/Cargo.toml b/modules/messages/Cargo.toml deleted file mode 100644 index f6b1e71203dd1857f290b27a4058d3d28cecd272..0000000000000000000000000000000000000000 --- a/modules/messages/Cargo.toml +++ /dev/null @@ -1,63 +0,0 @@ -[package] -name = "pallet-bridge-messages" -description = "Module that allows bridged chains to exchange messages using lane concept." 
-version = "0.7.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } -log = { workspace = true } -num-traits = { version = "0.2", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } - -# Bridge dependencies - -bp-messages = { path = "../../primitives/messages", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } - -# Substrate Dependencies - -frame-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false, optional = true } -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -frame-system = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } - -[dev-dependencies] -bp-test-utils = { path = "../../primitives/test-utils" } -pallet-balances = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-io = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } - -[features] -default = ["std"] -std = [ - "bp-messages/std", - "bp-runtime/std", - "codec/std", - "frame-benchmarking/std", - "frame-support/std", - "frame-system/std", - "log/std", - "num-traits/std", - "scale-info/std", - "sp-runtime/std", - "sp-std/std", -] -runtime-benchmarks = [ - "frame-benchmarking/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "pallet-balances/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", -] 
-try-runtime = [ - "frame-support/try-runtime", - "frame-system/try-runtime", - "pallet-balances/try-runtime", - "sp-runtime/try-runtime", -] diff --git a/modules/messages/README.md b/modules/messages/README.md deleted file mode 100644 index fe62305748cd1d6030a7a8085bff29f24ee4dbc5..0000000000000000000000000000000000000000 --- a/modules/messages/README.md +++ /dev/null @@ -1,201 +0,0 @@ -# Bridge Messages Pallet - -The messages pallet is used to deliver messages from source chain to target chain. Message is (almost) opaque to the -module and the final goal is to hand message to the message dispatch mechanism. - -## Contents - -- [Overview](#overview) -- [Message Workflow](#message-workflow) -- [Integrating Message Lane Module into Runtime](#integrating-messages-module-into-runtime) -- [Non-Essential Functionality](#non-essential-functionality) -- [Weights of Module Extrinsics](#weights-of-module-extrinsics) - -## Overview - -Message lane is an unidirectional channel, where messages are sent from source chain to the target chain. At the same -time, a single instance of messages module supports both outbound lanes and inbound lanes. So the chain where the module -is deployed (this chain), may act as a source chain for outbound messages (heading to a bridged chain) and as a target -chain for inbound messages (coming from a bridged chain). - -Messages module supports multiple message lanes. Every message lane is identified with a 4-byte identifier. Messages -sent through the lane are assigned unique (for this lane) increasing integer value that is known as nonce ("number that -can only be used once"). Messages that are sent over the same lane are guaranteed to be delivered to the target chain in -the same order they're sent from the source chain. In other words, message with nonce `N` will be delivered right before -delivering a message with nonce `N+1`. - -Single message lane may be seen as a transport channel for single application (onchain, offchain or mixed). 
At the same -time the module itself never dictates any lane or message rules. In the end, it is the runtime developer who defines -what message lane and message mean for this runtime. - -In our [Kusama<>Polkadot bridge](../../docs/polkadot-kusama-bridge-overview.md) we are using lane as a channel of -communication between two parachains of different relay chains. For example, lane `[0, 0, 0, 0]` is used for Polkadot <> -Kusama Asset Hub communications. Other lanes may be used to bridge other parachains. - -## Message Workflow - -The pallet is not intended to be used by end users and provides no public calls to send the message. Instead, it -provides runtime-internal method that allows other pallets (or other runtime code) to queue outbound messages. - -The message "appears" when some runtime code calls the `send_message()` method of the pallet. The submitter specifies -the lane that they're willing to use and the message itself. If some fee must be paid for sending the message, it must -be paid outside of the pallet. If a message passes all checks (that include, for example, message size check, disabled -lane check, ...), the nonce is assigned and the message is stored in the module storage. The message is in an -"undelivered" state now. - -We assume that there are external, offchain actors, called relayers, that are submitting module related transactions to -both target and source chains. The pallet itself has no assumptions about relayers incentivization scheme, but it has -some callbacks for paying rewards. See [Integrating Messages Module into -runtime](#Integrating-Messages-Module-into-runtime) for details. - -Eventually, some relayer would notice this message in the "undelivered" state and it would decide to deliver this -message. Relayer then crafts `receive_messages_proof()` transaction (aka delivery transaction) for the messages module -instance, deployed at the target chain. 
Relayer provides its account id at the source chain, the proof of message (or -several messages), the number of messages in the transaction and their cumulative dispatch weight. Once a transaction is -mined, the message is considered "delivered". - -Once a message is delivered, the relayer may want to confirm delivery back to the source chain. There are two reasons -why it would want to do that. The first is that we intentionally limit number of "delivered", but not yet "confirmed" -messages at inbound lanes (see [What about other Constants in the Messages Module Configuration -Trait](#What-about-other-Constants-in-the-Messages-Module-Configuration-Trait) for explanation). So at some point, the -target chain may stop accepting new messages until relayers confirm some of these. The second is that if the relayer -wants to be rewarded for delivery, it must prove the fact that it has actually delivered the message. And this proof may -only be generated after the delivery transaction is mined. So relayer crafts the `receive_messages_delivery_proof()` -transaction (aka confirmation transaction) for the messages module instance, deployed at the source chain. Once this -transaction is mined, the message is considered "confirmed". - -The "confirmed" state is the final state of the message. But there's one last thing related to the message - the fact -that it is now "confirmed" and reward has been paid to the relayer (or at least callback for this has been called), must -be confirmed to the target chain. Otherwise, we may reach the limit of "unconfirmed" messages at the target chain and it -will stop accepting new messages. So relayer sometimes includes a nonce of the latest "confirmed" message in the next -`receive_messages_proof()` transaction, proving that some messages have been confirmed. - -## Integrating Messages Module into Runtime - -As it has been said above, the messages module supports both outbound and inbound message lanes. 
So if we will integrate -a module in some runtime, it may act as the source chain runtime for outbound messages and as the target chain runtime -for inbound messages. In this section, we'll sometimes refer to the chain we're currently integrating with, as "this -chain" and the other chain as "bridged chain". - -Messages module doesn't simply accept transactions that are claiming that the bridged chain has some updated data for -us. Instead of this, the module assumes that the bridged chain is able to prove that updated data in some way. The proof -is abstracted from the module and may be of any kind. In our Substrate-to-Substrate bridge we're using runtime storage -proofs. Other bridges may use transaction proofs, Substrate header digests or anything else that may be proved. - -**IMPORTANT NOTE**: everything below in this chapter describes details of the messages module configuration. But if -you're interested in well-probed and relatively easy integration of two Substrate-based chains, you may want to look at -the [bridge-runtime-common](../../bin/runtime-common/) crate. This crate is providing a lot of helpers for integration, -which may be directly used from within your runtime. Then if you'll decide to change something in this scheme, get back -here for detailed information. - -### General Information - -The messages module supports instances. Every module instance is supposed to bridge this chain and some bridged chain. -To bridge with another chain, using another instance is suggested (this isn't forced anywhere in the code, though). Keep -in mind, that the pallet may be used to build virtual channels between multiple chains, as we do in our [Polkadot <> -Kusama bridge](../../docs/polkadot-kusama-bridge-overview.md). There, the pallet actually bridges only two parachains - -Kusama Bridge Hub and Polkadot Bridge Hub. However, other Kusama and Polkadot parachains are able to send (XCM) messages -to their Bridge Hubs. 
The messages will be delivered to the other side of the bridge and routed to the proper -destination parachain within the bridged chain consensus. - -Message submitters may track message progress by inspecting module events. When Message is accepted, the -`MessageAccepted` event is emitted. The event contains both message lane identifier and nonce that has been assigned to -the message. When a message is delivered to the target chain, the `MessagesDelivered` event is emitted from the -`receive_messages_delivery_proof()` transaction. The `MessagesDelivered` contains the message lane identifier and -inclusive range of delivered message nonces. - -The pallet provides no means to get the result of message dispatch at the target chain. If that is required, it must be -done outside of the pallet. For example, XCM messages, when dispatched, have special instructions to send some data back -to the sender. Other dispatchers may use similar mechanism for that. -### How to plug-in Messages Module to Send Messages to the Bridged Chain? - -The `pallet_bridge_messages::Config` trait has 3 main associated types that are used to work with outbound messages. The -`pallet_bridge_messages::Config::TargetHeaderChain` defines how we see the bridged chain as the target for our outbound -messages. It must be able to check that the bridged chain may accept our message - like that the message has size below -maximal possible transaction size of the chain and so on. And when the relayer sends us a confirmation transaction, this -implementation must be able to parse and verify the proof of messages delivery. Normally, you would reuse the same -(configurable) type on all chains that are sending messages to the same bridged chain. - -The last type is the `pallet_bridge_messages::Config::DeliveryConfirmationPayments`. When confirmation -transaction is received, we call the `pay_reward()` method, passing the range of delivered messages. 
-You may use the [`pallet-bridge-relayers`](../relayers/) pallet and its -[`DeliveryConfirmationPaymentsAdapter`](../relayers/src/payment_adapter.rs) adapter as a possible -implementation. It allows you to pay fixed reward for relaying the message and some of its portion -for confirming delivery. - -### I have a Messages Module in my Runtime, but I Want to Reject all Outbound Messages. What shall I do? - -You should be looking at the `bp_messages::source_chain::ForbidOutboundMessages` structure -[`bp_messages::source_chain`](../../primitives/messages/src/source_chain.rs). It implements all required traits and will -simply reject all transactions, related to outbound messages. - -### How to plug-in Messages Module to Receive Messages from the Bridged Chain? - -The `pallet_bridge_messages::Config` trait has 2 main associated types that are used to work with inbound messages. The -`pallet_bridge_messages::Config::SourceHeaderChain` defines how we see the bridged chain as the source of our inbound -messages. When relayer sends us a delivery transaction, this implementation must be able to parse and verify the proof -of messages wrapped in this transaction. Normally, you would reuse the same (configurable) type on all chains that are -sending messages to the same bridged chain. - -The `pallet_bridge_messages::Config::MessageDispatch` defines a way on how to dispatch delivered messages. Apart from -actually dispatching the message, the implementation must return the correct dispatch weight of the message before -dispatch is called. - -### I have a Messages Module in my Runtime, but I Want to Reject all Inbound Messages. What shall I do? - -You should be looking at the `bp_messages::target_chain::ForbidInboundMessages` structure from the -[`bp_messages::target_chain`](../../primitives/messages/src/target_chain.rs) module. It implements all required traits -and will simply reject all transactions, related to inbound messages. 
- -### What about other Constants in the Messages Module Configuration Trait? - -Two settings that are used to check messages in the `send_message()` function. The -`pallet_bridge_messages::Config::ActiveOutboundLanes` is an array of all message lanes, that may be used to send -messages. All messages sent using other lanes are rejected. All messages that have size above -`pallet_bridge_messages::Config::MaximalOutboundPayloadSize` will also be rejected. - -To be able to reward the relayer for delivering messages, we store a map of message nonces range => identifier of the -relayer that has delivered this range at the target chain runtime storage. If a relayer delivers multiple consequent -ranges, they're merged into single entry. So there may be more than one entry for the same relayer. Eventually, this -whole map must be delivered back to the source chain to confirm delivery and pay rewards. So to make sure we are able to -craft this confirmation transaction, we need to: (1) keep the size of this map below a certain limit and (2) make sure -that the weight of processing this map is below a certain limit. Both size and processing weight mostly depend on the -number of entries. The number of entries is limited with the -`pallet_bridge_messages::ConfigMaxUnrewardedRelayerEntriesAtInboundLane` parameter. Processing weight also depends on -the total number of messages that are being confirmed, because every confirmed message needs to be read. So there's -another `pallet_bridge_messages::Config::MaxUnconfirmedMessagesAtInboundLane` parameter for that. - -When choosing values for these parameters, you must also keep in mind that if proof in your scheme is based on finality -of headers (and it is the most obvious option for Substrate-based chains with finality notion), then choosing too small -values for these parameters may cause significant delays in message delivery. 
That's because there are too many actors -involved in this scheme: 1) authorities that are finalizing headers of the target chain need to finalize header with -non-empty map; 2) the headers relayer then needs to submit this header and its finality proof to the source chain; 3) -the messages relayer must then send confirmation transaction (storage proof of this map) to the source chain; 4) when -the confirmation transaction will be mined at some header, source chain authorities must finalize this header; 5) the -headers relay then needs to submit this header and its finality proof to the target chain; 6) only now the messages -relayer may submit new messages from the source to target chain and prune the entry from the map. - -Delivery transaction requires the relayer to provide both number of entries and total number of messages in the map. -This means that the module never charges an extra cost for delivering a map - the relayer would need to pay exactly for -the number of entries+messages it has delivered. So the best guess for values of these parameters would be the pair that -would occupy `N` percent of the maximal transaction size and weight of the source chain. The `N` should be large enough -to process large maps, at the same time keeping reserve for future source chain upgrades. - -## Non-Essential Functionality - -There may be a special account in every runtime where the messages module is deployed. This account, named 'module -owner', is like a module-level sudo account - he's able to halt and resume all module operations without requiring -runtime upgrade. Calls that are related to this account are: -- `fn set_owner()`: current module owner may call it to transfer "ownership" to another account; -- `fn halt_operations()`: the module owner (or sudo account) may call this function to stop all module operations. After - this call, all message-related transactions will be rejected until further `resume_operations` call'. 
This call may be - used when something extraordinary happens with the bridge; -- `fn resume_operations()`: module owner may call this function to resume bridge operations. The module will resume its - regular operations after this call. - -If pallet owner is not defined, the governance may be used to make those calls. - -## Messages Relay - -We have an offchain actor, who is watching for new messages and submits them to the bridged chain. It is the messages -relay - you may look at the [crate level documentation and the code](../../relays/messages/). diff --git a/modules/messages/src/benchmarking.rs b/modules/messages/src/benchmarking.rs deleted file mode 100644 index 4f13c4409672b3e76d36fd7d3dd2fab5c7e2ec1b..0000000000000000000000000000000000000000 --- a/modules/messages/src/benchmarking.rs +++ /dev/null @@ -1,461 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Messages pallet benchmarking. 
- -use crate::{ - inbound_lane::InboundLaneStorage, outbound_lane, weights_ext::EXPECTED_DEFAULT_MESSAGE_LENGTH, - Call, OutboundLanes, RuntimeInboundLaneStorage, -}; - -use bp_messages::{ - source_chain::TargetHeaderChain, target_chain::SourceHeaderChain, DeliveredMessages, - InboundLaneData, LaneId, MessageNonce, OutboundLaneData, UnrewardedRelayer, - UnrewardedRelayersState, -}; -use bp_runtime::StorageProofSize; -use codec::Decode; -use frame_benchmarking::{account, benchmarks_instance_pallet}; -use frame_support::weights::Weight; -use frame_system::RawOrigin; -use sp_runtime::{traits::TrailingZeroInput, BoundedVec}; -use sp_std::{ops::RangeInclusive, prelude::*}; - -const SEED: u32 = 0; - -/// Pallet we're benchmarking here. -pub struct Pallet, I: 'static = ()>(crate::Pallet); - -/// Benchmark-specific message proof parameters. -#[derive(Debug)] -pub struct MessageProofParams { - /// Id of the lane. - pub lane: LaneId, - /// Range of messages to include in the proof. - pub message_nonces: RangeInclusive, - /// If `Some`, the proof needs to include this outbound lane data. - pub outbound_lane_data: Option, - /// If `true`, the caller expects that the proof will contain correct messages that will - /// be successfully dispatched. This is only called from the "optional" - /// `receive_single_message_proof_with_dispatch` benchmark. If you don't need it, just - /// return `true` from the `is_message_successfully_dispatched`. - pub is_successful_dispatch_expected: bool, - /// Proof size requirements. - pub size: StorageProofSize, -} - -/// Benchmark-specific message delivery proof parameters. -#[derive(Debug)] -pub struct MessageDeliveryProofParams { - /// Id of the lane. - pub lane: LaneId, - /// The proof needs to include this inbound lane data. - pub inbound_lane_data: InboundLaneData, - /// Proof size requirements. - pub size: StorageProofSize, -} - -/// Trait that must be implemented by runtime. 
-pub trait Config: crate::Config { - /// Lane id to use in benchmarks. - /// - /// By default, lane 00000000 is used. - fn bench_lane_id() -> LaneId { - LaneId([0, 0, 0, 0]) - } - - /// Return id of relayer account at the bridged chain. - /// - /// By default, zero account is returned. - fn bridged_relayer_id() -> Self::InboundRelayer { - Self::InboundRelayer::decode(&mut TrailingZeroInput::zeroes()).unwrap() - } - - /// Create given account and give it enough balance for test purposes. Used to create - /// relayer account at the target chain. Is strictly necessary when your rewards scheme - /// assumes that the relayer account must exist. - /// - /// Does nothing by default. - fn endow_account(_account: &Self::AccountId) {} - - /// Prepare messages proof to receive by the module. - fn prepare_message_proof( - params: MessageProofParams, - ) -> (::MessagesProof, Weight); - /// Prepare messages delivery proof to receive by the module. - fn prepare_message_delivery_proof( - params: MessageDeliveryProofParams, - ) -> >::MessagesDeliveryProof; - - /// Returns true if message has been successfully dispatched or not. - fn is_message_successfully_dispatched(_nonce: MessageNonce) -> bool { - true - } - - /// Returns true if given relayer has been rewarded for some of its actions. - fn is_relayer_rewarded(relayer: &Self::AccountId) -> bool; -} - -benchmarks_instance_pallet! { - // - // Benchmarks that are used directly by the runtime calls weight formulae. - // - - // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions: - // * proof does not include outbound lane state proof; - // * inbound lane already has state, so it needs to be read and decoded; - // * message is dispatched (reminder: dispatch weight should be minimal); - // * message requires all heavy checks done by dispatcher. - // - // This is base benchmark for all other message delivery benchmarks. 
- receive_single_message_proof { - let relayer_id_on_source = T::bridged_relayer_id(); - let relayer_id_on_target = account("relayer", 0, SEED); - T::endow_account(&relayer_id_on_target); - - // mark messages 1..=20 as delivered - receive_messages::(20); - - let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { - lane: T::bench_lane_id(), - message_nonces: 21..=21, - outbound_lane_data: None, - is_successful_dispatch_expected: false, - size: StorageProofSize::Minimal(EXPECTED_DEFAULT_MESSAGE_LENGTH), - }); - }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) - verify { - assert_eq!( - crate::InboundLanes::::get(&T::bench_lane_id()).last_delivered_nonce(), - 21, - ); - } - - // Benchmark `receive_messages_proof` extrinsic with two minimal-weight messages and following conditions: - // * proof does not include outbound lane state proof; - // * inbound lane already has state, so it needs to be read and decoded; - // * message is dispatched (reminder: dispatch weight should be minimal); - // * message requires all heavy checks done by dispatcher. - // - // The weight of single message delivery could be approximated as - // `weight(receive_two_messages_proof) - weight(receive_single_message_proof)`. - // This won't be super-accurate if message has non-zero dispatch weight, but estimation should - // be close enough to real weight. 
- receive_two_messages_proof { - let relayer_id_on_source = T::bridged_relayer_id(); - let relayer_id_on_target = account("relayer", 0, SEED); - T::endow_account(&relayer_id_on_target); - - // mark messages 1..=20 as delivered - receive_messages::(20); - - let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { - lane: T::bench_lane_id(), - message_nonces: 21..=22, - outbound_lane_data: None, - is_successful_dispatch_expected: false, - size: StorageProofSize::Minimal(EXPECTED_DEFAULT_MESSAGE_LENGTH), - }); - }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 2, dispatch_weight) - verify { - assert_eq!( - crate::InboundLanes::::get(&T::bench_lane_id()).last_delivered_nonce(), - 22, - ); - } - - // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions: - // * proof includes outbound lane state proof; - // * inbound lane already has state, so it needs to be read and decoded; - // * message is successfully dispatched (reminder: dispatch weight should be minimal); - // * message requires all heavy checks done by dispatcher. - // - // The weight of outbound lane state delivery would be - // `weight(receive_single_message_proof_with_outbound_lane_state) - weight(receive_single_message_proof)`. - // This won't be super-accurate if message has non-zero dispatch weight, but estimation should - // be close enough to real weight. 
- receive_single_message_proof_with_outbound_lane_state { - let relayer_id_on_source = T::bridged_relayer_id(); - let relayer_id_on_target = account("relayer", 0, SEED); - T::endow_account(&relayer_id_on_target); - - // mark messages 1..=20 as delivered - receive_messages::(20); - - let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { - lane: T::bench_lane_id(), - message_nonces: 21..=21, - outbound_lane_data: Some(OutboundLaneData { - oldest_unpruned_nonce: 21, - latest_received_nonce: 20, - latest_generated_nonce: 21, - }), - is_successful_dispatch_expected: false, - size: StorageProofSize::Minimal(EXPECTED_DEFAULT_MESSAGE_LENGTH), - }); - }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) - verify { - let lane_state = crate::InboundLanes::::get(&T::bench_lane_id()); - assert_eq!(lane_state.last_delivered_nonce(), 21); - assert_eq!(lane_state.last_confirmed_nonce, 20); - } - - // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions: - // * the proof has large leaf with total size of approximately 1KB; - // * proof does not include outbound lane state proof; - // * inbound lane already has state, so it needs to be read and decoded; - // * message is dispatched (reminder: dispatch weight should be minimal); - // * message requires all heavy checks done by dispatcher. - // - // With single KB of messages proof, the weight of the call is increased (roughly) by - // `(receive_single_message_proof_16KB - receive_single_message_proof_1_kb) / 15`. 
- receive_single_message_proof_1_kb { - let relayer_id_on_source = T::bridged_relayer_id(); - let relayer_id_on_target = account("relayer", 0, SEED); - T::endow_account(&relayer_id_on_target); - - // mark messages 1..=20 as delivered - receive_messages::(20); - - let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { - lane: T::bench_lane_id(), - message_nonces: 21..=21, - outbound_lane_data: None, - is_successful_dispatch_expected: false, - size: StorageProofSize::HasLargeLeaf(1024), - }); - }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) - verify { - assert_eq!( - crate::InboundLanes::::get(&T::bench_lane_id()).last_delivered_nonce(), - 21, - ); - } - - // Benchmark `receive_messages_proof` extrinsic with single minimal-weight message and following conditions: - // * the proof has large leaf with total size of approximately 16KB; - // * proof does not include outbound lane state proof; - // * inbound lane already has state, so it needs to be read and decoded; - // * message is dispatched (reminder: dispatch weight should be minimal); - // * message requires all heavy checks done by dispatcher. - // - // Size of proof grows because it contains extra trie nodes in it. - // - // With single KB of messages proof, the weight of the call is increased (roughly) by - // `(receive_single_message_proof_16KB - receive_single_message_proof) / 15`. 
- receive_single_message_proof_16_kb { - let relayer_id_on_source = T::bridged_relayer_id(); - let relayer_id_on_target = account("relayer", 0, SEED); - T::endow_account(&relayer_id_on_target); - - // mark messages 1..=20 as delivered - receive_messages::(20); - - let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { - lane: T::bench_lane_id(), - message_nonces: 21..=21, - outbound_lane_data: None, - is_successful_dispatch_expected: false, - size: StorageProofSize::HasLargeLeaf(16 * 1024), - }); - }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) - verify { - assert_eq!( - crate::InboundLanes::::get(&T::bench_lane_id()).last_delivered_nonce(), - 21, - ); - } - - // Benchmark `receive_messages_delivery_proof` extrinsic with following conditions: - // * single relayer is rewarded for relaying single message; - // * relayer account does not exist (in practice it needs to exist in production environment). - // - // This is base benchmark for all other confirmations delivery benchmarks. 
- receive_delivery_proof_for_single_message { - let relayer_id: T::AccountId = account("relayer", 0, SEED); - - // send message that we're going to confirm - send_regular_message::(); - - let relayers_state = UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 1, - total_messages: 1, - last_delivered_nonce: 1, - }; - let proof = T::prepare_message_delivery_proof(MessageDeliveryProofParams { - lane: T::bench_lane_id(), - inbound_lane_data: InboundLaneData { - relayers: vec![UnrewardedRelayer { - relayer: relayer_id.clone(), - messages: DeliveredMessages::new(1), - }].into_iter().collect(), - last_confirmed_nonce: 0, - }, - size: StorageProofSize::Minimal(0), - }); - }: receive_messages_delivery_proof(RawOrigin::Signed(relayer_id.clone()), proof, relayers_state) - verify { - assert_eq!(OutboundLanes::::get(T::bench_lane_id()).latest_received_nonce, 1); - assert!(T::is_relayer_rewarded(&relayer_id)); - } - - // Benchmark `receive_messages_delivery_proof` extrinsic with following conditions: - // * single relayer is rewarded for relaying two messages; - // * relayer account does not exist (in practice it needs to exist in production environment). - // - // Additional weight for paying single-message reward to the same relayer could be computed - // as `weight(receive_delivery_proof_for_two_messages_by_single_relayer) - // - weight(receive_delivery_proof_for_single_message)`. 
- receive_delivery_proof_for_two_messages_by_single_relayer { - let relayer_id: T::AccountId = account("relayer", 0, SEED); - - // send message that we're going to confirm - send_regular_message::(); - send_regular_message::(); - - let relayers_state = UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 2, - total_messages: 2, - last_delivered_nonce: 2, - }; - let mut delivered_messages = DeliveredMessages::new(1); - delivered_messages.note_dispatched_message(); - let proof = T::prepare_message_delivery_proof(MessageDeliveryProofParams { - lane: T::bench_lane_id(), - inbound_lane_data: InboundLaneData { - relayers: vec![UnrewardedRelayer { - relayer: relayer_id.clone(), - messages: delivered_messages, - }].into_iter().collect(), - last_confirmed_nonce: 0, - }, - size: StorageProofSize::Minimal(0), - }); - }: receive_messages_delivery_proof(RawOrigin::Signed(relayer_id.clone()), proof, relayers_state) - verify { - assert_eq!(OutboundLanes::::get(T::bench_lane_id()).latest_received_nonce, 2); - assert!(T::is_relayer_rewarded(&relayer_id)); - } - - // Benchmark `receive_messages_delivery_proof` extrinsic with following conditions: - // * two relayers are rewarded for relaying single message each; - // * relayer account does not exist (in practice it needs to exist in production environment). - // - // Additional weight for paying reward to the next relayer could be computed - // as `weight(receive_delivery_proof_for_two_messages_by_two_relayers) - // - weight(receive_delivery_proof_for_two_messages_by_single_relayer)`. 
- receive_delivery_proof_for_two_messages_by_two_relayers { - let relayer1_id: T::AccountId = account("relayer1", 1, SEED); - let relayer2_id: T::AccountId = account("relayer2", 2, SEED); - - // send message that we're going to confirm - send_regular_message::(); - send_regular_message::(); - - let relayers_state = UnrewardedRelayersState { - unrewarded_relayer_entries: 2, - messages_in_oldest_entry: 1, - total_messages: 2, - last_delivered_nonce: 2, - }; - let proof = T::prepare_message_delivery_proof(MessageDeliveryProofParams { - lane: T::bench_lane_id(), - inbound_lane_data: InboundLaneData { - relayers: vec![ - UnrewardedRelayer { - relayer: relayer1_id.clone(), - messages: DeliveredMessages::new(1), - }, - UnrewardedRelayer { - relayer: relayer2_id.clone(), - messages: DeliveredMessages::new(2), - }, - ].into_iter().collect(), - last_confirmed_nonce: 0, - }, - size: StorageProofSize::Minimal(0), - }); - }: receive_messages_delivery_proof(RawOrigin::Signed(relayer1_id.clone()), proof, relayers_state) - verify { - assert_eq!(OutboundLanes::::get(T::bench_lane_id()).latest_received_nonce, 2); - assert!(T::is_relayer_rewarded(&relayer1_id)); - assert!(T::is_relayer_rewarded(&relayer2_id)); - } - - // - // Benchmarks that the runtime developers may use for proper pallet configuration. - // - - // This benchmark is optional and may be used when runtime developer need a way to compute - // message dispatch weight. In this case, he needs to provide messages that can go the whole - // dispatch - // - // Benchmark `receive_messages_proof` extrinsic with single message and following conditions: - // - // * proof does not include outbound lane state proof; - // * inbound lane already has state, so it needs to be read and decoded; - // * message is **SUCCESSFULLY** dispatched; - // * message requires all heavy checks done by dispatcher. - receive_single_message_proof_with_dispatch { - // maybe dispatch weight relies on the message size too? 
- let i in EXPECTED_DEFAULT_MESSAGE_LENGTH .. EXPECTED_DEFAULT_MESSAGE_LENGTH * 16; - - let relayer_id_on_source = T::bridged_relayer_id(); - let relayer_id_on_target = account("relayer", 0, SEED); - T::endow_account(&relayer_id_on_target); - - // mark messages 1..=20 as delivered - receive_messages::(20); - - let (proof, dispatch_weight) = T::prepare_message_proof(MessageProofParams { - lane: T::bench_lane_id(), - message_nonces: 21..=21, - outbound_lane_data: None, - is_successful_dispatch_expected: true, - size: StorageProofSize::Minimal(i), - }); - }: receive_messages_proof(RawOrigin::Signed(relayer_id_on_target), relayer_id_on_source, proof, 1, dispatch_weight) - verify { - assert_eq!( - crate::InboundLanes::::get(&T::bench_lane_id()).last_delivered_nonce(), - 21, - ); - assert!(T::is_message_successfully_dispatched(21)); - } - - impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::TestRuntime) -} - -fn send_regular_message, I: 'static>() { - let mut outbound_lane = outbound_lane::(T::bench_lane_id()); - outbound_lane.send_message(BoundedVec::try_from(vec![]).expect("We craft valid messages")); -} - -fn receive_messages, I: 'static>(nonce: MessageNonce) { - let mut inbound_lane_storage = - RuntimeInboundLaneStorage::::from_lane_id(T::bench_lane_id()); - inbound_lane_storage.set_data(InboundLaneData { - relayers: vec![UnrewardedRelayer { - relayer: T::bridged_relayer_id(), - messages: DeliveredMessages::new(nonce), - }] - .into_iter() - .collect(), - last_confirmed_nonce: 0, - }); -} diff --git a/modules/messages/src/inbound_lane.rs b/modules/messages/src/inbound_lane.rs deleted file mode 100644 index 966ec939e70e22e830ee30157d2d7da74d59733c..0000000000000000000000000000000000000000 --- a/modules/messages/src/inbound_lane.rs +++ /dev/null @@ -1,556 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Everything about incoming messages receival. - -use crate::Config; - -use bp_messages::{ - target_chain::{DispatchMessage, DispatchMessageData, MessageDispatch}, - DeliveredMessages, InboundLaneData, LaneId, MessageKey, MessageNonce, OutboundLaneData, - ReceivalResult, UnrewardedRelayer, -}; -use codec::{Decode, Encode, EncodeLike, MaxEncodedLen}; -use frame_support::traits::Get; -use scale_info::{Type, TypeInfo}; -use sp_runtime::RuntimeDebug; -use sp_std::prelude::PartialEq; - -/// Inbound lane storage. -pub trait InboundLaneStorage { - /// Id of relayer on source chain. - type Relayer: Clone + PartialEq; - - /// Lane id. - fn id(&self) -> LaneId; - /// Return maximal number of unrewarded relayer entries in inbound lane. - fn max_unrewarded_relayer_entries(&self) -> MessageNonce; - /// Return maximal number of unconfirmed messages in inbound lane. - fn max_unconfirmed_messages(&self) -> MessageNonce; - /// Get lane data from the storage. - fn get_or_init_data(&mut self) -> InboundLaneData; - /// Update lane data in the storage. - fn set_data(&mut self, data: InboundLaneData); -} - -/// Inbound lane data wrapper that implements `MaxEncodedLen`. 
-/// -/// We have already had `MaxEncodedLen`-like functionality before, but its usage has -/// been localized and we haven't been passing bounds (maximal count of unrewarded relayer entries, -/// maximal count of unconfirmed messages) everywhere. This wrapper allows us to avoid passing -/// these generic bounds all over the code. -/// -/// The encoding of this type matches encoding of the corresponding `MessageData`. -#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq)] -pub struct StoredInboundLaneData, I: 'static>(pub InboundLaneData); - -impl, I: 'static> sp_std::ops::Deref for StoredInboundLaneData { - type Target = InboundLaneData; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl, I: 'static> sp_std::ops::DerefMut for StoredInboundLaneData { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} - -impl, I: 'static> Default for StoredInboundLaneData { - fn default() -> Self { - StoredInboundLaneData(Default::default()) - } -} - -impl, I: 'static> From> - for InboundLaneData -{ - fn from(data: StoredInboundLaneData) -> Self { - data.0 - } -} - -impl, I: 'static> EncodeLike> - for InboundLaneData -{ -} - -impl, I: 'static> TypeInfo for StoredInboundLaneData { - type Identity = Self; - - fn type_info() -> Type { - InboundLaneData::::type_info() - } -} - -impl, I: 'static> MaxEncodedLen for StoredInboundLaneData { - fn max_encoded_len() -> usize { - InboundLaneData::::encoded_size_hint( - T::MaxUnrewardedRelayerEntriesAtInboundLane::get() as usize, - ) - .unwrap_or(usize::MAX) - } -} - -/// Inbound messages lane. -pub struct InboundLane { - storage: S, -} - -impl InboundLane { - /// Create new inbound lane backed by given storage. - pub fn new(storage: S) -> Self { - InboundLane { storage } - } - - /// Returns `mut` storage reference. - pub fn storage_mut(&mut self) -> &mut S { - &mut self.storage - } - - /// Receive state of the corresponding outbound lane. 
- pub fn receive_state_update( - &mut self, - outbound_lane_data: OutboundLaneData, - ) -> Option { - let mut data = self.storage.get_or_init_data(); - let last_delivered_nonce = data.last_delivered_nonce(); - - if outbound_lane_data.latest_received_nonce > last_delivered_nonce { - // this is something that should never happen if proofs are correct - return None - } - if outbound_lane_data.latest_received_nonce <= data.last_confirmed_nonce { - return None - } - - let new_confirmed_nonce = outbound_lane_data.latest_received_nonce; - data.last_confirmed_nonce = new_confirmed_nonce; - // Firstly, remove all of the records where higher nonce <= new confirmed nonce - while data - .relayers - .front() - .map(|entry| entry.messages.end <= new_confirmed_nonce) - .unwrap_or(false) - { - data.relayers.pop_front(); - } - // Secondly, update the next record with lower nonce equal to new confirmed nonce if needed. - // Note: There will be max. 1 record to update as we don't allow messages from relayers to - // overlap. - match data.relayers.front_mut() { - Some(entry) if entry.messages.begin <= new_confirmed_nonce => { - entry.messages.begin = new_confirmed_nonce + 1; - }, - _ => {}, - } - - self.storage.set_data(data); - Some(outbound_lane_data.latest_received_nonce) - } - - /// Receive new message. 
- pub fn receive_message( - &mut self, - relayer_at_bridged_chain: &S::Relayer, - nonce: MessageNonce, - message_data: DispatchMessageData, - ) -> ReceivalResult { - let mut data = self.storage.get_or_init_data(); - if Some(nonce) != data.last_delivered_nonce().checked_add(1) { - return ReceivalResult::InvalidNonce - } - - // if there are more unrewarded relayer entries than we may accept, reject this message - if data.relayers.len() as MessageNonce >= self.storage.max_unrewarded_relayer_entries() { - return ReceivalResult::TooManyUnrewardedRelayers - } - - // if there are more unconfirmed messages than we may accept, reject this message - let unconfirmed_messages_count = nonce.saturating_sub(data.last_confirmed_nonce); - if unconfirmed_messages_count > self.storage.max_unconfirmed_messages() { - return ReceivalResult::TooManyUnconfirmedMessages - } - - // then, dispatch message - let dispatch_result = Dispatch::dispatch(DispatchMessage { - key: MessageKey { lane_id: self.storage.id(), nonce }, - data: message_data, - }); - - // now let's update inbound lane storage - match data.relayers.back_mut() { - Some(entry) if entry.relayer == *relayer_at_bridged_chain => { - entry.messages.note_dispatched_message(); - }, - _ => { - data.relayers.push_back(UnrewardedRelayer { - relayer: relayer_at_bridged_chain.clone(), - messages: DeliveredMessages::new(nonce), - }); - }, - }; - self.storage.set_data(data); - - ReceivalResult::Dispatched(dispatch_result) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - inbound_lane, - mock::{ - dispatch_result, inbound_message_data, inbound_unrewarded_relayers_state, run_test, - unrewarded_relayer, TestMessageDispatch, TestRuntime, REGULAR_PAYLOAD, TEST_LANE_ID, - TEST_RELAYER_A, TEST_RELAYER_B, TEST_RELAYER_C, - }, - RuntimeInboundLaneStorage, - }; - use bp_messages::UnrewardedRelayersState; - - fn receive_regular_message( - lane: &mut InboundLane>, - nonce: MessageNonce, - ) { - assert_eq!( - lane.receive_message::( - 
&TEST_RELAYER_A, - nonce, - inbound_message_data(REGULAR_PAYLOAD) - ), - ReceivalResult::Dispatched(dispatch_result(0)) - ); - } - - #[test] - fn receive_status_update_ignores_status_from_the_future() { - run_test(|| { - let mut lane = inbound_lane::(TEST_LANE_ID); - receive_regular_message(&mut lane, 1); - assert_eq!( - lane.receive_state_update(OutboundLaneData { - latest_received_nonce: 10, - ..Default::default() - }), - None, - ); - - assert_eq!(lane.storage.get_or_init_data().last_confirmed_nonce, 0); - }); - } - - #[test] - fn receive_status_update_ignores_obsolete_status() { - run_test(|| { - let mut lane = inbound_lane::(TEST_LANE_ID); - receive_regular_message(&mut lane, 1); - receive_regular_message(&mut lane, 2); - receive_regular_message(&mut lane, 3); - assert_eq!( - lane.receive_state_update(OutboundLaneData { - latest_received_nonce: 3, - ..Default::default() - }), - Some(3), - ); - assert_eq!(lane.storage.get_or_init_data().last_confirmed_nonce, 3); - - assert_eq!( - lane.receive_state_update(OutboundLaneData { - latest_received_nonce: 3, - ..Default::default() - }), - None, - ); - assert_eq!(lane.storage.get_or_init_data().last_confirmed_nonce, 3); - }); - } - - #[test] - fn receive_status_update_works() { - run_test(|| { - let mut lane = inbound_lane::(TEST_LANE_ID); - receive_regular_message(&mut lane, 1); - receive_regular_message(&mut lane, 2); - receive_regular_message(&mut lane, 3); - assert_eq!(lane.storage.get_or_init_data().last_confirmed_nonce, 0); - assert_eq!( - lane.storage.get_or_init_data().relayers, - vec![unrewarded_relayer(1, 3, TEST_RELAYER_A)] - ); - - assert_eq!( - lane.receive_state_update(OutboundLaneData { - latest_received_nonce: 2, - ..Default::default() - }), - Some(2), - ); - assert_eq!(lane.storage.get_or_init_data().last_confirmed_nonce, 2); - assert_eq!( - lane.storage.get_or_init_data().relayers, - vec![unrewarded_relayer(3, 3, TEST_RELAYER_A)] - ); - - assert_eq!( - lane.receive_state_update(OutboundLaneData { - 
latest_received_nonce: 3, - ..Default::default() - }), - Some(3), - ); - assert_eq!(lane.storage.get_or_init_data().last_confirmed_nonce, 3); - assert_eq!(lane.storage.get_or_init_data().relayers, vec![]); - }); - } - - #[test] - fn receive_status_update_works_with_batches_from_relayers() { - run_test(|| { - let mut lane = inbound_lane::(TEST_LANE_ID); - let mut seed_storage_data = lane.storage.get_or_init_data(); - // Prepare data - seed_storage_data.last_confirmed_nonce = 0; - seed_storage_data.relayers.push_back(unrewarded_relayer(1, 1, TEST_RELAYER_A)); - // Simulate messages batch (2, 3, 4) from relayer #2 - seed_storage_data.relayers.push_back(unrewarded_relayer(2, 4, TEST_RELAYER_B)); - seed_storage_data.relayers.push_back(unrewarded_relayer(5, 5, TEST_RELAYER_C)); - lane.storage.set_data(seed_storage_data); - // Check - assert_eq!( - lane.receive_state_update(OutboundLaneData { - latest_received_nonce: 3, - ..Default::default() - }), - Some(3), - ); - assert_eq!(lane.storage.get_or_init_data().last_confirmed_nonce, 3); - assert_eq!( - lane.storage.get_or_init_data().relayers, - vec![ - unrewarded_relayer(4, 4, TEST_RELAYER_B), - unrewarded_relayer(5, 5, TEST_RELAYER_C) - ] - ); - }); - } - - #[test] - fn fails_to_receive_message_with_incorrect_nonce() { - run_test(|| { - let mut lane = inbound_lane::(TEST_LANE_ID); - assert_eq!( - lane.receive_message::( - &TEST_RELAYER_A, - 10, - inbound_message_data(REGULAR_PAYLOAD) - ), - ReceivalResult::InvalidNonce - ); - assert_eq!(lane.storage.get_or_init_data().last_delivered_nonce(), 0); - }); - } - - #[test] - fn fails_to_receive_messages_above_unrewarded_relayer_entries_limit_per_lane() { - run_test(|| { - let mut lane = inbound_lane::(TEST_LANE_ID); - let max_nonce = - ::MaxUnrewardedRelayerEntriesAtInboundLane::get(); - for current_nonce in 1..max_nonce + 1 { - assert_eq!( - lane.receive_message::( - &(TEST_RELAYER_A + current_nonce), - current_nonce, - inbound_message_data(REGULAR_PAYLOAD) - ), - 
ReceivalResult::Dispatched(dispatch_result(0)) - ); - } - // Fails to dispatch new message from different than latest relayer. - assert_eq!( - lane.receive_message::( - &(TEST_RELAYER_A + max_nonce + 1), - max_nonce + 1, - inbound_message_data(REGULAR_PAYLOAD) - ), - ReceivalResult::TooManyUnrewardedRelayers, - ); - // Fails to dispatch new messages from latest relayer. Prevents griefing attacks. - assert_eq!( - lane.receive_message::( - &(TEST_RELAYER_A + max_nonce), - max_nonce + 1, - inbound_message_data(REGULAR_PAYLOAD) - ), - ReceivalResult::TooManyUnrewardedRelayers, - ); - }); - } - - #[test] - fn fails_to_receive_messages_above_unconfirmed_messages_limit_per_lane() { - run_test(|| { - let mut lane = inbound_lane::(TEST_LANE_ID); - let max_nonce = ::MaxUnconfirmedMessagesAtInboundLane::get(); - for current_nonce in 1..=max_nonce { - assert_eq!( - lane.receive_message::( - &TEST_RELAYER_A, - current_nonce, - inbound_message_data(REGULAR_PAYLOAD) - ), - ReceivalResult::Dispatched(dispatch_result(0)) - ); - } - // Fails to dispatch new message from different than latest relayer. - assert_eq!( - lane.receive_message::( - &TEST_RELAYER_B, - max_nonce + 1, - inbound_message_data(REGULAR_PAYLOAD) - ), - ReceivalResult::TooManyUnconfirmedMessages, - ); - // Fails to dispatch new messages from latest relayer. 
- assert_eq!( - lane.receive_message::( - &TEST_RELAYER_A, - max_nonce + 1, - inbound_message_data(REGULAR_PAYLOAD) - ), - ReceivalResult::TooManyUnconfirmedMessages, - ); - }); - } - - #[test] - fn correctly_receives_following_messages_from_two_relayers_alternately() { - run_test(|| { - let mut lane = inbound_lane::(TEST_LANE_ID); - assert_eq!( - lane.receive_message::( - &TEST_RELAYER_A, - 1, - inbound_message_data(REGULAR_PAYLOAD) - ), - ReceivalResult::Dispatched(dispatch_result(0)) - ); - assert_eq!( - lane.receive_message::( - &TEST_RELAYER_B, - 2, - inbound_message_data(REGULAR_PAYLOAD) - ), - ReceivalResult::Dispatched(dispatch_result(0)) - ); - assert_eq!( - lane.receive_message::( - &TEST_RELAYER_A, - 3, - inbound_message_data(REGULAR_PAYLOAD) - ), - ReceivalResult::Dispatched(dispatch_result(0)) - ); - assert_eq!( - lane.storage.get_or_init_data().relayers, - vec![ - unrewarded_relayer(1, 1, TEST_RELAYER_A), - unrewarded_relayer(2, 2, TEST_RELAYER_B), - unrewarded_relayer(3, 3, TEST_RELAYER_A) - ] - ); - }); - } - - #[test] - fn rejects_same_message_from_two_different_relayers() { - run_test(|| { - let mut lane = inbound_lane::(TEST_LANE_ID); - assert_eq!( - lane.receive_message::( - &TEST_RELAYER_A, - 1, - inbound_message_data(REGULAR_PAYLOAD) - ), - ReceivalResult::Dispatched(dispatch_result(0)) - ); - assert_eq!( - lane.receive_message::( - &TEST_RELAYER_B, - 1, - inbound_message_data(REGULAR_PAYLOAD) - ), - ReceivalResult::InvalidNonce, - ); - }); - } - - #[test] - fn correct_message_is_processed_instantly() { - run_test(|| { - let mut lane = inbound_lane::(TEST_LANE_ID); - receive_regular_message(&mut lane, 1); - assert_eq!(lane.storage.get_or_init_data().last_delivered_nonce(), 1); - }); - } - - #[test] - fn unspent_weight_is_returned_by_receive_message() { - run_test(|| { - let mut lane = inbound_lane::(TEST_LANE_ID); - let mut payload = REGULAR_PAYLOAD; - *payload.dispatch_result.unspent_weight.ref_time_mut() = 1; - assert_eq!( - 
lane.receive_message::( - &TEST_RELAYER_A, - 1, - inbound_message_data(payload) - ), - ReceivalResult::Dispatched(dispatch_result(1)) - ); - }); - } - - #[test] - fn first_message_is_confirmed_correctly() { - run_test(|| { - let mut lane = inbound_lane::(TEST_LANE_ID); - receive_regular_message(&mut lane, 1); - receive_regular_message(&mut lane, 2); - assert_eq!( - lane.receive_state_update(OutboundLaneData { - latest_received_nonce: 1, - ..Default::default() - }), - Some(1), - ); - assert_eq!( - inbound_unrewarded_relayers_state(TEST_LANE_ID), - UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 1, - total_messages: 1, - last_delivered_nonce: 2, - }, - ); - }); - } -} diff --git a/modules/messages/src/lib.rs b/modules/messages/src/lib.rs deleted file mode 100644 index a86cb326cf0404512b7fe6ad0aa2a696ff7d0a47..0000000000000000000000000000000000000000 --- a/modules/messages/src/lib.rs +++ /dev/null @@ -1,2117 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Runtime module that allows sending and receiving messages using lane concept: -//! -//! 1) the message is sent using `send_message()` call; -//! 2) every outbound message is assigned nonce; -//! 3) the messages are stored in the storage; -//! 
4) external component (relay) delivers messages to bridged chain; -//! 5) messages are processed in order (ordered by assigned nonce); -//! 6) relay may send proof-of-delivery back to this chain. -//! -//! Once message is sent, its progress can be tracked by looking at module events. -//! The assigned nonce is reported using `MessageAccepted` event. When message is -//! delivered to the the bridged chain, it is reported using `MessagesDelivered` event. -//! -//! **IMPORTANT NOTE**: after generating weights (custom `WeighInfo` implementation) for -//! your runtime (where this module is plugged to), please add test for these weights. -//! The test should call the `ensure_weights_are_correct` function from this module. -//! If this test fails with your weights, then either weights are computed incorrectly, -//! or some benchmarks assumptions are broken for your runtime. - -#![warn(missing_docs)] -#![cfg_attr(not(feature = "std"), no_std)] - -pub use inbound_lane::StoredInboundLaneData; -pub use outbound_lane::StoredMessagePayload; -pub use weights::WeightInfo; -pub use weights_ext::{ - ensure_able_to_receive_confirmation, ensure_able_to_receive_message, - ensure_weights_are_correct, WeightInfoExt, EXPECTED_DEFAULT_MESSAGE_LENGTH, - EXTRA_STORAGE_PROOF_SIZE, -}; - -use crate::{ - inbound_lane::{InboundLane, InboundLaneStorage}, - outbound_lane::{OutboundLane, OutboundLaneStorage, ReceivalConfirmationError}, -}; - -use bp_messages::{ - source_chain::{ - DeliveryConfirmationPayments, OnMessagesDelivered, SendMessageArtifacts, TargetHeaderChain, - }, - target_chain::{ - DeliveryPayments, DispatchMessage, MessageDispatch, ProvedLaneMessages, ProvedMessages, - SourceHeaderChain, - }, - DeliveredMessages, InboundLaneData, InboundMessageDetails, LaneId, MessageKey, MessageNonce, - MessagePayload, MessagesOperatingMode, OutboundLaneData, OutboundMessageDetails, - UnrewardedRelayersState, VerificationError, -}; -use bp_runtime::{ - BasicOperatingMode, ChainId, 
OwnedBridgeModule, PreComputedSize, RangeInclusiveExt, Size, -}; -use codec::{Decode, Encode, MaxEncodedLen}; -use frame_support::{dispatch::PostDispatchInfo, ensure, fail, traits::Get, DefaultNoBound}; -use sp_runtime::traits::UniqueSaturatedFrom; -use sp_std::{marker::PhantomData, prelude::*}; - -mod inbound_lane; -mod outbound_lane; -mod weights_ext; - -pub mod weights; - -#[cfg(feature = "runtime-benchmarks")] -pub mod benchmarking; - -#[cfg(test)] -mod mock; - -pub use pallet::*; - -/// The target that will be used when publishing logs related to this pallet. -pub const LOG_TARGET: &str = "runtime::bridge-messages"; - -#[frame_support::pallet] -pub mod pallet { - use super::*; - use bp_messages::{ReceivalResult, ReceivedMessages}; - use bp_runtime::RangeInclusiveExt; - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - - #[pallet::config] - pub trait Config: frame_system::Config { - // General types - - /// The overarching event type. - type RuntimeEvent: From> - + IsType<::RuntimeEvent>; - /// Benchmarks results from runtime we're plugged into. - type WeightInfo: WeightInfoExt; - - /// Gets the chain id value from the instance. - #[pallet::constant] - type BridgedChainId: Get; - - /// Get all active outbound lanes that the message pallet is serving. - type ActiveOutboundLanes: Get<&'static [LaneId]>; - /// Maximal number of unrewarded relayer entries at inbound lane. Unrewarded means that the - /// relayer has delivered messages, but either confirmations haven't been delivered back to - /// the source chain, or we haven't received reward confirmations yet. - /// - /// This constant limits maximal number of entries in the `InboundLaneData::relayers`. Keep - /// in mind that the same relayer account may take several (non-consecutive) entries in this - /// set. - type MaxUnrewardedRelayerEntriesAtInboundLane: Get; - /// Maximal number of unconfirmed messages at inbound lane. 
Unconfirmed means that the - /// message has been delivered, but either confirmations haven't been delivered back to the - /// source chain, or we haven't received reward confirmations for these messages yet. - /// - /// This constant limits difference between last message from last entry of the - /// `InboundLaneData::relayers` and first message at the first entry. - /// - /// There is no point of making this parameter lesser than - /// MaxUnrewardedRelayerEntriesAtInboundLane, because then maximal number of relayer entries - /// will be limited by maximal number of messages. - /// - /// This value also represents maximal number of messages in single delivery transaction. - /// Transaction that is declaring more messages than this value, will be rejected. Even if - /// these messages are from different lanes. - type MaxUnconfirmedMessagesAtInboundLane: Get; - - /// Maximal encoded size of the outbound payload. - #[pallet::constant] - type MaximalOutboundPayloadSize: Get; - /// Payload type of outbound messages. This payload is dispatched on the bridged chain. - type OutboundPayload: Parameter + Size; - - /// Payload type of inbound messages. This payload is dispatched on this chain. - type InboundPayload: Decode; - /// Identifier of relayer that deliver messages to this chain. Relayer reward is paid on the - /// bridged chain. - type InboundRelayer: Parameter + MaxEncodedLen; - /// Delivery payments. - type DeliveryPayments: DeliveryPayments; - - // Types that are used by outbound_lane (on source chain). - - /// Target header chain. - type TargetHeaderChain: TargetHeaderChain; - /// Delivery confirmation payments. - type DeliveryConfirmationPayments: DeliveryConfirmationPayments; - /// Delivery confirmation callback. - type OnMessagesDelivered: OnMessagesDelivered; - - // Types that are used by inbound_lane (on target chain). - - /// Source header chain, as it is represented on target chain. - type SourceHeaderChain: SourceHeaderChain; - /// Message dispatch. 
- type MessageDispatch: MessageDispatch; - } - - /// Shortcut to messages proof type for Config. - pub type MessagesProofOf = - <>::SourceHeaderChain as SourceHeaderChain>::MessagesProof; - /// Shortcut to messages delivery proof type for Config. - pub type MessagesDeliveryProofOf = - <>::TargetHeaderChain as TargetHeaderChain< - >::OutboundPayload, - ::AccountId, - >>::MessagesDeliveryProof; - - #[pallet::pallet] - pub struct Pallet(PhantomData<(T, I)>); - - impl, I: 'static> OwnedBridgeModule for Pallet { - const LOG_TARGET: &'static str = LOG_TARGET; - type OwnerStorage = PalletOwner; - type OperatingMode = MessagesOperatingMode; - type OperatingModeStorage = PalletOperatingMode; - } - - #[pallet::hooks] - impl, I: 'static> Hooks> for Pallet - where - u32: TryFrom>, - { - fn on_idle(_block: BlockNumberFor, remaining_weight: Weight) -> Weight { - // we'll need at least to read outbound lane state, kill a message and update lane state - let db_weight = T::DbWeight::get(); - if !remaining_weight.all_gte(db_weight.reads_writes(1, 2)) { - return Weight::zero() - } - - // messages from lane with index `i` in `ActiveOutboundLanes` are pruned when - // `System::block_number() % lanes.len() == i`. 
Otherwise we need to read lane states on - // every block, wasting the whole `remaining_weight` for nothing and causing starvation - // of the last lane pruning - let active_lanes = T::ActiveOutboundLanes::get(); - let active_lanes_len = (active_lanes.len() as u32).into(); - let active_lane_index = u32::unique_saturated_from( - frame_system::Pallet::::block_number() % active_lanes_len, - ); - let active_lane_id = active_lanes[active_lane_index as usize]; - - // first db read - outbound lane state - let mut active_lane = outbound_lane::(active_lane_id); - let mut used_weight = db_weight.reads(1); - // and here we'll have writes - used_weight += active_lane.prune_messages(db_weight, remaining_weight - used_weight); - - // we already checked we have enough `remaining_weight` to cover this `used_weight` - used_weight - } - } - - #[pallet::call] - impl, I: 'static> Pallet { - /// Change `PalletOwner`. - /// - /// May only be called either by root, or by `PalletOwner`. - #[pallet::call_index(0)] - #[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] - pub fn set_owner(origin: OriginFor, new_owner: Option) -> DispatchResult { - >::set_owner(origin, new_owner) - } - - /// Halt or resume all/some pallet operations. - /// - /// May only be called either by root, or by `PalletOwner`. - #[pallet::call_index(1)] - #[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] - pub fn set_operating_mode( - origin: OriginFor, - operating_mode: MessagesOperatingMode, - ) -> DispatchResult { - >::set_operating_mode(origin, operating_mode) - } - - /// Receive messages proof from bridged chain. - /// - /// The weight of the call assumes that the transaction always brings outbound lane - /// state update. Because of that, the submitter (relayer) has no benefit of not including - /// this data in the transaction, so reward confirmations lags should be minimal. 
- /// - /// The call fails if: - /// - /// - the pallet is halted; - /// - /// - the call origin is not `Signed(_)`; - /// - /// - there are too many messages in the proof; - /// - /// - the proof verification procedure returns an error - e.g. because header used to craft - /// proof is not imported by the associated finality pallet; - /// - /// - the `dispatch_weight` argument is not sufficient to dispatch all bundled messages. - /// - /// The call may succeed, but some messages may not be delivered e.g. if they are not fit - /// into the unrewarded relayers vector. - #[pallet::call_index(2)] - #[pallet::weight(T::WeightInfo::receive_messages_proof_weight(proof, *messages_count, *dispatch_weight))] - pub fn receive_messages_proof( - origin: OriginFor, - relayer_id_at_bridged_chain: T::InboundRelayer, - proof: MessagesProofOf, - messages_count: u32, - dispatch_weight: Weight, - ) -> DispatchResultWithPostInfo { - Self::ensure_not_halted().map_err(Error::::BridgeModule)?; - let relayer_id_at_this_chain = ensure_signed(origin)?; - - // reject transactions that are declaring too many messages - ensure!( - MessageNonce::from(messages_count) <= T::MaxUnconfirmedMessagesAtInboundLane::get(), - Error::::TooManyMessagesInTheProof - ); - - // if message dispatcher is currently inactive, we won't accept any messages - ensure!(T::MessageDispatch::is_active(), Error::::MessageDispatchInactive); - - // why do we need to know the weight of this (`receive_messages_proof`) call? Because - // we may want to return some funds for not-dispatching (or partially dispatching) some - // messages to the call origin (relayer). And this is done by returning actual weight - // from the call. But we only know dispatch weight of every messages. So to refund - // relayer because we have not dispatched Message, we need to: - // - // ActualWeight = DeclaredWeight - Message.DispatchWeight - // - // The DeclaredWeight is exactly what's computed here. 
Unfortunately it is impossible - // to get pre-computed value (and it has been already computed by the executive). - let declared_weight = T::WeightInfo::receive_messages_proof_weight( - &proof, - messages_count, - dispatch_weight, - ); - let mut actual_weight = declared_weight; - - // verify messages proof && convert proof into messages - let messages = verify_and_decode_messages_proof::< - T::SourceHeaderChain, - T::InboundPayload, - >(proof, messages_count) - .map_err(|err| { - log::trace!(target: LOG_TARGET, "Rejecting invalid messages proof: {:?}", err,); - - Error::::InvalidMessagesProof - })?; - - // dispatch messages and (optionally) update lane(s) state(s) - let mut total_messages = 0; - let mut valid_messages = 0; - let mut messages_received_status = Vec::with_capacity(messages.len()); - let mut dispatch_weight_left = dispatch_weight; - for (lane_id, lane_data) in messages { - let mut lane = inbound_lane::(lane_id); - - // subtract extra storage proof bytes from the actual PoV size - there may be - // less unrewarded relayers than the maximal configured value - let lane_extra_proof_size_bytes = lane.storage_mut().extra_proof_size_bytes(); - actual_weight = actual_weight.set_proof_size( - actual_weight.proof_size().saturating_sub(lane_extra_proof_size_bytes), - ); - - if let Some(lane_state) = lane_data.lane_state { - let updated_latest_confirmed_nonce = lane.receive_state_update(lane_state); - if let Some(updated_latest_confirmed_nonce) = updated_latest_confirmed_nonce { - log::trace!( - target: LOG_TARGET, - "Received lane {:?} state update: latest_confirmed_nonce={}. 
Unrewarded relayers: {:?}", - lane_id, - updated_latest_confirmed_nonce, - UnrewardedRelayersState::from(&lane.storage_mut().get_or_init_data()), - ); - } - } - - let mut lane_messages_received_status = - ReceivedMessages::new(lane_id, Vec::with_capacity(lane_data.messages.len())); - for mut message in lane_data.messages { - debug_assert_eq!(message.key.lane_id, lane_id); - total_messages += 1; - - // ensure that relayer has declared enough weight for dispatching next message - // on this lane. We can't dispatch lane messages out-of-order, so if declared - // weight is not enough, let's move to next lane - let message_dispatch_weight = T::MessageDispatch::dispatch_weight(&mut message); - if message_dispatch_weight.any_gt(dispatch_weight_left) { - log::trace!( - target: LOG_TARGET, - "Cannot dispatch any more messages on lane {:?}. Weight: declared={}, left={}", - lane_id, - message_dispatch_weight, - dispatch_weight_left, - ); - - fail!(Error::::InsufficientDispatchWeight); - } - - let receival_result = lane.receive_message::( - &relayer_id_at_bridged_chain, - message.key.nonce, - message.data, - ); - - // note that we're returning unspent weight to relayer even if message has been - // rejected by the lane. This allows relayers to submit spam transactions with - // e.g. the same set of already delivered messages over and over again, without - // losing funds for messages dispatch. But keep in mind that relayer pays base - // delivery transaction cost anyway. And base cost covers everything except - // dispatch, so we have a balance here. 
- let unspent_weight = match &receival_result { - ReceivalResult::Dispatched(dispatch_result) => { - valid_messages += 1; - dispatch_result.unspent_weight - }, - ReceivalResult::InvalidNonce | - ReceivalResult::TooManyUnrewardedRelayers | - ReceivalResult::TooManyUnconfirmedMessages => message_dispatch_weight, - }; - lane_messages_received_status.push(message.key.nonce, receival_result); - - let unspent_weight = unspent_weight.min(message_dispatch_weight); - dispatch_weight_left -= message_dispatch_weight - unspent_weight; - actual_weight = actual_weight.saturating_sub(unspent_weight); - } - - messages_received_status.push(lane_messages_received_status); - } - - // let's now deal with relayer payments - T::DeliveryPayments::pay_reward( - relayer_id_at_this_chain, - total_messages, - valid_messages, - actual_weight, - ); - - log::debug!( - target: LOG_TARGET, - "Received messages: total={}, valid={}. Weight used: {}/{}.", - total_messages, - valid_messages, - actual_weight, - declared_weight, - ); - - Self::deposit_event(Event::MessagesReceived(messages_received_status)); - - Ok(PostDispatchInfo { actual_weight: Some(actual_weight), pays_fee: Pays::Yes }) - } - - /// Receive messages delivery proof from bridged chain. 
- #[pallet::call_index(3)] - #[pallet::weight(T::WeightInfo::receive_messages_delivery_proof_weight( - proof, - relayers_state, - ))] - pub fn receive_messages_delivery_proof( - origin: OriginFor, - proof: MessagesDeliveryProofOf, - mut relayers_state: UnrewardedRelayersState, - ) -> DispatchResultWithPostInfo { - Self::ensure_not_halted().map_err(Error::::BridgeModule)?; - - let proof_size = proof.size(); - let confirmation_relayer = ensure_signed(origin)?; - let (lane_id, lane_data) = T::TargetHeaderChain::verify_messages_delivery_proof(proof) - .map_err(|err| { - log::trace!( - target: LOG_TARGET, - "Rejecting invalid messages delivery proof: {:?}", - err, - ); - - Error::::InvalidMessagesDeliveryProof - })?; - ensure!( - relayers_state.is_valid(&lane_data), - Error::::InvalidUnrewardedRelayersState - ); - - // mark messages as delivered - let mut lane = outbound_lane::(lane_id); - let last_delivered_nonce = lane_data.last_delivered_nonce(); - let confirmed_messages = lane - .confirm_delivery( - relayers_state.total_messages, - last_delivered_nonce, - &lane_data.relayers, - ) - .map_err(Error::::ReceivalConfirmation)?; - - if let Some(confirmed_messages) = confirmed_messages { - // emit 'delivered' event - let received_range = confirmed_messages.begin..=confirmed_messages.end; - Self::deposit_event(Event::MessagesDelivered { - lane_id, - messages: confirmed_messages, - }); - - // if some new messages have been confirmed, reward relayers - let actually_rewarded_relayers = T::DeliveryConfirmationPayments::pay_reward( - lane_id, - lane_data.relayers, - &confirmation_relayer, - &received_range, - ); - - // update relayers state with actual numbers to compute actual weight below - relayers_state.unrewarded_relayer_entries = sp_std::cmp::min( - relayers_state.unrewarded_relayer_entries, - actually_rewarded_relayers, - ); - relayers_state.total_messages = sp_std::cmp::min( - relayers_state.total_messages, - received_range.checked_len().unwrap_or(MessageNonce::MAX), - 
); - }; - - log::trace!( - target: LOG_TARGET, - "Received messages delivery proof up to (and including) {} at lane {:?}", - last_delivered_nonce, - lane_id, - ); - - // notify others about messages delivery - T::OnMessagesDelivered::on_messages_delivered( - lane_id, - lane.data().queued_messages().saturating_len(), - ); - - // because of lags, the inbound lane state (`lane_data`) may have entries for - // already rewarded relayers and messages (if all entries are duplicated, then - // this transaction must be filtered out by our signed extension) - let actual_weight = T::WeightInfo::receive_messages_delivery_proof_weight( - &PreComputedSize(proof_size as usize), - &relayers_state, - ); - - Ok(PostDispatchInfo { actual_weight: Some(actual_weight), pays_fee: Pays::Yes }) - } - } - - #[pallet::event] - #[pallet::generate_deposit(pub(super) fn deposit_event)] - pub enum Event, I: 'static = ()> { - /// Message has been accepted and is waiting to be delivered. - MessageAccepted { - /// Lane, which has accepted the message. - lane_id: LaneId, - /// Nonce of accepted message. - nonce: MessageNonce, - }, - /// Messages have been received from the bridged chain. - MessagesReceived( - /// Result of received messages dispatch. - Vec::DispatchLevelResult>>, - ), - /// Messages in the inclusive range have been delivered to the bridged chain. - MessagesDelivered { - /// Lane for which the delivery has been confirmed. - lane_id: LaneId, - /// Delivered messages. - messages: DeliveredMessages, - }, - } - - #[pallet::error] - #[derive(PartialEq, Eq)] - pub enum Error { - /// Pallet is not in Normal operating mode. - NotOperatingNormally, - /// The outbound lane is inactive. - InactiveOutboundLane, - /// The inbound message dispatcher is inactive. - MessageDispatchInactive, - /// Message has been treated as invalid by chain verifier. - MessageRejectedByChainVerifier(VerificationError), - /// Message has been treated as invalid by the pallet logic. 
- MessageRejectedByPallet(VerificationError), - /// Submitter has failed to pay fee for delivering and dispatching messages. - FailedToWithdrawMessageFee, - /// The transaction brings too many messages. - TooManyMessagesInTheProof, - /// Invalid messages has been submitted. - InvalidMessagesProof, - /// Invalid messages delivery proof has been submitted. - InvalidMessagesDeliveryProof, - /// The relayer has declared invalid unrewarded relayers state in the - /// `receive_messages_delivery_proof` call. - InvalidUnrewardedRelayersState, - /// The cumulative dispatch weight, passed by relayer is not enough to cover dispatch - /// of all bundled messages. - InsufficientDispatchWeight, - /// The message someone is trying to work with (i.e. increase fee) is not yet sent. - MessageIsNotYetSent, - /// Error confirming messages receival. - ReceivalConfirmation(ReceivalConfirmationError), - /// Error generated by the `OwnedBridgeModule` trait. - BridgeModule(bp_runtime::OwnedBridgeModuleError), - } - - /// Optional pallet owner. - /// - /// Pallet owner has a right to halt all pallet operations and then resume it. If it is - /// `None`, then there are no direct ways to halt/resume pallet operations, but other - /// runtime methods may still be used to do that (i.e. democracy::referendum to update halt - /// flag directly or call the `halt_operations`). - #[pallet::storage] - #[pallet::getter(fn module_owner)] - pub type PalletOwner, I: 'static = ()> = StorageValue<_, T::AccountId>; - - /// The current operating mode of the pallet. - /// - /// Depending on the mode either all, some, or no transactions will be allowed. - #[pallet::storage] - #[pallet::getter(fn operating_mode)] - pub type PalletOperatingMode, I: 'static = ()> = - StorageValue<_, MessagesOperatingMode, ValueQuery>; - - /// Map of lane id => inbound lane data. 
- #[pallet::storage] - pub type InboundLanes, I: 'static = ()> = - StorageMap<_, Blake2_128Concat, LaneId, StoredInboundLaneData, ValueQuery>; - - /// Map of lane id => outbound lane data. - #[pallet::storage] - pub type OutboundLanes, I: 'static = ()> = StorageMap< - Hasher = Blake2_128Concat, - Key = LaneId, - Value = OutboundLaneData, - QueryKind = ValueQuery, - OnEmpty = GetDefault, - MaxValues = MaybeOutboundLanesCount, - >; - - /// Map of lane id => is congested signal sent. It is managed by the - /// `bridge_runtime_common::LocalXcmQueueManager`. - /// - /// **bridges-v1**: this map is a temporary hack and will be dropped in the `v2`. We can emulate - /// a storage map using `sp_io::unhashed` storage functions, but then benchmarks are not - /// accounting its `proof_size`, so it is missing from the final weights. So we need to make it - /// a map inside some pallet. We could use a simply value instead of map here, because - /// in `v1` we'll only have a single lane. But in the case of adding another lane before `v2`, - /// it'll be easier to deal with the isolated storage map instead. - #[pallet::storage] - pub type OutboundLanesCongestedSignals, I: 'static = ()> = StorageMap< - Hasher = Blake2_128Concat, - Key = LaneId, - Value = bool, - QueryKind = ValueQuery, - OnEmpty = GetDefault, - MaxValues = MaybeOutboundLanesCount, - >; - - /// All queued outbound messages. - #[pallet::storage] - pub type OutboundMessages, I: 'static = ()> = - StorageMap<_, Blake2_128Concat, MessageKey, StoredMessagePayload>; - - #[pallet::genesis_config] - #[derive(DefaultNoBound)] - pub struct GenesisConfig, I: 'static = ()> { - /// Initial pallet operating mode. - pub operating_mode: MessagesOperatingMode, - /// Initial pallet owner. - pub owner: Option, - /// Dummy marker. 
- pub phantom: sp_std::marker::PhantomData, - } - - #[pallet::genesis_build] - impl, I: 'static> BuildGenesisConfig for GenesisConfig { - fn build(&self) { - PalletOperatingMode::::put(self.operating_mode); - if let Some(ref owner) = self.owner { - PalletOwner::::put(owner); - } - } - } - - impl, I: 'static> Pallet { - /// Get stored data of the outbound message with given nonce. - pub fn outbound_message_data(lane: LaneId, nonce: MessageNonce) -> Option { - OutboundMessages::::get(MessageKey { lane_id: lane, nonce }).map(Into::into) - } - - /// Prepare data, related to given inbound message. - pub fn inbound_message_data( - lane: LaneId, - payload: MessagePayload, - outbound_details: OutboundMessageDetails, - ) -> InboundMessageDetails { - let mut dispatch_message = DispatchMessage { - key: MessageKey { lane_id: lane, nonce: outbound_details.nonce }, - data: payload.into(), - }; - InboundMessageDetails { - dispatch_weight: T::MessageDispatch::dispatch_weight(&mut dispatch_message), - } - } - - /// Return outbound lane data. - pub fn outbound_lane_data(lane: LaneId) -> OutboundLaneData { - OutboundLanes::::get(lane) - } - - /// Return inbound lane data. - pub fn inbound_lane_data(lane: LaneId) -> InboundLaneData { - InboundLanes::::get(lane).0 - } - } - - /// Get-parameter that returns number of active outbound lanes that the pallet maintains. - pub struct MaybeOutboundLanesCount(PhantomData<(T, I)>); - - impl, I: 'static> Get> for MaybeOutboundLanesCount { - fn get() -> Option { - Some(T::ActiveOutboundLanes::get().len() as u32) - } - } -} - -/// Structure, containing a validated message payload and all the info required -/// to send it on the bridge. 
-#[derive(Debug, PartialEq, Eq)] -pub struct SendMessageArgs, I: 'static> { - lane_id: LaneId, - payload: StoredMessagePayload, -} - -impl bp_messages::source_chain::MessagesBridge for Pallet -where - T: Config, - I: 'static, -{ - type Error = Error; - type SendMessageArgs = SendMessageArgs; - - fn validate_message( - lane: LaneId, - message: &T::OutboundPayload, - ) -> Result, Self::Error> { - ensure_normal_operating_mode::()?; - - // let's check if outbound lane is active - ensure!(T::ActiveOutboundLanes::get().contains(&lane), Error::::InactiveOutboundLane); - - // let's first check if message can be delivered to target chain - T::TargetHeaderChain::verify_message(message).map_err(|err| { - log::trace!( - target: LOG_TARGET, - "Message to lane {:?} is rejected by target chain: {:?}", - lane, - err, - ); - - Error::::MessageRejectedByChainVerifier(err) - })?; - - Ok(SendMessageArgs { - lane_id: lane, - payload: StoredMessagePayload::::try_from(message.encode()).map_err(|_| { - Error::::MessageRejectedByPallet(VerificationError::MessageTooLarge) - })?, - }) - } - - fn send_message(args: SendMessageArgs) -> SendMessageArtifacts { - // save message in outbound storage and emit event - let mut lane = outbound_lane::(args.lane_id); - let message_len = args.payload.len(); - let nonce = lane.send_message(args.payload); - - // return number of messages in the queue to let sender know about its state - let enqueued_messages = lane.data().queued_messages().saturating_len(); - - log::trace!( - target: LOG_TARGET, - "Accepted message {} to lane {:?}. Message size: {:?}", - nonce, - args.lane_id, - message_len, - ); - - Pallet::::deposit_event(Event::MessageAccepted { lane_id: args.lane_id, nonce }); - - SendMessageArtifacts { nonce, enqueued_messages } - } -} - -/// Ensure that the pallet is in normal operational mode. 
-fn ensure_normal_operating_mode, I: 'static>() -> Result<(), Error> { - if PalletOperatingMode::::get() == - MessagesOperatingMode::Basic(BasicOperatingMode::Normal) - { - return Ok(()) - } - - Err(Error::::NotOperatingNormally) -} - -/// Creates new inbound lane object, backed by runtime storage. -fn inbound_lane, I: 'static>( - lane_id: LaneId, -) -> InboundLane> { - InboundLane::new(RuntimeInboundLaneStorage::from_lane_id(lane_id)) -} - -/// Creates new outbound lane object, backed by runtime storage. -fn outbound_lane, I: 'static>( - lane_id: LaneId, -) -> OutboundLane> { - OutboundLane::new(RuntimeOutboundLaneStorage { lane_id, _phantom: Default::default() }) -} - -/// Runtime inbound lane storage. -struct RuntimeInboundLaneStorage, I: 'static = ()> { - lane_id: LaneId, - cached_data: Option>, - _phantom: PhantomData, -} - -impl, I: 'static> RuntimeInboundLaneStorage { - /// Creates new runtime inbound lane storage. - fn from_lane_id(lane_id: LaneId) -> RuntimeInboundLaneStorage { - RuntimeInboundLaneStorage { lane_id, cached_data: None, _phantom: Default::default() } - } -} - -impl, I: 'static> RuntimeInboundLaneStorage { - /// Returns number of bytes that may be subtracted from the PoV component of - /// `receive_messages_proof` call, because the actual inbound lane state is smaller than the - /// maximal configured. - /// - /// Maximal inbound lane state set size is configured by the - /// `MaxUnrewardedRelayerEntriesAtInboundLane` constant from the pallet configuration. The PoV - /// of the call includes the maximal size of inbound lane state. If the actual size is smaller, - /// we may subtract extra bytes from this component. 
- pub fn extra_proof_size_bytes(&mut self) -> u64 { - let max_encoded_len = StoredInboundLaneData::::max_encoded_len(); - let relayers_count = self.get_or_init_data().relayers.len(); - let actual_encoded_len = - InboundLaneData::::encoded_size_hint(relayers_count) - .unwrap_or(usize::MAX); - max_encoded_len.saturating_sub(actual_encoded_len) as _ - } -} - -impl, I: 'static> InboundLaneStorage for RuntimeInboundLaneStorage { - type Relayer = T::InboundRelayer; - - fn id(&self) -> LaneId { - self.lane_id - } - - fn max_unrewarded_relayer_entries(&self) -> MessageNonce { - T::MaxUnrewardedRelayerEntriesAtInboundLane::get() - } - - fn max_unconfirmed_messages(&self) -> MessageNonce { - T::MaxUnconfirmedMessagesAtInboundLane::get() - } - - fn get_or_init_data(&mut self) -> InboundLaneData { - match self.cached_data { - Some(ref data) => data.clone(), - None => { - let data: InboundLaneData = - InboundLanes::::get(self.lane_id).into(); - self.cached_data = Some(data.clone()); - data - }, - } - } - - fn set_data(&mut self, data: InboundLaneData) { - self.cached_data = Some(data.clone()); - InboundLanes::::insert(self.lane_id, StoredInboundLaneData::(data)) - } -} - -/// Runtime outbound lane storage. 
-struct RuntimeOutboundLaneStorage { - lane_id: LaneId, - _phantom: PhantomData<(T, I)>, -} - -impl, I: 'static> OutboundLaneStorage for RuntimeOutboundLaneStorage { - type StoredMessagePayload = StoredMessagePayload; - - fn id(&self) -> LaneId { - self.lane_id - } - - fn data(&self) -> OutboundLaneData { - OutboundLanes::::get(self.lane_id) - } - - fn set_data(&mut self, data: OutboundLaneData) { - OutboundLanes::::insert(self.lane_id, data) - } - - #[cfg(test)] - fn message(&self, nonce: &MessageNonce) -> Option { - OutboundMessages::::get(MessageKey { lane_id: self.lane_id, nonce: *nonce }) - } - - fn save_message(&mut self, nonce: MessageNonce, message_payload: Self::StoredMessagePayload) { - OutboundMessages::::insert( - MessageKey { lane_id: self.lane_id, nonce }, - message_payload, - ); - } - - fn remove_message(&mut self, nonce: &MessageNonce) { - OutboundMessages::::remove(MessageKey { lane_id: self.lane_id, nonce: *nonce }); - } -} - -/// Verify messages proof and return proved messages with decoded payload. -fn verify_and_decode_messages_proof( - proof: Chain::MessagesProof, - messages_count: u32, -) -> Result>, VerificationError> { - // `receive_messages_proof` weight formula and `MaxUnconfirmedMessagesAtInboundLane` check - // guarantees that the `message_count` is sane and Vec may be allocated. 
- // (tx with too many messages will either be rejected from the pool, or will fail earlier) - Chain::verify_messages_proof(proof, messages_count).map(|messages_by_lane| { - messages_by_lane - .into_iter() - .map(|(lane, lane_data)| { - ( - lane, - ProvedLaneMessages { - lane_state: lane_data.lane_state, - messages: lane_data.messages.into_iter().map(Into::into).collect(), - }, - ) - }) - .collect() - }) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - mock::{ - inbound_unrewarded_relayers_state, message, message_payload, run_test, - unrewarded_relayer, AccountId, DbWeight, RuntimeEvent as TestEvent, RuntimeOrigin, - TestDeliveryConfirmationPayments, TestDeliveryPayments, TestMessageDispatch, - TestMessagesDeliveryProof, TestMessagesProof, TestOnMessagesDelivered, TestRelayer, - TestRuntime, TestWeightInfo, MAX_OUTBOUND_PAYLOAD_SIZE, - PAYLOAD_REJECTED_BY_TARGET_CHAIN, REGULAR_PAYLOAD, TEST_LANE_ID, TEST_LANE_ID_2, - TEST_LANE_ID_3, TEST_RELAYER_A, TEST_RELAYER_B, - }, - outbound_lane::ReceivalConfirmationError, - }; - use bp_messages::{ - source_chain::MessagesBridge, BridgeMessagesCall, UnrewardedRelayer, - UnrewardedRelayersState, - }; - use bp_test_utils::generate_owned_bridge_module_tests; - use frame_support::{ - assert_noop, assert_ok, - dispatch::Pays, - storage::generator::{StorageMap, StorageValue}, - traits::Hooks, - weights::Weight, - }; - use frame_system::{EventRecord, Pallet as System, Phase}; - use sp_runtime::DispatchError; - - fn get_ready_for_events() { - System::::set_block_number(1); - System::::reset_events(); - } - - fn send_regular_message(lane_id: LaneId) { - get_ready_for_events(); - - let outbound_lane = outbound_lane::(lane_id); - let message_nonce = outbound_lane.data().latest_generated_nonce + 1; - let prev_enqueud_messages = outbound_lane.data().queued_messages().saturating_len(); - let valid_message = Pallet::::validate_message(lane_id, ®ULAR_PAYLOAD) - .expect("validate_message has failed"); - let artifacts = 
Pallet::::send_message(valid_message); - assert_eq!(artifacts.enqueued_messages, prev_enqueud_messages + 1); - - // check event with assigned nonce - assert_eq!( - System::::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: TestEvent::Messages(Event::MessageAccepted { - lane_id, - nonce: message_nonce - }), - topics: vec![], - }], - ); - } - - fn receive_messages_delivery_proof() { - System::::set_block_number(1); - System::::reset_events(); - - assert_ok!(Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - last_confirmed_nonce: 1, - relayers: vec![UnrewardedRelayer { - relayer: 0, - messages: DeliveredMessages::new(1), - }] - .into_iter() - .collect(), - }, - ))), - UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 1, - total_messages: 1, - last_delivered_nonce: 1, - }, - )); - - assert_eq!( - System::::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: TestEvent::Messages(Event::MessagesDelivered { - lane_id: TEST_LANE_ID, - messages: DeliveredMessages::new(1), - }), - topics: vec![], - }], - ); - } - - #[test] - fn pallet_rejects_transactions_if_halted() { - run_test(|| { - // send message first to be able to check that delivery_proof fails later - send_regular_message(TEST_LANE_ID); - - PalletOperatingMode::::put(MessagesOperatingMode::Basic( - BasicOperatingMode::Halted, - )); - - assert_noop!( - Pallet::::validate_message(TEST_LANE_ID, ®ULAR_PAYLOAD), - Error::::NotOperatingNormally, - ); - - assert_noop!( - Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - Ok(vec![message(2, REGULAR_PAYLOAD)]).into(), - 1, - REGULAR_PAYLOAD.declared_weight, - ), - Error::::BridgeModule(bp_runtime::OwnedBridgeModuleError::Halted), - ); - - assert_noop!( - Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - 
InboundLaneData { - last_confirmed_nonce: 1, - relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)] - .into_iter() - .collect(), - }, - ))), - UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 1, - total_messages: 1, - last_delivered_nonce: 1, - }, - ), - Error::::BridgeModule(bp_runtime::OwnedBridgeModuleError::Halted), - ); - }); - } - - #[test] - fn pallet_rejects_new_messages_in_rejecting_outbound_messages_operating_mode() { - run_test(|| { - // send message first to be able to check that delivery_proof fails later - send_regular_message(TEST_LANE_ID); - - PalletOperatingMode::::put( - MessagesOperatingMode::RejectingOutboundMessages, - ); - - assert_noop!( - Pallet::::validate_message(TEST_LANE_ID, ®ULAR_PAYLOAD), - Error::::NotOperatingNormally, - ); - - assert_ok!(Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - Ok(vec![message(1, REGULAR_PAYLOAD)]).into(), - 1, - REGULAR_PAYLOAD.declared_weight, - ),); - - assert_ok!(Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - last_confirmed_nonce: 1, - relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)] - .into_iter() - .collect(), - }, - ))), - UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 1, - total_messages: 1, - last_delivered_nonce: 1, - }, - )); - }); - } - - #[test] - fn send_message_works() { - run_test(|| { - send_regular_message(TEST_LANE_ID); - }); - } - - #[test] - fn send_message_rejects_too_large_message() { - run_test(|| { - let mut message_payload = message_payload(1, 0); - // the payload isn't simply extra, so it'll definitely overflow - // `MAX_OUTBOUND_PAYLOAD_SIZE` if we add `MAX_OUTBOUND_PAYLOAD_SIZE` bytes to extra - message_payload - .extra - .extend_from_slice(&[0u8; MAX_OUTBOUND_PAYLOAD_SIZE as usize]); - assert_noop!( - Pallet::::validate_message(TEST_LANE_ID, 
&message_payload.clone(),), - Error::::MessageRejectedByPallet( - VerificationError::MessageTooLarge - ), - ); - - // let's check that we're able to send `MAX_OUTBOUND_PAYLOAD_SIZE` messages - while message_payload.encoded_size() as u32 > MAX_OUTBOUND_PAYLOAD_SIZE { - message_payload.extra.pop(); - } - assert_eq!(message_payload.encoded_size() as u32, MAX_OUTBOUND_PAYLOAD_SIZE); - - let valid_message = - Pallet::::validate_message(TEST_LANE_ID, &message_payload) - .expect("validate_message has failed"); - Pallet::::send_message(valid_message); - }) - } - - #[test] - fn chain_verifier_rejects_invalid_message_in_send_message() { - run_test(|| { - // messages with this payload are rejected by target chain verifier - assert_noop!( - Pallet::::validate_message( - TEST_LANE_ID, - &PAYLOAD_REJECTED_BY_TARGET_CHAIN, - ), - Error::::MessageRejectedByChainVerifier(VerificationError::Other( - mock::TEST_ERROR - )), - ); - }); - } - - #[test] - fn receive_messages_proof_works() { - run_test(|| { - assert_ok!(Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - Ok(vec![message(1, REGULAR_PAYLOAD)]).into(), - 1, - REGULAR_PAYLOAD.declared_weight, - )); - - assert_eq!(InboundLanes::::get(TEST_LANE_ID).0.last_delivered_nonce(), 1); - - assert!(TestDeliveryPayments::is_reward_paid(1)); - }); - } - - #[test] - fn receive_messages_proof_updates_confirmed_message_nonce() { - run_test(|| { - // say we have received 10 messages && last confirmed message is 8 - InboundLanes::::insert( - TEST_LANE_ID, - InboundLaneData { - last_confirmed_nonce: 8, - relayers: vec![ - unrewarded_relayer(9, 9, TEST_RELAYER_A), - unrewarded_relayer(10, 10, TEST_RELAYER_B), - ] - .into_iter() - .collect(), - }, - ); - assert_eq!( - inbound_unrewarded_relayers_state(TEST_LANE_ID), - UnrewardedRelayersState { - unrewarded_relayer_entries: 2, - messages_in_oldest_entry: 1, - total_messages: 2, - last_delivered_nonce: 10, - }, - ); - - // message proof includes outbound lane state 
with latest confirmed message updated to 9 - let mut message_proof: TestMessagesProof = - Ok(vec![message(11, REGULAR_PAYLOAD)]).into(); - message_proof.result.as_mut().unwrap()[0].1.lane_state = - Some(OutboundLaneData { latest_received_nonce: 9, ..Default::default() }); - - assert_ok!(Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - message_proof, - 1, - REGULAR_PAYLOAD.declared_weight, - )); - - assert_eq!( - InboundLanes::::get(TEST_LANE_ID).0, - InboundLaneData { - last_confirmed_nonce: 9, - relayers: vec![ - unrewarded_relayer(10, 10, TEST_RELAYER_B), - unrewarded_relayer(11, 11, TEST_RELAYER_A) - ] - .into_iter() - .collect(), - }, - ); - assert_eq!( - inbound_unrewarded_relayers_state(TEST_LANE_ID), - UnrewardedRelayersState { - unrewarded_relayer_entries: 2, - messages_in_oldest_entry: 1, - total_messages: 2, - last_delivered_nonce: 11, - }, - ); - }); - } - - #[test] - fn receive_messages_fails_if_dispatcher_is_inactive() { - run_test(|| { - TestMessageDispatch::deactivate(); - assert_noop!( - Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - Ok(vec![message(1, REGULAR_PAYLOAD)]).into(), - 1, - REGULAR_PAYLOAD.declared_weight, - ), - Error::::MessageDispatchInactive, - ); - }); - } - - #[test] - fn receive_messages_proof_does_not_accept_message_if_dispatch_weight_is_not_enough() { - run_test(|| { - let mut declared_weight = REGULAR_PAYLOAD.declared_weight; - *declared_weight.ref_time_mut() -= 1; - assert_noop!( - Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - Ok(vec![message(1, REGULAR_PAYLOAD)]).into(), - 1, - declared_weight, - ), - Error::::InsufficientDispatchWeight - ); - assert_eq!(InboundLanes::::get(TEST_LANE_ID).last_delivered_nonce(), 0); - }); - } - - #[test] - fn receive_messages_proof_rejects_invalid_proof() { - run_test(|| { - assert_noop!( - Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - Err(()).into(), - 1, 
- Weight::zero(), - ), - Error::::InvalidMessagesProof, - ); - }); - } - - #[test] - fn receive_messages_proof_rejects_proof_with_too_many_messages() { - run_test(|| { - assert_noop!( - Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - Ok(vec![message(1, REGULAR_PAYLOAD)]).into(), - u32::MAX, - Weight::zero(), - ), - Error::::TooManyMessagesInTheProof, - ); - }); - } - - #[test] - fn receive_messages_delivery_proof_works() { - run_test(|| { - send_regular_message(TEST_LANE_ID); - receive_messages_delivery_proof(); - - assert_eq!( - OutboundLanes::::get(TEST_LANE_ID).latest_received_nonce, - 1, - ); - }); - } - - #[test] - fn receive_messages_delivery_proof_rewards_relayers() { - run_test(|| { - send_regular_message(TEST_LANE_ID); - send_regular_message(TEST_LANE_ID); - - // this reports delivery of message 1 => reward is paid to TEST_RELAYER_A - let single_message_delivery_proof = TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)].into_iter().collect(), - ..Default::default() - }, - ))); - let single_message_delivery_proof_size = single_message_delivery_proof.size(); - let result = Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - single_message_delivery_proof, - UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 1, - total_messages: 1, - last_delivered_nonce: 1, - }, - ); - assert_ok!(result); - assert_eq!( - result.unwrap().actual_weight.unwrap(), - TestWeightInfo::receive_messages_delivery_proof_weight( - &PreComputedSize(single_message_delivery_proof_size as _), - &UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - total_messages: 1, - ..Default::default() - }, - ) - ); - assert!(TestDeliveryConfirmationPayments::is_reward_paid(TEST_RELAYER_A, 1)); - assert!(!TestDeliveryConfirmationPayments::is_reward_paid(TEST_RELAYER_B, 1)); - assert_eq!(TestOnMessagesDelivered::call_arguments(), 
Some((TEST_LANE_ID, 1))); - - // this reports delivery of both message 1 and message 2 => reward is paid only to - // TEST_RELAYER_B - let two_messages_delivery_proof = TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - relayers: vec![ - unrewarded_relayer(1, 1, TEST_RELAYER_A), - unrewarded_relayer(2, 2, TEST_RELAYER_B), - ] - .into_iter() - .collect(), - ..Default::default() - }, - ))); - let two_messages_delivery_proof_size = two_messages_delivery_proof.size(); - let result = Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - two_messages_delivery_proof, - UnrewardedRelayersState { - unrewarded_relayer_entries: 2, - messages_in_oldest_entry: 1, - total_messages: 2, - last_delivered_nonce: 2, - }, - ); - assert_ok!(result); - // even though the pre-dispatch weight was for two messages, the actual weight is - // for single message only - assert_eq!( - result.unwrap().actual_weight.unwrap(), - TestWeightInfo::receive_messages_delivery_proof_weight( - &PreComputedSize(two_messages_delivery_proof_size as _), - &UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - total_messages: 1, - ..Default::default() - }, - ) - ); - assert!(!TestDeliveryConfirmationPayments::is_reward_paid(TEST_RELAYER_A, 1)); - assert!(TestDeliveryConfirmationPayments::is_reward_paid(TEST_RELAYER_B, 1)); - assert_eq!(TestOnMessagesDelivered::call_arguments(), Some((TEST_LANE_ID, 0))); - }); - } - - #[test] - fn receive_messages_delivery_proof_rejects_invalid_proof() { - run_test(|| { - assert_noop!( - Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - TestMessagesDeliveryProof(Err(())), - Default::default(), - ), - Error::::InvalidMessagesDeliveryProof, - ); - }); - } - - #[test] - fn receive_messages_delivery_proof_rejects_proof_if_declared_relayers_state_is_invalid() { - run_test(|| { - // when number of relayers entries is invalid - assert_noop!( - Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - 
TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - relayers: vec![ - unrewarded_relayer(1, 1, TEST_RELAYER_A), - unrewarded_relayer(2, 2, TEST_RELAYER_B) - ] - .into_iter() - .collect(), - ..Default::default() - } - ))), - UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - total_messages: 2, - last_delivered_nonce: 2, - ..Default::default() - }, - ), - Error::::InvalidUnrewardedRelayersState, - ); - - // when number of messages is invalid - assert_noop!( - Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - relayers: vec![ - unrewarded_relayer(1, 1, TEST_RELAYER_A), - unrewarded_relayer(2, 2, TEST_RELAYER_B) - ] - .into_iter() - .collect(), - ..Default::default() - } - ))), - UnrewardedRelayersState { - unrewarded_relayer_entries: 2, - total_messages: 1, - last_delivered_nonce: 2, - ..Default::default() - }, - ), - Error::::InvalidUnrewardedRelayersState, - ); - - // when last delivered nonce is invalid - assert_noop!( - Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - relayers: vec![ - unrewarded_relayer(1, 1, TEST_RELAYER_A), - unrewarded_relayer(2, 2, TEST_RELAYER_B) - ] - .into_iter() - .collect(), - ..Default::default() - } - ))), - UnrewardedRelayersState { - unrewarded_relayer_entries: 2, - total_messages: 2, - last_delivered_nonce: 8, - ..Default::default() - }, - ), - Error::::InvalidUnrewardedRelayersState, - ); - }); - } - - #[test] - fn receive_messages_accepts_single_message_with_invalid_payload() { - run_test(|| { - let mut invalid_message = message(1, REGULAR_PAYLOAD); - invalid_message.payload = Vec::new(); - - assert_ok!(Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - Ok(vec![invalid_message]).into(), - 1, - Weight::zero(), /* weight may be zero in this case (all messages are - * improperly encoded) */ - 
),); - - assert_eq!(InboundLanes::::get(TEST_LANE_ID).last_delivered_nonce(), 1,); - }); - } - - #[test] - fn receive_messages_accepts_batch_with_message_with_invalid_payload() { - run_test(|| { - let mut invalid_message = message(2, REGULAR_PAYLOAD); - invalid_message.payload = Vec::new(); - - assert_ok!(Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - Ok( - vec![message(1, REGULAR_PAYLOAD), invalid_message, message(3, REGULAR_PAYLOAD),] - ) - .into(), - 3, - REGULAR_PAYLOAD.declared_weight + REGULAR_PAYLOAD.declared_weight, - ),); - - assert_eq!(InboundLanes::::get(TEST_LANE_ID).last_delivered_nonce(), 3,); - }); - } - - #[test] - fn actual_dispatch_weight_does_not_overlow() { - run_test(|| { - let message1 = message(1, message_payload(0, u64::MAX / 2)); - let message2 = message(2, message_payload(0, u64::MAX / 2)); - let message3 = message(3, message_payload(0, u64::MAX / 2)); - - assert_noop!( - Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - // this may cause overflow if source chain storage is invalid - Ok(vec![message1, message2, message3]).into(), - 3, - Weight::MAX, - ), - Error::::InsufficientDispatchWeight - ); - assert_eq!(InboundLanes::::get(TEST_LANE_ID).last_delivered_nonce(), 0); - }); - } - - #[test] - fn ref_time_refund_from_receive_messages_proof_works() { - run_test(|| { - fn submit_with_unspent_weight( - nonce: MessageNonce, - unspent_weight: u64, - ) -> (Weight, Weight) { - let mut payload = REGULAR_PAYLOAD; - *payload.dispatch_result.unspent_weight.ref_time_mut() = unspent_weight; - let proof = Ok(vec![message(nonce, payload)]).into(); - let messages_count = 1; - let pre_dispatch_weight = - ::WeightInfo::receive_messages_proof_weight( - &proof, - messages_count, - REGULAR_PAYLOAD.declared_weight, - ); - let result = Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - proof, - messages_count, - REGULAR_PAYLOAD.declared_weight, - ) - 
.expect("delivery has failed"); - let post_dispatch_weight = - result.actual_weight.expect("receive_messages_proof always returns Some"); - - // message delivery transactions are never free - assert_eq!(result.pays_fee, Pays::Yes); - - (pre_dispatch_weight, post_dispatch_weight) - } - - // when dispatch is returning `unspent_weight < declared_weight` - let (pre, post) = submit_with_unspent_weight(1, 1); - assert_eq!(post.ref_time(), pre.ref_time() - 1); - - // when dispatch is returning `unspent_weight = declared_weight` - let (pre, post) = - submit_with_unspent_weight(2, REGULAR_PAYLOAD.declared_weight.ref_time()); - assert_eq!( - post.ref_time(), - pre.ref_time() - REGULAR_PAYLOAD.declared_weight.ref_time() - ); - - // when dispatch is returning `unspent_weight > declared_weight` - let (pre, post) = - submit_with_unspent_weight(3, REGULAR_PAYLOAD.declared_weight.ref_time() + 1); - assert_eq!( - post.ref_time(), - pre.ref_time() - REGULAR_PAYLOAD.declared_weight.ref_time() - ); - - // when there's no unspent weight - let (pre, post) = submit_with_unspent_weight(4, 0); - assert_eq!(post.ref_time(), pre.ref_time()); - - // when dispatch is returning `unspent_weight < declared_weight` - let (pre, post) = submit_with_unspent_weight(5, 1); - assert_eq!(post.ref_time(), pre.ref_time() - 1); - }); - } - - #[test] - fn proof_size_refund_from_receive_messages_proof_works() { - run_test(|| { - let max_entries = crate::mock::MaxUnrewardedRelayerEntriesAtInboundLane::get() as usize; - - // if there's maximal number of unrewarded relayer entries at the inbound lane, then - // `proof_size` is unchanged in post-dispatch weight - let proof: TestMessagesProof = Ok(vec![message(101, REGULAR_PAYLOAD)]).into(); - let messages_count = 1; - let pre_dispatch_weight = - ::WeightInfo::receive_messages_proof_weight( - &proof, - messages_count, - REGULAR_PAYLOAD.declared_weight, - ); - InboundLanes::::insert( - TEST_LANE_ID, - StoredInboundLaneData(InboundLaneData { - relayers: vec![ - 
UnrewardedRelayer { - relayer: 42, - messages: DeliveredMessages { begin: 0, end: 100 } - }; - max_entries - ] - .into_iter() - .collect(), - last_confirmed_nonce: 0, - }), - ); - let post_dispatch_weight = Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - proof.clone(), - messages_count, - REGULAR_PAYLOAD.declared_weight, - ) - .unwrap() - .actual_weight - .unwrap(); - assert_eq!(post_dispatch_weight.proof_size(), pre_dispatch_weight.proof_size()); - - // if count of unrewarded relayer entries is less than maximal, then some `proof_size` - // must be refunded - InboundLanes::::insert( - TEST_LANE_ID, - StoredInboundLaneData(InboundLaneData { - relayers: vec![ - UnrewardedRelayer { - relayer: 42, - messages: DeliveredMessages { begin: 0, end: 100 } - }; - max_entries - 1 - ] - .into_iter() - .collect(), - last_confirmed_nonce: 0, - }), - ); - let post_dispatch_weight = Pallet::::receive_messages_proof( - RuntimeOrigin::signed(1), - TEST_RELAYER_A, - proof, - messages_count, - REGULAR_PAYLOAD.declared_weight, - ) - .unwrap() - .actual_weight - .unwrap(); - assert!( - post_dispatch_weight.proof_size() < pre_dispatch_weight.proof_size(), - "Expected post-dispatch PoV {} to be less than pre-dispatch PoV {}", - post_dispatch_weight.proof_size(), - pre_dispatch_weight.proof_size(), - ); - }); - } - - #[test] - fn messages_delivered_callbacks_are_called() { - run_test(|| { - send_regular_message(TEST_LANE_ID); - send_regular_message(TEST_LANE_ID); - send_regular_message(TEST_LANE_ID); - - // messages 1+2 are confirmed in 1 tx, message 3 in a separate tx - // dispatch of message 2 has failed - let mut delivered_messages_1_and_2 = DeliveredMessages::new(1); - delivered_messages_1_and_2.note_dispatched_message(); - let messages_1_and_2_proof = Ok(( - TEST_LANE_ID, - InboundLaneData { - last_confirmed_nonce: 0, - relayers: vec![UnrewardedRelayer { - relayer: 0, - messages: delivered_messages_1_and_2.clone(), - }] - .into_iter() - .collect(), - 
}, - )); - let delivered_message_3 = DeliveredMessages::new(3); - let messages_3_proof = Ok(( - TEST_LANE_ID, - InboundLaneData { - last_confirmed_nonce: 0, - relayers: vec![UnrewardedRelayer { relayer: 0, messages: delivered_message_3 }] - .into_iter() - .collect(), - }, - )); - - // first tx with messages 1+2 - assert_ok!(Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - TestMessagesDeliveryProof(messages_1_and_2_proof), - UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 2, - total_messages: 2, - last_delivered_nonce: 2, - }, - )); - // second tx with message 3 - assert_ok!(Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - TestMessagesDeliveryProof(messages_3_proof), - UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 1, - total_messages: 1, - last_delivered_nonce: 3, - }, - )); - }); - } - - #[test] - fn receive_messages_delivery_proof_rejects_proof_if_trying_to_confirm_more_messages_than_expected( - ) { - run_test(|| { - // send message first to be able to check that delivery_proof fails later - send_regular_message(TEST_LANE_ID); - - // 1) InboundLaneData declares that the `last_confirmed_nonce` is 1; - // 2) InboundLaneData has no entries => `InboundLaneData::last_delivered_nonce()` - // returns `last_confirmed_nonce`; - // 3) it means that we're going to confirm delivery of messages 1..=1; - // 4) so the number of declared messages (see `UnrewardedRelayersState`) is `0` and - // numer of actually confirmed messages is `1`. 
- assert_noop!( - Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { last_confirmed_nonce: 1, relayers: Default::default() }, - ))), - UnrewardedRelayersState { last_delivered_nonce: 1, ..Default::default() }, - ), - Error::::ReceivalConfirmation( - ReceivalConfirmationError::TryingToConfirmMoreMessagesThanExpected - ), - ); - }); - } - - #[test] - fn storage_keys_computed_properly() { - assert_eq!( - PalletOperatingMode::::storage_value_final_key().to_vec(), - bp_messages::storage_keys::operating_mode_key("Messages").0, - ); - - assert_eq!( - OutboundMessages::::storage_map_final_key(MessageKey { - lane_id: TEST_LANE_ID, - nonce: 42 - }), - bp_messages::storage_keys::message_key("Messages", &TEST_LANE_ID, 42).0, - ); - - assert_eq!( - OutboundLanes::::storage_map_final_key(TEST_LANE_ID), - bp_messages::storage_keys::outbound_lane_data_key("Messages", &TEST_LANE_ID).0, - ); - - assert_eq!( - InboundLanes::::storage_map_final_key(TEST_LANE_ID), - bp_messages::storage_keys::inbound_lane_data_key("Messages", &TEST_LANE_ID).0, - ); - } - - #[test] - fn inbound_message_details_works() { - run_test(|| { - assert_eq!( - Pallet::::inbound_message_data( - TEST_LANE_ID, - REGULAR_PAYLOAD.encode(), - OutboundMessageDetails { nonce: 0, dispatch_weight: Weight::zero(), size: 0 }, - ), - InboundMessageDetails { dispatch_weight: REGULAR_PAYLOAD.declared_weight }, - ); - }); - } - - #[test] - fn on_idle_callback_respects_remaining_weight() { - run_test(|| { - send_regular_message(TEST_LANE_ID); - send_regular_message(TEST_LANE_ID); - send_regular_message(TEST_LANE_ID); - send_regular_message(TEST_LANE_ID); - - assert_ok!(Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - last_confirmed_nonce: 4, - relayers: vec![unrewarded_relayer(1, 4, TEST_RELAYER_A)] - .into_iter() - .collect(), - }, - ))), - 
UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 4, - total_messages: 4, - last_delivered_nonce: 4, - }, - )); - - // all 4 messages may be pruned now - assert_eq!( - outbound_lane::(TEST_LANE_ID).data().latest_received_nonce, - 4 - ); - assert_eq!( - outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, - 1 - ); - System::::set_block_number(2); - - // if passed wight is too low to do anything - let dbw = DbWeight::get(); - assert_eq!( - Pallet::::on_idle(0, dbw.reads_writes(1, 1)), - Weight::zero(), - ); - assert_eq!( - outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, - 1 - ); - - // if passed wight is enough to prune single message - assert_eq!( - Pallet::::on_idle(0, dbw.reads_writes(1, 2)), - dbw.reads_writes(1, 2), - ); - assert_eq!( - outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, - 2 - ); - - // if passed wight is enough to prune two more messages - assert_eq!( - Pallet::::on_idle(0, dbw.reads_writes(1, 3)), - dbw.reads_writes(1, 3), - ); - assert_eq!( - outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, - 4 - ); - - // if passed wight is enough to prune many messages - assert_eq!( - Pallet::::on_idle(0, dbw.reads_writes(100, 100)), - dbw.reads_writes(1, 2), - ); - assert_eq!( - outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, - 5 - ); - }); - } - - #[test] - fn on_idle_callback_is_rotating_lanes_to_prune() { - run_test(|| { - // send + receive confirmation for lane 1 - send_regular_message(TEST_LANE_ID); - receive_messages_delivery_proof(); - // send + receive confirmation for lane 2 - send_regular_message(TEST_LANE_ID_2); - assert_ok!(Pallet::::receive_messages_delivery_proof( - RuntimeOrigin::signed(1), - TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID_2, - InboundLaneData { - last_confirmed_nonce: 1, - relayers: vec![unrewarded_relayer(1, 1, TEST_RELAYER_A)] - .into_iter() - .collect(), - }, - ))), - UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - 
messages_in_oldest_entry: 1, - total_messages: 1, - last_delivered_nonce: 1, - }, - )); - - // nothing is pruned yet - assert_eq!( - outbound_lane::(TEST_LANE_ID).data().latest_received_nonce, - 1 - ); - assert_eq!( - outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, - 1 - ); - assert_eq!( - outbound_lane::(TEST_LANE_ID_2).data().latest_received_nonce, - 1 - ); - assert_eq!( - outbound_lane::(TEST_LANE_ID_2).data().oldest_unpruned_nonce, - 1 - ); - - // in block#2.on_idle lane messages of lane 1 are pruned - let dbw = DbWeight::get(); - System::::set_block_number(2); - assert_eq!( - Pallet::::on_idle(0, dbw.reads_writes(100, 100)), - dbw.reads_writes(1, 2), - ); - assert_eq!( - outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, - 2 - ); - assert_eq!( - outbound_lane::(TEST_LANE_ID_2).data().oldest_unpruned_nonce, - 1 - ); - - // in block#3.on_idle lane messages of lane 2 are pruned - System::::set_block_number(3); - - assert_eq!( - Pallet::::on_idle(0, dbw.reads_writes(100, 100)), - dbw.reads_writes(1, 2), - ); - assert_eq!( - outbound_lane::(TEST_LANE_ID).data().oldest_unpruned_nonce, - 2 - ); - assert_eq!( - outbound_lane::(TEST_LANE_ID_2).data().oldest_unpruned_nonce, - 2 - ); - }); - } - - #[test] - fn outbound_message_from_unconfigured_lane_is_rejected() { - run_test(|| { - assert_noop!( - Pallet::::validate_message(TEST_LANE_ID_3, ®ULAR_PAYLOAD,), - Error::::InactiveOutboundLane, - ); - }); - } - - #[test] - fn test_bridge_messages_call_is_correctly_defined() { - let account_id = 1; - let message_proof: TestMessagesProof = Ok(vec![message(1, REGULAR_PAYLOAD)]).into(); - let message_delivery_proof = TestMessagesDeliveryProof(Ok(( - TEST_LANE_ID, - InboundLaneData { - last_confirmed_nonce: 1, - relayers: vec![UnrewardedRelayer { - relayer: 0, - messages: DeliveredMessages::new(1), - }] - .into_iter() - .collect(), - }, - ))); - let unrewarded_relayer_state = UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - total_messages: 1, - 
last_delivered_nonce: 1, - ..Default::default() - }; - - let direct_receive_messages_proof_call = Call::::receive_messages_proof { - relayer_id_at_bridged_chain: account_id, - proof: message_proof.clone(), - messages_count: 1, - dispatch_weight: REGULAR_PAYLOAD.declared_weight, - }; - let indirect_receive_messages_proof_call = BridgeMessagesCall::< - AccountId, - TestMessagesProof, - TestMessagesDeliveryProof, - >::receive_messages_proof { - relayer_id_at_bridged_chain: account_id, - proof: message_proof, - messages_count: 1, - dispatch_weight: REGULAR_PAYLOAD.declared_weight, - }; - assert_eq!( - direct_receive_messages_proof_call.encode(), - indirect_receive_messages_proof_call.encode() - ); - - let direct_receive_messages_delivery_proof_call = - Call::::receive_messages_delivery_proof { - proof: message_delivery_proof.clone(), - relayers_state: unrewarded_relayer_state.clone(), - }; - let indirect_receive_messages_delivery_proof_call = BridgeMessagesCall::< - AccountId, - TestMessagesProof, - TestMessagesDeliveryProof, - >::receive_messages_delivery_proof { - proof: message_delivery_proof, - relayers_state: unrewarded_relayer_state, - }; - assert_eq!( - direct_receive_messages_delivery_proof_call.encode(), - indirect_receive_messages_delivery_proof_call.encode() - ); - } - - generate_owned_bridge_module_tests!( - MessagesOperatingMode::Basic(BasicOperatingMode::Normal), - MessagesOperatingMode::Basic(BasicOperatingMode::Halted) - ); - - #[test] - fn inbound_storage_extra_proof_size_bytes_works() { - fn relayer_entry() -> UnrewardedRelayer { - UnrewardedRelayer { relayer: 42u64, messages: DeliveredMessages { begin: 0, end: 100 } } - } - - fn storage(relayer_entries: usize) -> RuntimeInboundLaneStorage { - RuntimeInboundLaneStorage { - lane_id: Default::default(), - cached_data: Some(InboundLaneData { - relayers: vec![relayer_entry(); relayer_entries].into_iter().collect(), - last_confirmed_nonce: 0, - }), - _phantom: Default::default(), - } - } - - let 
max_entries = crate::mock::MaxUnrewardedRelayerEntriesAtInboundLane::get() as usize; - - // when we have exactly `MaxUnrewardedRelayerEntriesAtInboundLane` unrewarded relayers - assert_eq!(storage(max_entries).extra_proof_size_bytes(), 0); - - // when we have less than `MaxUnrewardedRelayerEntriesAtInboundLane` unrewarded relayers - assert_eq!( - storage(max_entries - 1).extra_proof_size_bytes(), - relayer_entry().encode().len() as u64 - ); - assert_eq!( - storage(max_entries - 2).extra_proof_size_bytes(), - 2 * relayer_entry().encode().len() as u64 - ); - - // when we have more than `MaxUnrewardedRelayerEntriesAtInboundLane` unrewarded relayers - // (shall not happen in practice) - assert_eq!(storage(max_entries + 1).extra_proof_size_bytes(), 0); - } - - #[test] - fn maybe_outbound_lanes_count_returns_correct_value() { - assert_eq!( - MaybeOutboundLanesCount::::get(), - Some(mock::ActiveOutboundLanes::get().len() as u32) - ); - } -} diff --git a/modules/messages/src/mock.rs b/modules/messages/src/mock.rs deleted file mode 100644 index af92120539854347111d0562e284dc59e6e251d9..0000000000000000000000000000000000000000 --- a/modules/messages/src/mock.rs +++ /dev/null @@ -1,461 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -// From construct_runtime macro -#![allow(clippy::from_over_into)] - -use crate::{Config, StoredMessagePayload}; - -use bp_messages::{ - calc_relayers_rewards, - source_chain::{DeliveryConfirmationPayments, OnMessagesDelivered, TargetHeaderChain}, - target_chain::{ - DeliveryPayments, DispatchMessage, DispatchMessageData, MessageDispatch, - ProvedLaneMessages, ProvedMessages, SourceHeaderChain, - }, - DeliveredMessages, InboundLaneData, LaneId, Message, MessageKey, MessageNonce, - UnrewardedRelayer, UnrewardedRelayersState, VerificationError, -}; -use bp_runtime::{messages::MessageDispatchResult, Size}; -use codec::{Decode, Encode}; -use frame_support::{ - derive_impl, parameter_types, - weights::{constants::RocksDbWeight, Weight}, -}; -use scale_info::TypeInfo; -use sp_runtime::BuildStorage; -use std::{ - collections::{BTreeMap, VecDeque}, - ops::RangeInclusive, -}; - -pub type AccountId = u64; -pub type Balance = u64; -#[derive(Decode, Encode, Clone, Debug, PartialEq, Eq, TypeInfo)] -pub struct TestPayload { - /// Field that may be used to identify messages. - pub id: u64, - /// Dispatch weight that is declared by the message sender. - pub declared_weight: Weight, - /// Message dispatch result. - /// - /// Note: in correct code `dispatch_result.unspent_weight` will always be <= `declared_weight`, - /// but for test purposes we'll be making it larger than `declared_weight` sometimes. - pub dispatch_result: MessageDispatchResult, - /// Extra bytes that affect payload size. - pub extra: Vec, -} -pub type TestMessageFee = u64; -pub type TestRelayer = u64; -pub type TestDispatchLevelResult = (); - -type Block = frame_system::mocking::MockBlock; - -use crate as pallet_bridge_messages; - -frame_support::construct_runtime! 
{ - pub enum TestRuntime - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Balances: pallet_balances::{Pallet, Call, Event}, - Messages: pallet_bridge_messages::{Pallet, Call, Event}, - } -} - -pub type DbWeight = RocksDbWeight; - -#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] -impl frame_system::Config for TestRuntime { - type Block = Block; - type AccountData = pallet_balances::AccountData; - type DbWeight = DbWeight; -} - -#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] -impl pallet_balances::Config for TestRuntime { - type ReserveIdentifier = [u8; 8]; - type AccountStore = System; -} - -parameter_types! { - pub const MaxMessagesToPruneAtOnce: u64 = 10; - pub const MaxUnrewardedRelayerEntriesAtInboundLane: u64 = 16; - pub const MaxUnconfirmedMessagesAtInboundLane: u64 = 128; - pub const TestBridgedChainId: bp_runtime::ChainId = *b"test"; - pub const ActiveOutboundLanes: &'static [LaneId] = &[TEST_LANE_ID, TEST_LANE_ID_2]; -} - -/// weights of messages pallet calls we use in tests. 
-pub type TestWeightInfo = (); - -impl Config for TestRuntime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = TestWeightInfo; - type ActiveOutboundLanes = ActiveOutboundLanes; - type MaxUnrewardedRelayerEntriesAtInboundLane = MaxUnrewardedRelayerEntriesAtInboundLane; - type MaxUnconfirmedMessagesAtInboundLane = MaxUnconfirmedMessagesAtInboundLane; - - type MaximalOutboundPayloadSize = frame_support::traits::ConstU32; - type OutboundPayload = TestPayload; - - type InboundPayload = TestPayload; - type InboundRelayer = TestRelayer; - type DeliveryPayments = TestDeliveryPayments; - - type TargetHeaderChain = TestTargetHeaderChain; - type DeliveryConfirmationPayments = TestDeliveryConfirmationPayments; - type OnMessagesDelivered = TestOnMessagesDelivered; - - type SourceHeaderChain = TestSourceHeaderChain; - type MessageDispatch = TestMessageDispatch; - type BridgedChainId = TestBridgedChainId; -} - -#[cfg(feature = "runtime-benchmarks")] -impl crate::benchmarking::Config<()> for TestRuntime { - fn bench_lane_id() -> LaneId { - TEST_LANE_ID - } - - fn prepare_message_proof( - params: crate::benchmarking::MessageProofParams, - ) -> (TestMessagesProof, Weight) { - // in mock run we only care about benchmarks correctness, not the benchmark results - // => ignore size related arguments - let (messages, total_dispatch_weight) = - params.message_nonces.into_iter().map(|n| message(n, REGULAR_PAYLOAD)).fold( - (Vec::new(), Weight::zero()), - |(mut messages, total_dispatch_weight), message| { - let weight = REGULAR_PAYLOAD.declared_weight; - messages.push(message); - (messages, total_dispatch_weight.saturating_add(weight)) - }, - ); - let mut proof: TestMessagesProof = Ok(messages).into(); - proof.result.as_mut().unwrap().get_mut(0).unwrap().1.lane_state = params.outbound_lane_data; - (proof, total_dispatch_weight) - } - - fn prepare_message_delivery_proof( - params: crate::benchmarking::MessageDeliveryProofParams, - ) -> TestMessagesDeliveryProof { - // in mock run we 
only care about benchmarks correctness, not the benchmark results - // => ignore size related arguments - TestMessagesDeliveryProof(Ok((params.lane, params.inbound_lane_data))) - } - - fn is_relayer_rewarded(_relayer: &AccountId) -> bool { - true - } -} - -impl Size for TestPayload { - fn size(&self) -> u32 { - 16 + self.extra.len() as u32 - } -} - -/// Maximal outbound payload size. -pub const MAX_OUTBOUND_PAYLOAD_SIZE: u32 = 4096; - -/// Account that has balance to use in tests. -pub const ENDOWED_ACCOUNT: AccountId = 0xDEAD; - -/// Account id of test relayer. -pub const TEST_RELAYER_A: AccountId = 100; - -/// Account id of additional test relayer - B. -pub const TEST_RELAYER_B: AccountId = 101; - -/// Account id of additional test relayer - C. -pub const TEST_RELAYER_C: AccountId = 102; - -/// Error that is returned by all test implementations. -pub const TEST_ERROR: &str = "Test error"; - -/// Lane that we're using in tests. -pub const TEST_LANE_ID: LaneId = LaneId([0, 0, 0, 1]); - -/// Secondary lane that we're using in tests. -pub const TEST_LANE_ID_2: LaneId = LaneId([0, 0, 0, 2]); - -/// Inactive outbound lane. -pub const TEST_LANE_ID_3: LaneId = LaneId([0, 0, 0, 3]); - -/// Regular message payload. -pub const REGULAR_PAYLOAD: TestPayload = message_payload(0, 50); - -/// Payload that is rejected by `TestTargetHeaderChain`. -pub const PAYLOAD_REJECTED_BY_TARGET_CHAIN: TestPayload = message_payload(1, 50); - -/// Vec of proved messages, grouped by lane. -pub type MessagesByLaneVec = Vec<(LaneId, ProvedLaneMessages)>; - -/// Test messages proof. 
-#[derive(Debug, Encode, Decode, Clone, PartialEq, Eq, TypeInfo)] -pub struct TestMessagesProof { - pub result: Result, -} - -impl Size for TestMessagesProof { - fn size(&self) -> u32 { - 0 - } -} - -impl From, ()>> for TestMessagesProof { - fn from(result: Result, ()>) -> Self { - Self { - result: result.map(|messages| { - let mut messages_by_lane: BTreeMap> = - BTreeMap::new(); - for message in messages { - messages_by_lane.entry(message.key.lane_id).or_default().messages.push(message); - } - messages_by_lane.into_iter().collect() - }), - } - } -} - -/// Messages delivery proof used in tests. -#[derive(Debug, Encode, Decode, Eq, Clone, PartialEq, TypeInfo)] -pub struct TestMessagesDeliveryProof(pub Result<(LaneId, InboundLaneData), ()>); - -impl Size for TestMessagesDeliveryProof { - fn size(&self) -> u32 { - 0 - } -} - -/// Target header chain that is used in tests. -#[derive(Debug, Default)] -pub struct TestTargetHeaderChain; - -impl TargetHeaderChain for TestTargetHeaderChain { - type MessagesDeliveryProof = TestMessagesDeliveryProof; - - fn verify_message(payload: &TestPayload) -> Result<(), VerificationError> { - if *payload == PAYLOAD_REJECTED_BY_TARGET_CHAIN { - Err(VerificationError::Other(TEST_ERROR)) - } else { - Ok(()) - } - } - - fn verify_messages_delivery_proof( - proof: Self::MessagesDeliveryProof, - ) -> Result<(LaneId, InboundLaneData), VerificationError> { - proof.0.map_err(|_| VerificationError::Other(TEST_ERROR)) - } -} - -/// Reward payments at the target chain during delivery transaction. -#[derive(Debug, Default)] -pub struct TestDeliveryPayments; - -impl TestDeliveryPayments { - /// Returns true if given relayer has been rewarded with given balance. The reward-paid flag is - /// cleared after the call. 
- pub fn is_reward_paid(relayer: AccountId) -> bool { - let key = (b":delivery-relayer-reward:", relayer).encode(); - frame_support::storage::unhashed::take::(&key).is_some() - } -} - -impl DeliveryPayments for TestDeliveryPayments { - type Error = &'static str; - - fn pay_reward( - relayer: AccountId, - _total_messages: MessageNonce, - _valid_messages: MessageNonce, - _actual_weight: Weight, - ) { - let key = (b":delivery-relayer-reward:", relayer).encode(); - frame_support::storage::unhashed::put(&key, &true); - } -} - -/// Reward payments at the source chain during delivery confirmation transaction. -#[derive(Debug, Default)] -pub struct TestDeliveryConfirmationPayments; - -impl TestDeliveryConfirmationPayments { - /// Returns true if given relayer has been rewarded with given balance. The reward-paid flag is - /// cleared after the call. - pub fn is_reward_paid(relayer: AccountId, fee: TestMessageFee) -> bool { - let key = (b":relayer-reward:", relayer, fee).encode(); - frame_support::storage::unhashed::take::(&key).is_some() - } -} - -impl DeliveryConfirmationPayments for TestDeliveryConfirmationPayments { - type Error = &'static str; - - fn pay_reward( - _lane_id: LaneId, - messages_relayers: VecDeque>, - _confirmation_relayer: &AccountId, - received_range: &RangeInclusive, - ) -> MessageNonce { - let relayers_rewards = calc_relayers_rewards(messages_relayers, received_range); - let rewarded_relayers = relayers_rewards.len(); - for (relayer, reward) in &relayers_rewards { - let key = (b":relayer-reward:", relayer, reward).encode(); - frame_support::storage::unhashed::put(&key, &true); - } - - rewarded_relayers as _ - } -} - -/// Source header chain that is used in tests. 
-#[derive(Debug)] -pub struct TestSourceHeaderChain; - -impl SourceHeaderChain for TestSourceHeaderChain { - type MessagesProof = TestMessagesProof; - - fn verify_messages_proof( - proof: Self::MessagesProof, - _messages_count: u32, - ) -> Result, VerificationError> { - proof - .result - .map(|proof| proof.into_iter().collect()) - .map_err(|_| VerificationError::Other(TEST_ERROR)) - } -} - -/// Test message dispatcher. -#[derive(Debug)] -pub struct TestMessageDispatch; - -impl TestMessageDispatch { - pub fn deactivate() { - frame_support::storage::unhashed::put(b"TestMessageDispatch.IsCongested", &true) - } -} - -impl MessageDispatch for TestMessageDispatch { - type DispatchPayload = TestPayload; - type DispatchLevelResult = TestDispatchLevelResult; - - fn is_active() -> bool { - !frame_support::storage::unhashed::get_or_default::( - b"TestMessageDispatch.IsCongested", - ) - } - - fn dispatch_weight(message: &mut DispatchMessage) -> Weight { - match message.data.payload.as_ref() { - Ok(payload) => payload.declared_weight, - Err(_) => Weight::zero(), - } - } - - fn dispatch( - message: DispatchMessage, - ) -> MessageDispatchResult { - match message.data.payload.as_ref() { - Ok(payload) => payload.dispatch_result.clone(), - Err(_) => dispatch_result(0), - } - } -} - -/// Test callback, called during message delivery confirmation transaction. -pub struct TestOnMessagesDelivered; - -impl TestOnMessagesDelivered { - pub fn call_arguments() -> Option<(LaneId, MessageNonce)> { - frame_support::storage::unhashed::get(b"TestOnMessagesDelivered.OnMessagesDelivered") - } -} - -impl OnMessagesDelivered for TestOnMessagesDelivered { - fn on_messages_delivered(lane: LaneId, enqueued_messages: MessageNonce) { - frame_support::storage::unhashed::put( - b"TestOnMessagesDelivered.OnMessagesDelivered", - &(lane, enqueued_messages), - ); - } -} - -/// Return test lane message with given nonce and payload. 
-pub fn message(nonce: MessageNonce, payload: TestPayload) -> Message { - Message { key: MessageKey { lane_id: TEST_LANE_ID, nonce }, payload: payload.encode() } -} - -/// Return valid outbound message data, constructed from given payload. -pub fn outbound_message_data(payload: TestPayload) -> StoredMessagePayload { - StoredMessagePayload::::try_from(payload.encode()).expect("payload too large") -} - -/// Return valid inbound (dispatch) message data, constructed from given payload. -pub fn inbound_message_data(payload: TestPayload) -> DispatchMessageData { - DispatchMessageData { payload: Ok(payload) } -} - -/// Constructs message payload using given arguments and zero unspent weight. -pub const fn message_payload(id: u64, declared_weight: u64) -> TestPayload { - TestPayload { - id, - declared_weight: Weight::from_parts(declared_weight, 0), - dispatch_result: dispatch_result(0), - extra: Vec::new(), - } -} - -/// Returns message dispatch result with given unspent weight. -pub const fn dispatch_result( - unspent_weight: u64, -) -> MessageDispatchResult { - MessageDispatchResult { - unspent_weight: Weight::from_parts(unspent_weight, 0), - dispatch_level_result: (), - } -} - -/// Constructs unrewarded relayer entry from nonces range and relayer id. -pub fn unrewarded_relayer( - begin: MessageNonce, - end: MessageNonce, - relayer: TestRelayer, -) -> UnrewardedRelayer { - UnrewardedRelayer { relayer, messages: DeliveredMessages { begin, end } } -} - -/// Returns unrewarded relayers state at given lane. -pub fn inbound_unrewarded_relayers_state(lane: bp_messages::LaneId) -> UnrewardedRelayersState { - let inbound_lane_data = crate::InboundLanes::::get(lane).0; - UnrewardedRelayersState::from(&inbound_lane_data) -} - -/// Return test externalities to use in tests. 
-pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - pallet_balances::GenesisConfig:: { balances: vec![(ENDOWED_ACCOUNT, 1_000_000)] } - .assimilate_storage(&mut t) - .unwrap(); - sp_io::TestExternalities::new(t) -} - -/// Run pallet test. -pub fn run_test(test: impl FnOnce() -> T) -> T { - new_test_ext().execute_with(test) -} diff --git a/modules/messages/src/outbound_lane.rs b/modules/messages/src/outbound_lane.rs deleted file mode 100644 index 431c2cfb7eef3e8dd48e49c6ac37153ae64d57b6..0000000000000000000000000000000000000000 --- a/modules/messages/src/outbound_lane.rs +++ /dev/null @@ -1,424 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Everything about outgoing messages sending. - -use crate::{Config, LOG_TARGET}; - -use bp_messages::{DeliveredMessages, LaneId, MessageNonce, OutboundLaneData, UnrewardedRelayer}; -use codec::{Decode, Encode}; -use frame_support::{ - weights::{RuntimeDbWeight, Weight}, - BoundedVec, PalletError, -}; -use num_traits::Zero; -use scale_info::TypeInfo; -use sp_runtime::RuntimeDebug; -use sp_std::collections::vec_deque::VecDeque; - -/// Outbound lane storage. -pub trait OutboundLaneStorage { - type StoredMessagePayload; - - /// Lane id. 
- fn id(&self) -> LaneId; - /// Get lane data from the storage. - fn data(&self) -> OutboundLaneData; - /// Update lane data in the storage. - fn set_data(&mut self, data: OutboundLaneData); - /// Returns saved outbound message payload. - #[cfg(test)] - fn message(&self, nonce: &MessageNonce) -> Option; - /// Save outbound message in the storage. - fn save_message(&mut self, nonce: MessageNonce, message_payload: Self::StoredMessagePayload); - /// Remove outbound message from the storage. - fn remove_message(&mut self, nonce: &MessageNonce); -} - -/// Outbound message data wrapper that implements `MaxEncodedLen`. -pub type StoredMessagePayload = BoundedVec>::MaximalOutboundPayloadSize>; - -/// Result of messages receival confirmation. -#[derive(Encode, Decode, RuntimeDebug, PartialEq, Eq, PalletError, TypeInfo)] -pub enum ReceivalConfirmationError { - /// Bridged chain is trying to confirm more messages than we have generated. May be a result - /// of invalid bridged chain storage. - FailedToConfirmFutureMessages, - /// The unrewarded relayers vec contains an empty entry. May be a result of invalid bridged - /// chain storage. - EmptyUnrewardedRelayerEntry, - /// The unrewarded relayers vec contains non-consecutive entries. May be a result of invalid - /// bridged chain storage. - NonConsecutiveUnrewardedRelayerEntries, - /// The chain has more messages that need to be confirmed than there is in the proof. - TryingToConfirmMoreMessagesThanExpected, -} - -/// Outbound messages lane. -pub struct OutboundLane { - storage: S, -} - -impl OutboundLane { - /// Create new outbound lane backed by given storage. - pub fn new(storage: S) -> Self { - OutboundLane { storage } - } - - /// Get this lane data. - pub fn data(&self) -> OutboundLaneData { - self.storage.data() - } - - /// Send message over lane. - /// - /// Returns new message nonce. 
- pub fn send_message(&mut self, message_payload: S::StoredMessagePayload) -> MessageNonce { - let mut data = self.storage.data(); - let nonce = data.latest_generated_nonce + 1; - data.latest_generated_nonce = nonce; - - self.storage.save_message(nonce, message_payload); - self.storage.set_data(data); - - nonce - } - - /// Confirm messages delivery. - pub fn confirm_delivery( - &mut self, - max_allowed_messages: MessageNonce, - latest_delivered_nonce: MessageNonce, - relayers: &VecDeque>, - ) -> Result, ReceivalConfirmationError> { - let mut data = self.storage.data(); - let confirmed_messages = DeliveredMessages { - begin: data.latest_received_nonce.saturating_add(1), - end: latest_delivered_nonce, - }; - if confirmed_messages.total_messages() == 0 { - return Ok(None) - } - if confirmed_messages.end > data.latest_generated_nonce { - return Err(ReceivalConfirmationError::FailedToConfirmFutureMessages) - } - if confirmed_messages.total_messages() > max_allowed_messages { - // that the relayer has declared correct number of messages that the proof contains (it - // is checked outside of the function). But it may happen (but only if this/bridged - // chain storage is corrupted, though) that the actual number of confirmed messages if - // larger than declared. This would mean that 'reward loop' will take more time than the - // weight formula accounts, so we can't allow that. - log::trace!( - target: LOG_TARGET, - "Messages delivery proof contains too many messages to confirm: {} vs declared {}", - confirmed_messages.total_messages(), - max_allowed_messages, - ); - return Err(ReceivalConfirmationError::TryingToConfirmMoreMessagesThanExpected) - } - - ensure_unrewarded_relayers_are_correct(confirmed_messages.end, relayers)?; - - data.latest_received_nonce = confirmed_messages.end; - self.storage.set_data(data); - - Ok(Some(confirmed_messages)) - } - - /// Prune at most `max_messages_to_prune` already received messages. 
- /// - /// Returns weight, consumed by messages pruning and lane state update. - pub fn prune_messages( - &mut self, - db_weight: RuntimeDbWeight, - mut remaining_weight: Weight, - ) -> Weight { - let write_weight = db_weight.writes(1); - let two_writes_weight = write_weight + write_weight; - let mut spent_weight = Weight::zero(); - let mut data = self.storage.data(); - while remaining_weight.all_gte(two_writes_weight) && - data.oldest_unpruned_nonce <= data.latest_received_nonce - { - self.storage.remove_message(&data.oldest_unpruned_nonce); - - spent_weight += write_weight; - remaining_weight -= write_weight; - data.oldest_unpruned_nonce += 1; - } - - if !spent_weight.is_zero() { - spent_weight += write_weight; - self.storage.set_data(data); - } - - spent_weight - } -} - -/// Verifies unrewarded relayers vec. -/// -/// Returns `Err(_)` if unrewarded relayers vec contains invalid data, meaning that the bridged -/// chain has invalid runtime storage. -fn ensure_unrewarded_relayers_are_correct( - latest_received_nonce: MessageNonce, - relayers: &VecDeque>, -) -> Result<(), ReceivalConfirmationError> { - let mut expected_entry_begin = relayers.front().map(|entry| entry.messages.begin); - for entry in relayers { - // unrewarded relayer entry must have at least 1 unconfirmed message - // (guaranteed by the `InboundLane::receive_message()`) - if entry.messages.end < entry.messages.begin { - return Err(ReceivalConfirmationError::EmptyUnrewardedRelayerEntry) - } - // every entry must confirm range of messages that follows previous entry range - // (guaranteed by the `InboundLane::receive_message()`) - if expected_entry_begin != Some(entry.messages.begin) { - return Err(ReceivalConfirmationError::NonConsecutiveUnrewardedRelayerEntries) - } - expected_entry_begin = entry.messages.end.checked_add(1); - // entry can't confirm messages larger than `inbound_lane_data.latest_received_nonce()` - // (guaranteed by the `InboundLane::receive_message()`) - if entry.messages.end > 
latest_received_nonce { - return Err(ReceivalConfirmationError::FailedToConfirmFutureMessages) - } - } - - Ok(()) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - mock::{ - outbound_message_data, run_test, unrewarded_relayer, TestRelayer, TestRuntime, - REGULAR_PAYLOAD, TEST_LANE_ID, - }, - outbound_lane, - }; - use frame_support::weights::constants::RocksDbWeight; - use sp_std::ops::RangeInclusive; - - fn unrewarded_relayers( - nonces: RangeInclusive, - ) -> VecDeque> { - vec![unrewarded_relayer(*nonces.start(), *nonces.end(), 0)] - .into_iter() - .collect() - } - - fn delivered_messages(nonces: RangeInclusive) -> DeliveredMessages { - DeliveredMessages { begin: *nonces.start(), end: *nonces.end() } - } - - fn assert_3_messages_confirmation_fails( - latest_received_nonce: MessageNonce, - relayers: &VecDeque>, - ) -> Result, ReceivalConfirmationError> { - run_test(|| { - let mut lane = outbound_lane::(TEST_LANE_ID); - lane.send_message(outbound_message_data(REGULAR_PAYLOAD)); - lane.send_message(outbound_message_data(REGULAR_PAYLOAD)); - lane.send_message(outbound_message_data(REGULAR_PAYLOAD)); - assert_eq!(lane.storage.data().latest_generated_nonce, 3); - assert_eq!(lane.storage.data().latest_received_nonce, 0); - let result = lane.confirm_delivery(3, latest_received_nonce, relayers); - assert_eq!(lane.storage.data().latest_generated_nonce, 3); - assert_eq!(lane.storage.data().latest_received_nonce, 0); - result - }) - } - - #[test] - fn send_message_works() { - run_test(|| { - let mut lane = outbound_lane::(TEST_LANE_ID); - assert_eq!(lane.storage.data().latest_generated_nonce, 0); - assert_eq!(lane.send_message(outbound_message_data(REGULAR_PAYLOAD)), 1); - assert!(lane.storage.message(&1).is_some()); - assert_eq!(lane.storage.data().latest_generated_nonce, 1); - }); - } - - #[test] - fn confirm_delivery_works() { - run_test(|| { - let mut lane = outbound_lane::(TEST_LANE_ID); - 
assert_eq!(lane.send_message(outbound_message_data(REGULAR_PAYLOAD)), 1); - assert_eq!(lane.send_message(outbound_message_data(REGULAR_PAYLOAD)), 2); - assert_eq!(lane.send_message(outbound_message_data(REGULAR_PAYLOAD)), 3); - assert_eq!(lane.storage.data().latest_generated_nonce, 3); - assert_eq!(lane.storage.data().latest_received_nonce, 0); - assert_eq!( - lane.confirm_delivery(3, 3, &unrewarded_relayers(1..=3)), - Ok(Some(delivered_messages(1..=3))), - ); - assert_eq!(lane.storage.data().latest_generated_nonce, 3); - assert_eq!(lane.storage.data().latest_received_nonce, 3); - }); - } - - #[test] - fn confirm_delivery_rejects_nonce_lesser_than_latest_received() { - run_test(|| { - let mut lane = outbound_lane::(TEST_LANE_ID); - lane.send_message(outbound_message_data(REGULAR_PAYLOAD)); - lane.send_message(outbound_message_data(REGULAR_PAYLOAD)); - lane.send_message(outbound_message_data(REGULAR_PAYLOAD)); - assert_eq!(lane.storage.data().latest_generated_nonce, 3); - assert_eq!(lane.storage.data().latest_received_nonce, 0); - assert_eq!( - lane.confirm_delivery(3, 3, &unrewarded_relayers(1..=3)), - Ok(Some(delivered_messages(1..=3))), - ); - assert_eq!(lane.confirm_delivery(3, 3, &unrewarded_relayers(1..=3)), Ok(None),); - assert_eq!(lane.storage.data().latest_generated_nonce, 3); - assert_eq!(lane.storage.data().latest_received_nonce, 3); - - assert_eq!(lane.confirm_delivery(1, 2, &unrewarded_relayers(1..=1)), Ok(None),); - assert_eq!(lane.storage.data().latest_generated_nonce, 3); - assert_eq!(lane.storage.data().latest_received_nonce, 3); - }); - } - - #[test] - fn confirm_delivery_rejects_nonce_larger_than_last_generated() { - assert_eq!( - assert_3_messages_confirmation_fails(10, &unrewarded_relayers(1..=10),), - Err(ReceivalConfirmationError::FailedToConfirmFutureMessages), - ); - } - - #[test] - fn confirm_delivery_fails_if_entry_confirms_future_messages() { - assert_eq!( - assert_3_messages_confirmation_fails( - 3, - &unrewarded_relayers(1..=1) - 
.into_iter() - .chain(unrewarded_relayers(2..=30).into_iter()) - .chain(unrewarded_relayers(3..=3).into_iter()) - .collect(), - ), - Err(ReceivalConfirmationError::FailedToConfirmFutureMessages), - ); - } - - #[test] - #[allow(clippy::reversed_empty_ranges)] - fn confirm_delivery_fails_if_entry_is_empty() { - assert_eq!( - assert_3_messages_confirmation_fails( - 3, - &unrewarded_relayers(1..=1) - .into_iter() - .chain(unrewarded_relayers(2..=1).into_iter()) - .chain(unrewarded_relayers(2..=3).into_iter()) - .collect(), - ), - Err(ReceivalConfirmationError::EmptyUnrewardedRelayerEntry), - ); - } - - #[test] - fn confirm_delivery_fails_if_entries_are_non_consecutive() { - assert_eq!( - assert_3_messages_confirmation_fails( - 3, - &unrewarded_relayers(1..=1) - .into_iter() - .chain(unrewarded_relayers(3..=3).into_iter()) - .chain(unrewarded_relayers(2..=2).into_iter()) - .collect(), - ), - Err(ReceivalConfirmationError::NonConsecutiveUnrewardedRelayerEntries), - ); - } - - #[test] - fn prune_messages_works() { - run_test(|| { - let mut lane = outbound_lane::(TEST_LANE_ID); - // when lane is empty, nothing is pruned - assert_eq!( - lane.prune_messages(RocksDbWeight::get(), RocksDbWeight::get().writes(101)), - Weight::zero() - ); - assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1); - // when nothing is confirmed, nothing is pruned - lane.send_message(outbound_message_data(REGULAR_PAYLOAD)); - lane.send_message(outbound_message_data(REGULAR_PAYLOAD)); - lane.send_message(outbound_message_data(REGULAR_PAYLOAD)); - assert!(lane.storage.message(&1).is_some()); - assert!(lane.storage.message(&2).is_some()); - assert!(lane.storage.message(&3).is_some()); - assert_eq!( - lane.prune_messages(RocksDbWeight::get(), RocksDbWeight::get().writes(101)), - Weight::zero() - ); - assert_eq!(lane.storage.data().oldest_unpruned_nonce, 1); - // after confirmation, some messages are received - assert_eq!( - lane.confirm_delivery(2, 2, &unrewarded_relayers(1..=2)), - 
Ok(Some(delivered_messages(1..=2))), - ); - assert_eq!( - lane.prune_messages(RocksDbWeight::get(), RocksDbWeight::get().writes(101)), - RocksDbWeight::get().writes(3), - ); - assert!(lane.storage.message(&1).is_none()); - assert!(lane.storage.message(&2).is_none()); - assert!(lane.storage.message(&3).is_some()); - assert_eq!(lane.storage.data().oldest_unpruned_nonce, 3); - // after last message is confirmed, everything is pruned - assert_eq!( - lane.confirm_delivery(1, 3, &unrewarded_relayers(3..=3)), - Ok(Some(delivered_messages(3..=3))), - ); - assert_eq!( - lane.prune_messages(RocksDbWeight::get(), RocksDbWeight::get().writes(101)), - RocksDbWeight::get().writes(2), - ); - assert!(lane.storage.message(&1).is_none()); - assert!(lane.storage.message(&2).is_none()); - assert!(lane.storage.message(&3).is_none()); - assert_eq!(lane.storage.data().oldest_unpruned_nonce, 4); - }); - } - - #[test] - fn confirm_delivery_detects_when_more_than_expected_messages_are_confirmed() { - run_test(|| { - let mut lane = outbound_lane::(TEST_LANE_ID); - lane.send_message(outbound_message_data(REGULAR_PAYLOAD)); - lane.send_message(outbound_message_data(REGULAR_PAYLOAD)); - lane.send_message(outbound_message_data(REGULAR_PAYLOAD)); - assert_eq!( - lane.confirm_delivery(0, 3, &unrewarded_relayers(1..=3)), - Err(ReceivalConfirmationError::TryingToConfirmMoreMessagesThanExpected), - ); - assert_eq!( - lane.confirm_delivery(2, 3, &unrewarded_relayers(1..=3)), - Err(ReceivalConfirmationError::TryingToConfirmMoreMessagesThanExpected), - ); - assert_eq!( - lane.confirm_delivery(3, 3, &unrewarded_relayers(1..=3)), - Ok(Some(delivered_messages(1..=3))), - ); - }); - } -} diff --git a/modules/messages/src/weights.rs b/modules/messages/src/weights.rs deleted file mode 100644 index 5bf7d56756079df8a5e469b9c50ba7607b65d983..0000000000000000000000000000000000000000 --- a/modules/messages/src/weights.rs +++ /dev/null @@ -1,525 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. 
-// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Autogenerated weights for pallet_bridge_messages -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-23, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `covid`, CPU: `11th Gen Intel(R) Core(TM) i7-11800H @ 2.30GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 - -// Executed Command: -// target/release/unknown-bridge-node -// benchmark -// pallet -// --chain=dev -// --steps=50 -// --repeat=20 -// --pallet=pallet_bridge_messages -// --extrinsic=* -// --execution=wasm -// --wasm-execution=Compiled -// --heap-pages=4096 -// --output=./modules/messages/src/weights.rs -// --template=./.maintain/bridge-weight-template.hbs - -#![allow(clippy::all)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{ - traits::Get, - weights::{constants::RocksDbWeight, Weight}, -}; -use sp_std::marker::PhantomData; - -/// Weight functions needed for pallet_bridge_messages. 
-pub trait WeightInfo { - fn receive_single_message_proof() -> Weight; - fn receive_two_messages_proof() -> Weight; - fn receive_single_message_proof_with_outbound_lane_state() -> Weight; - fn receive_single_message_proof_1_kb() -> Weight; - fn receive_single_message_proof_16_kb() -> Weight; - fn receive_delivery_proof_for_single_message() -> Weight; - fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight; - fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight; - fn receive_single_message_proof_with_dispatch(i: u32) -> Weight; -} - -/// Weights for `pallet_bridge_messages` that are generated using one of the Bridge testnets. -/// -/// Those weights are test only and must never be used in production. -pub struct BridgeWeight(PhantomData); -impl WeightInfo for BridgeWeight { - /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) - /// - /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), - /// added: 497, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) - /// - /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), - /// added: 2048, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) - /// - /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: - /// 51655, mode: MaxEncodedLen) - fn receive_single_message_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `618` - // Estimated: `57170` - // Minimum execution time: 52_321 nanoseconds. 
- Weight::from_parts(54_478_000, 57170) - .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) - /// - /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), - /// added: 497, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) - /// - /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), - /// added: 2048, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) - /// - /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: - /// 51655, mode: MaxEncodedLen) - fn receive_two_messages_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `618` - // Estimated: `57170` - // Minimum execution time: 64_597 nanoseconds. - Weight::from_parts(69_267_000, 57170) - .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) - /// - /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), - /// added: 497, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) - /// - /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), - /// added: 2048, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) - /// - /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: - /// 51655, mode: MaxEncodedLen) - fn receive_single_message_proof_with_outbound_lane_state() -> Weight { - // Proof Size summary in bytes: - // Measured: `618` - // Estimated: `57170` - // Minimum execution time: 64_079 nanoseconds. 
- Weight::from_parts(65_905_000, 57170) - .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) - /// - /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), - /// added: 497, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) - /// - /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), - /// added: 2048, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) - /// - /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: - /// 51655, mode: MaxEncodedLen) - fn receive_single_message_proof_1_kb() -> Weight { - // Proof Size summary in bytes: - // Measured: `618` - // Estimated: `57170` - // Minimum execution time: 50_588 nanoseconds. - Weight::from_parts(53_544_000, 57170) - .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) - /// - /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), - /// added: 497, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) - /// - /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), - /// added: 2048, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) - /// - /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: - /// 51655, mode: MaxEncodedLen) - fn receive_single_message_proof_16_kb() -> Weight { - // Proof Size summary in bytes: - // Measured: `618` - // Estimated: `57170` - // Minimum execution time: 78_269 nanoseconds. 
- Weight::from_parts(81_748_000, 57170) - .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) - /// - /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), - /// added: 497, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) - /// - /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), - /// added: 2048, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownMessages OutboundLanes (r:1 w:1) - /// - /// Proof: BridgeUnknownMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: - /// 539, mode: MaxEncodedLen) - /// - /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) - /// - /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(65), added: 2540, - /// mode: MaxEncodedLen) - fn receive_delivery_proof_for_single_message() -> Weight { - // Proof Size summary in bytes: - // Measured: `579` - // Estimated: `9584` - // Minimum execution time: 45_786 nanoseconds. 
- Weight::from_parts(47_382_000, 9584) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) - } - /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) - /// - /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), - /// added: 497, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) - /// - /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), - /// added: 2048, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownMessages OutboundLanes (r:1 w:1) - /// - /// Proof: BridgeUnknownMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: - /// 539, mode: MaxEncodedLen) - /// - /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) - /// - /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(65), added: 2540, - /// mode: MaxEncodedLen) - fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { - // Proof Size summary in bytes: - // Measured: `596` - // Estimated: `9584` - // Minimum execution time: 44_544 nanoseconds. 
- Weight::from_parts(45_451_000, 9584) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) - } - /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) - /// - /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), - /// added: 497, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) - /// - /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), - /// added: 2048, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownMessages OutboundLanes (r:1 w:1) - /// - /// Proof: BridgeUnknownMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: - /// 539, mode: MaxEncodedLen) - /// - /// Storage: BridgeRelayers RelayerRewards (r:2 w:2) - /// - /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(65), added: 2540, - /// mode: MaxEncodedLen) - fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { - // Proof Size summary in bytes: - // Measured: `596` - // Estimated: `12124` - // Minimum execution time: 47_344 nanoseconds. - Weight::from_parts(48_311_000, 12124) - .saturating_add(T::DbWeight::get().reads(5_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - } - /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) - /// - /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), - /// added: 497, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) - /// - /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), - /// added: 2048, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) - /// - /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: - /// 51655, mode: MaxEncodedLen) - /// - /// The range of component `i` is `[128, 2048]`. 
- fn receive_single_message_proof_with_dispatch(i: u32) -> Weight { - // Proof Size summary in bytes: - // Measured: `618` - // Estimated: `57170` - // Minimum execution time: 52_385 nanoseconds. - Weight::from_parts(54_919_468, 57170) - // Standard Error: 108 - .saturating_add(Weight::from_parts(3_286, 0).saturating_mul(i.into())) - .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } -} - -// For backwards compatibility and tests -impl WeightInfo for () { - /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) - /// - /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), - /// added: 497, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) - /// - /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), - /// added: 2048, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) - /// - /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: - /// 51655, mode: MaxEncodedLen) - fn receive_single_message_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `618` - // Estimated: `57170` - // Minimum execution time: 52_321 nanoseconds. 
- Weight::from_parts(54_478_000, 57170) - .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) - /// - /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), - /// added: 497, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) - /// - /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), - /// added: 2048, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) - /// - /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: - /// 51655, mode: MaxEncodedLen) - fn receive_two_messages_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `618` - // Estimated: `57170` - // Minimum execution time: 64_597 nanoseconds. - Weight::from_parts(69_267_000, 57170) - .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) - /// - /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), - /// added: 497, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) - /// - /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), - /// added: 2048, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) - /// - /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: - /// 51655, mode: MaxEncodedLen) - fn receive_single_message_proof_with_outbound_lane_state() -> Weight { - // Proof Size summary in bytes: - // Measured: `618` - // Estimated: `57170` - // Minimum execution time: 64_079 nanoseconds. 
- Weight::from_parts(65_905_000, 57170) - .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) - /// - /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), - /// added: 497, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) - /// - /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), - /// added: 2048, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) - /// - /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: - /// 51655, mode: MaxEncodedLen) - fn receive_single_message_proof_1_kb() -> Weight { - // Proof Size summary in bytes: - // Measured: `618` - // Estimated: `57170` - // Minimum execution time: 50_588 nanoseconds. - Weight::from_parts(53_544_000, 57170) - .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) - /// - /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), - /// added: 497, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) - /// - /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), - /// added: 2048, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) - /// - /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: - /// 51655, mode: MaxEncodedLen) - fn receive_single_message_proof_16_kb() -> Weight { - // Proof Size summary in bytes: - // Measured: `618` - // Estimated: `57170` - // Minimum execution time: 78_269 nanoseconds. 
- Weight::from_parts(81_748_000, 57170) - .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) - /// - /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), - /// added: 497, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) - /// - /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), - /// added: 2048, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownMessages OutboundLanes (r:1 w:1) - /// - /// Proof: BridgeUnknownMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: - /// 539, mode: MaxEncodedLen) - /// - /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) - /// - /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(65), added: 2540, - /// mode: MaxEncodedLen) - fn receive_delivery_proof_for_single_message() -> Weight { - // Proof Size summary in bytes: - // Measured: `579` - // Estimated: `9584` - // Minimum execution time: 45_786 nanoseconds. 
- Weight::from_parts(47_382_000, 9584) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - } - /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) - /// - /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), - /// added: 497, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) - /// - /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), - /// added: 2048, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownMessages OutboundLanes (r:1 w:1) - /// - /// Proof: BridgeUnknownMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: - /// 539, mode: MaxEncodedLen) - /// - /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) - /// - /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(65), added: 2540, - /// mode: MaxEncodedLen) - fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { - // Proof Size summary in bytes: - // Measured: `596` - // Estimated: `9584` - // Minimum execution time: 44_544 nanoseconds. 
- Weight::from_parts(45_451_000, 9584) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - } - /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) - /// - /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), - /// added: 497, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) - /// - /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), - /// added: 2048, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownMessages OutboundLanes (r:1 w:1) - /// - /// Proof: BridgeUnknownMessages OutboundLanes (max_values: Some(1), max_size: Some(44), added: - /// 539, mode: MaxEncodedLen) - /// - /// Storage: BridgeRelayers RelayerRewards (r:2 w:2) - /// - /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(65), added: 2540, - /// mode: MaxEncodedLen) - fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { - // Proof Size summary in bytes: - // Measured: `596` - // Estimated: `12124` - // Minimum execution time: 47_344 nanoseconds. - Weight::from_parts(48_311_000, 12124) - .saturating_add(RocksDbWeight::get().reads(5_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - } - /// Storage: BridgeUnknownMessages PalletOperatingMode (r:1 w:0) - /// - /// Proof: BridgeUnknownMessages PalletOperatingMode (max_values: Some(1), max_size: Some(2), - /// added: 497, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) - /// - /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), - /// added: 2048, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownMessages InboundLanes (r:1 w:1) - /// - /// Proof: BridgeUnknownMessages InboundLanes (max_values: None, max_size: Some(49180), added: - /// 51655, mode: MaxEncodedLen) - /// - /// The range of component `i` is `[128, 2048]`. 
- fn receive_single_message_proof_with_dispatch(i: u32) -> Weight { - // Proof Size summary in bytes: - // Measured: `618` - // Estimated: `57170` - // Minimum execution time: 52_385 nanoseconds. - Weight::from_parts(54_919_468, 57170) - // Standard Error: 108 - .saturating_add(Weight::from_parts(3_286, 0).saturating_mul(i.into())) - .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } -} diff --git a/modules/messages/src/weights_ext.rs b/modules/messages/src/weights_ext.rs deleted file mode 100644 index c12e04f692bf8304fb58d7c97ec50d1b860ccb56..0000000000000000000000000000000000000000 --- a/modules/messages/src/weights_ext.rs +++ /dev/null @@ -1,488 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Weight-related utilities. - -use crate::weights::WeightInfo; - -use bp_messages::{MessageNonce, UnrewardedRelayersState}; -use bp_runtime::{PreComputedSize, Size}; -use frame_support::weights::Weight; - -/// Size of the message being delivered in benchmarks. -pub const EXPECTED_DEFAULT_MESSAGE_LENGTH: u32 = 128; - -/// We assume that size of signed extensions on all our chains and size of all 'small' arguments of -/// calls we're checking here would fit 1KB. 
-const SIGNED_EXTENSIONS_SIZE: u32 = 1024; - -/// Number of extra bytes (excluding size of storage value itself) of storage proof. -/// This mostly depends on number of entries (and their density) in the storage trie. -/// Some reserve is reserved to account future chain growth. -pub const EXTRA_STORAGE_PROOF_SIZE: u32 = 1024; - -/// Ensure that weights from `WeightInfoExt` implementation are looking correct. -pub fn ensure_weights_are_correct() { - // all components of weight formulae must have zero `proof_size`, because the `proof_size` is - // benchmarked using `MaxEncodedLen` approach and there are no components that cause additional - // db reads - - // verify `receive_messages_proof` weight components - assert_ne!(W::receive_messages_proof_overhead().ref_time(), 0); - assert_ne!(W::receive_messages_proof_overhead().proof_size(), 0); - // W::receive_messages_proof_messages_overhead(1).ref_time() may be zero because: - // the message processing code (`InboundLane::receive_message`) is minimal and may not be - // accounted by our benchmarks - assert_eq!(W::receive_messages_proof_messages_overhead(1).proof_size(), 0); - // W::receive_messages_proof_outbound_lane_state_overhead().ref_time() may be zero because: - // the outbound lane state processing code (`InboundLane::receive_state_update`) is minimal and - // may not be accounted by our benchmarks - assert_eq!(W::receive_messages_proof_outbound_lane_state_overhead().proof_size(), 0); - assert_ne!(W::storage_proof_size_overhead(1).ref_time(), 0); - assert_eq!(W::storage_proof_size_overhead(1).proof_size(), 0); - - // verify `receive_messages_delivery_proof` weight components - assert_ne!(W::receive_messages_delivery_proof_overhead().ref_time(), 0); - assert_ne!(W::receive_messages_delivery_proof_overhead().proof_size(), 0); - // W::receive_messages_delivery_proof_messages_overhead(1).ref_time() may be zero because: - // there's no code that iterates over confirmed messages in confirmation transaction - 
assert_eq!(W::receive_messages_delivery_proof_messages_overhead(1).proof_size(), 0); - // W::receive_messages_delivery_proof_relayers_overhead(1).ref_time() may be zero because: - // runtime **can** choose not to pay any rewards to relayers - // W::receive_messages_delivery_proof_relayers_overhead(1).proof_size() is an exception - // it may or may not cause additional db reads, so proof size may vary - assert_ne!(W::storage_proof_size_overhead(1).ref_time(), 0); - assert_eq!(W::storage_proof_size_overhead(1).proof_size(), 0); - - // verify `receive_message_proof` weight - let receive_messages_proof_weight = - W::receive_messages_proof_weight(&PreComputedSize(1), 10, Weight::zero()); - assert_ne!(receive_messages_proof_weight.ref_time(), 0); - assert_ne!(receive_messages_proof_weight.proof_size(), 0); - messages_proof_size_does_not_affect_proof_size::(); - messages_count_does_not_affect_proof_size::(); - - // verify `receive_message_proof` weight - let receive_messages_delivery_proof_weight = W::receive_messages_delivery_proof_weight( - &PreComputedSize(1), - &UnrewardedRelayersState::default(), - ); - assert_ne!(receive_messages_delivery_proof_weight.ref_time(), 0); - assert_ne!(receive_messages_delivery_proof_weight.proof_size(), 0); - messages_delivery_proof_size_does_not_affect_proof_size::(); - total_messages_in_delivery_proof_does_not_affect_proof_size::(); -} - -/// Ensure that we're able to receive maximal (by-size and by-weight) message from other chain. 
-pub fn ensure_able_to_receive_message( - max_extrinsic_size: u32, - max_extrinsic_weight: Weight, - max_incoming_message_proof_size: u32, - max_incoming_message_dispatch_weight: Weight, -) { - // verify that we're able to receive proof of maximal-size message - let max_delivery_transaction_size = - max_incoming_message_proof_size.saturating_add(SIGNED_EXTENSIONS_SIZE); - assert!( - max_delivery_transaction_size <= max_extrinsic_size, - "Size of maximal message delivery transaction {max_incoming_message_proof_size} + {SIGNED_EXTENSIONS_SIZE} is larger than maximal possible transaction size {max_extrinsic_size}", - ); - - // verify that we're able to receive proof of maximal-size message with maximal dispatch weight - let max_delivery_transaction_dispatch_weight = W::receive_messages_proof_weight( - &PreComputedSize( - (max_incoming_message_proof_size + W::expected_extra_storage_proof_size()) as usize, - ), - 1, - max_incoming_message_dispatch_weight, - ); - assert!( - max_delivery_transaction_dispatch_weight.all_lte(max_extrinsic_weight), - "Weight of maximal message delivery transaction + {max_delivery_transaction_dispatch_weight} is larger than maximal possible transaction weight {max_extrinsic_weight}", - ); -} - -/// Ensure that we're able to receive maximal confirmation from other chain. 
-pub fn ensure_able_to_receive_confirmation( - max_extrinsic_size: u32, - max_extrinsic_weight: Weight, - max_inbound_lane_data_proof_size_from_peer_chain: u32, - max_unrewarded_relayer_entries_at_peer_inbound_lane: MessageNonce, - max_unconfirmed_messages_at_inbound_lane: MessageNonce, -) { - // verify that we're able to receive confirmation of maximal-size - let max_confirmation_transaction_size = - max_inbound_lane_data_proof_size_from_peer_chain.saturating_add(SIGNED_EXTENSIONS_SIZE); - assert!( - max_confirmation_transaction_size <= max_extrinsic_size, - "Size of maximal message delivery confirmation transaction {max_inbound_lane_data_proof_size_from_peer_chain} + {SIGNED_EXTENSIONS_SIZE} is larger than maximal possible transaction size {max_extrinsic_size}", - ); - - // verify that we're able to reward maximal number of relayers that have delivered maximal - // number of messages - let max_confirmation_transaction_dispatch_weight = W::receive_messages_delivery_proof_weight( - &PreComputedSize(max_inbound_lane_data_proof_size_from_peer_chain as usize), - &UnrewardedRelayersState { - unrewarded_relayer_entries: max_unrewarded_relayer_entries_at_peer_inbound_lane, - total_messages: max_unconfirmed_messages_at_inbound_lane, - ..Default::default() - }, - ); - assert!( - max_confirmation_transaction_dispatch_weight.all_lte(max_extrinsic_weight), - "Weight of maximal confirmation transaction {max_confirmation_transaction_dispatch_weight} is larger than maximal possible transaction weight {max_extrinsic_weight}", - ); -} - -/// Panics if `proof_size` of message delivery call depends on the message proof size. 
-fn messages_proof_size_does_not_affect_proof_size() { - let dispatch_weight = Weight::zero(); - let weight_when_proof_size_is_8k = - W::receive_messages_proof_weight(&PreComputedSize(8 * 1024), 1, dispatch_weight); - let weight_when_proof_size_is_16k = - W::receive_messages_proof_weight(&PreComputedSize(16 * 1024), 1, dispatch_weight); - - ensure_weight_components_are_not_zero(weight_when_proof_size_is_8k); - ensure_weight_components_are_not_zero(weight_when_proof_size_is_16k); - ensure_proof_size_is_the_same( - weight_when_proof_size_is_8k, - weight_when_proof_size_is_16k, - "Messages proof size does not affect values that we read from our storage", - ); -} - -/// Panics if `proof_size` of message delivery call depends on the messages count. -/// -/// In practice, it will depend on the messages count, because most probably every -/// message will read something from db during dispatch. But this must be accounted -/// by the `dispatch_weight`. -fn messages_count_does_not_affect_proof_size() { - let messages_proof_size = PreComputedSize(8 * 1024); - let dispatch_weight = Weight::zero(); - let weight_of_one_incoming_message = - W::receive_messages_proof_weight(&messages_proof_size, 1, dispatch_weight); - let weight_of_two_incoming_messages = - W::receive_messages_proof_weight(&messages_proof_size, 2, dispatch_weight); - - ensure_weight_components_are_not_zero(weight_of_one_incoming_message); - ensure_weight_components_are_not_zero(weight_of_two_incoming_messages); - ensure_proof_size_is_the_same( - weight_of_one_incoming_message, - weight_of_two_incoming_messages, - "Number of same-lane incoming messages does not affect values that we read from our storage", - ); -} - -/// Panics if `proof_size` of delivery confirmation call depends on the delivery proof size. 
-fn messages_delivery_proof_size_does_not_affect_proof_size() { - let relayers_state = UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 1, - total_messages: 1, - last_delivered_nonce: 1, - }; - let weight_when_proof_size_is_8k = - W::receive_messages_delivery_proof_weight(&PreComputedSize(8 * 1024), &relayers_state); - let weight_when_proof_size_is_16k = - W::receive_messages_delivery_proof_weight(&PreComputedSize(16 * 1024), &relayers_state); - - ensure_weight_components_are_not_zero(weight_when_proof_size_is_8k); - ensure_weight_components_are_not_zero(weight_when_proof_size_is_16k); - ensure_proof_size_is_the_same( - weight_when_proof_size_is_8k, - weight_when_proof_size_is_16k, - "Messages delivery proof size does not affect values that we read from our storage", - ); -} - -/// Panics if `proof_size` of delivery confirmation call depends on the number of confirmed -/// messages. -fn total_messages_in_delivery_proof_does_not_affect_proof_size() { - let proof_size = PreComputedSize(8 * 1024); - let weight_when_1k_messages_confirmed = W::receive_messages_delivery_proof_weight( - &proof_size, - &UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 1, - total_messages: 1024, - last_delivered_nonce: 1, - }, - ); - let weight_when_2k_messages_confirmed = W::receive_messages_delivery_proof_weight( - &proof_size, - &UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 1, - total_messages: 2048, - last_delivered_nonce: 1, - }, - ); - - ensure_weight_components_are_not_zero(weight_when_1k_messages_confirmed); - ensure_weight_components_are_not_zero(weight_when_2k_messages_confirmed); - ensure_proof_size_is_the_same( - weight_when_1k_messages_confirmed, - weight_when_2k_messages_confirmed, - "More messages in delivery proof does not affect values that we read from our storage", - ); -} - -/// Panics if either Weight' `proof_size` or `ref_time` are zero. 
-fn ensure_weight_components_are_not_zero(weight: Weight) { - assert_ne!(weight.ref_time(), 0); - assert_ne!(weight.proof_size(), 0); -} - -/// Panics if `proof_size` of `weight1` is not equal to `proof_size` of `weight2`. -fn ensure_proof_size_is_the_same(weight1: Weight, weight2: Weight, msg: &str) { - assert_eq!( - weight1.proof_size(), - weight2.proof_size(), - "{msg}: {} must be equal to {}", - weight1.proof_size(), - weight2.proof_size(), - ); -} - -/// Extended weight info. -pub trait WeightInfoExt: WeightInfo { - /// Size of proof that is already included in the single message delivery weight. - /// - /// The message submitter (at source chain) has already covered this cost. But there are two - /// factors that may increase proof size: (1) the message size may be larger than predefined - /// and (2) relayer may add extra trie nodes to the proof. So if proof size is larger than - /// this value, we're going to charge relayer for that. - fn expected_extra_storage_proof_size() -> u32; - - // Our configuration assumes that the runtime has special signed extensions used to: - // - // 1) reject obsolete delivery and confirmation transactions; - // - // 2) refund transaction cost to relayer and register his rewards. - // - // The checks in (1) are trivial, so its computation weight may be ignored. And we only touch - // storage values that are read during the call. So we may ignore the weight of this check. - // - // However, during (2) we read and update storage values of other pallets - // (`pallet-bridge-relayers` and balances/assets pallet). So we need to add this weight to the - // weight of our call. Hence two following methods. - - /// Extra weight that is added to the `receive_messages_proof` call weight by signed extensions - /// that are declared at runtime level. 
- fn receive_messages_proof_overhead_from_runtime() -> Weight; - - /// Extra weight that is added to the `receive_messages_delivery_proof` call weight by signed - /// extensions that are declared at runtime level. - fn receive_messages_delivery_proof_overhead_from_runtime() -> Weight; - - // Functions that are directly mapped to extrinsics weights. - - /// Weight of message delivery extrinsic. - fn receive_messages_proof_weight( - proof: &impl Size, - messages_count: u32, - dispatch_weight: Weight, - ) -> Weight { - // basic components of extrinsic weight - let transaction_overhead = Self::receive_messages_proof_overhead(); - let transaction_overhead_from_runtime = - Self::receive_messages_proof_overhead_from_runtime(); - let outbound_state_delivery_weight = - Self::receive_messages_proof_outbound_lane_state_overhead(); - let messages_delivery_weight = - Self::receive_messages_proof_messages_overhead(MessageNonce::from(messages_count)); - let messages_dispatch_weight = dispatch_weight; - - // proof size overhead weight - let expected_proof_size = EXPECTED_DEFAULT_MESSAGE_LENGTH - .saturating_mul(messages_count.saturating_sub(1)) - .saturating_add(Self::expected_extra_storage_proof_size()); - let actual_proof_size = proof.size(); - let proof_size_overhead = Self::storage_proof_size_overhead( - actual_proof_size.saturating_sub(expected_proof_size), - ); - - transaction_overhead - .saturating_add(transaction_overhead_from_runtime) - .saturating_add(outbound_state_delivery_weight) - .saturating_add(messages_delivery_weight) - .saturating_add(messages_dispatch_weight) - .saturating_add(proof_size_overhead) - } - - /// Weight of confirmation delivery extrinsic. 
- fn receive_messages_delivery_proof_weight( - proof: &impl Size, - relayers_state: &UnrewardedRelayersState, - ) -> Weight { - // basic components of extrinsic weight - let transaction_overhead = Self::receive_messages_delivery_proof_overhead(); - let transaction_overhead_from_runtime = - Self::receive_messages_delivery_proof_overhead_from_runtime(); - let messages_overhead = - Self::receive_messages_delivery_proof_messages_overhead(relayers_state.total_messages); - let relayers_overhead = Self::receive_messages_delivery_proof_relayers_overhead( - relayers_state.unrewarded_relayer_entries, - ); - - // proof size overhead weight - let expected_proof_size = Self::expected_extra_storage_proof_size(); - let actual_proof_size = proof.size(); - let proof_size_overhead = Self::storage_proof_size_overhead( - actual_proof_size.saturating_sub(expected_proof_size), - ); - - transaction_overhead - .saturating_add(transaction_overhead_from_runtime) - .saturating_add(messages_overhead) - .saturating_add(relayers_overhead) - .saturating_add(proof_size_overhead) - } - - // Functions that are used by extrinsics weights formulas. - - /// Returns weight overhead of message delivery transaction (`receive_messages_proof`). - fn receive_messages_proof_overhead() -> Weight { - let weight_of_two_messages_and_two_tx_overheads = - Self::receive_single_message_proof().saturating_mul(2); - let weight_of_two_messages_and_single_tx_overhead = Self::receive_two_messages_proof(); - weight_of_two_messages_and_two_tx_overheads - .saturating_sub(weight_of_two_messages_and_single_tx_overhead) - } - - /// Returns weight that needs to be accounted when receiving given a number of messages with - /// message delivery transaction (`receive_messages_proof`). 
- fn receive_messages_proof_messages_overhead(messages: MessageNonce) -> Weight { - let weight_of_two_messages_and_single_tx_overhead = Self::receive_two_messages_proof(); - let weight_of_single_message_and_single_tx_overhead = Self::receive_single_message_proof(); - weight_of_two_messages_and_single_tx_overhead - .saturating_sub(weight_of_single_message_and_single_tx_overhead) - .saturating_mul(messages as _) - } - - /// Returns weight that needs to be accounted when message delivery transaction - /// (`receive_messages_proof`) is carrying outbound lane state proof. - fn receive_messages_proof_outbound_lane_state_overhead() -> Weight { - let weight_of_single_message_and_lane_state = - Self::receive_single_message_proof_with_outbound_lane_state(); - let weight_of_single_message = Self::receive_single_message_proof(); - weight_of_single_message_and_lane_state.saturating_sub(weight_of_single_message) - } - - /// Returns weight overhead of delivery confirmation transaction - /// (`receive_messages_delivery_proof`). - fn receive_messages_delivery_proof_overhead() -> Weight { - let weight_of_two_messages_and_two_tx_overheads = - Self::receive_delivery_proof_for_single_message().saturating_mul(2); - let weight_of_two_messages_and_single_tx_overhead = - Self::receive_delivery_proof_for_two_messages_by_single_relayer(); - weight_of_two_messages_and_two_tx_overheads - .saturating_sub(weight_of_two_messages_and_single_tx_overhead) - } - - /// Returns weight that needs to be accounted when receiving confirmations for given a number of - /// messages with delivery confirmation transaction (`receive_messages_delivery_proof`). 
- fn receive_messages_delivery_proof_messages_overhead(messages: MessageNonce) -> Weight { - let weight_of_two_messages = - Self::receive_delivery_proof_for_two_messages_by_single_relayer(); - let weight_of_single_message = Self::receive_delivery_proof_for_single_message(); - weight_of_two_messages - .saturating_sub(weight_of_single_message) - .saturating_mul(messages as _) - } - - /// Returns weight that needs to be accounted when receiving confirmations for given a number of - /// relayers entries with delivery confirmation transaction (`receive_messages_delivery_proof`). - fn receive_messages_delivery_proof_relayers_overhead(relayers: MessageNonce) -> Weight { - let weight_of_two_messages_by_two_relayers = - Self::receive_delivery_proof_for_two_messages_by_two_relayers(); - let weight_of_two_messages_by_single_relayer = - Self::receive_delivery_proof_for_two_messages_by_single_relayer(); - weight_of_two_messages_by_two_relayers - .saturating_sub(weight_of_two_messages_by_single_relayer) - .saturating_mul(relayers as _) - } - - /// Returns weight that needs to be accounted when storage proof of given size is received - /// (either in `receive_messages_proof` or `receive_messages_delivery_proof`). - /// - /// **IMPORTANT**: this overhead is already included in the 'base' transaction cost - e.g. proof - /// size depends on messages count or number of entries in the unrewarded relayers set. So this - /// shouldn't be added to cost of transaction, but instead should act as a minimal cost that the - /// relayer must pay when it relays proof of given size (even if cost based on other parameters - /// is less than that cost). - fn storage_proof_size_overhead(proof_size: u32) -> Weight { - let proof_size_in_bytes = proof_size; - let byte_weight = (Self::receive_single_message_proof_16_kb() - - Self::receive_single_message_proof_1_kb()) / - (15 * 1024); - proof_size_in_bytes * byte_weight - } - - // Functions that may be used by runtime developers. 
- - /// Returns dispatch weight of message of given size. - /// - /// This function would return correct value only if your runtime is configured to run - /// `receive_single_message_proof_with_dispatch` benchmark. See its requirements for - /// details. - fn message_dispatch_weight(message_size: u32) -> Weight { - // There may be a tiny overweight/underweight here, because we don't account how message - // size affects all steps before dispatch. But the effect should be small enough and we - // may ignore it. - Self::receive_single_message_proof_with_dispatch(message_size) - .saturating_sub(Self::receive_single_message_proof()) - } -} - -impl WeightInfoExt for () { - fn expected_extra_storage_proof_size() -> u32 { - EXTRA_STORAGE_PROOF_SIZE - } - - fn receive_messages_proof_overhead_from_runtime() -> Weight { - Weight::zero() - } - - fn receive_messages_delivery_proof_overhead_from_runtime() -> Weight { - Weight::zero() - } -} - -impl WeightInfoExt for crate::weights::BridgeWeight { - fn expected_extra_storage_proof_size() -> u32 { - EXTRA_STORAGE_PROOF_SIZE - } - - fn receive_messages_proof_overhead_from_runtime() -> Weight { - Weight::zero() - } - - fn receive_messages_delivery_proof_overhead_from_runtime() -> Weight { - Weight::zero() - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{mock::TestRuntime, weights::BridgeWeight}; - - #[test] - fn ensure_default_weights_are_correct() { - ensure_weights_are_correct::>(); - } -} diff --git a/modules/parachains/Cargo.toml b/modules/parachains/Cargo.toml deleted file mode 100644 index 2011f2cbbcc5e41dae2266eb72c6760a3ce45bef..0000000000000000000000000000000000000000 --- a/modules/parachains/Cargo.toml +++ /dev/null @@ -1,70 +0,0 @@ -[package] -name = "pallet-bridge-parachains" -version = "0.7.0" -description = "Module that allows bridged relay chains to exchange information on their parachains' heads." 
-authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } -log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } - -# Bridge Dependencies - -bp-header-chain = { path = "../../primitives/header-chain", default-features = false } -bp-parachains = { path = "../../primitives/parachains", default-features = false } -bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } -pallet-bridge-grandpa = { path = "../grandpa", default-features = false } - -# Substrate Dependencies - -frame-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false, optional = true } -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -frame-system = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-trie = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } - -[dev-dependencies] -bp-header-chain = { path = "../../primitives/header-chain" } -bp-test-utils = { path = "../../primitives/test-utils" } -sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-io = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } - -[features] -default = ["std"] -std = [ - "bp-header-chain/std", - "bp-parachains/std", - "bp-polkadot-core/std", - "bp-runtime/std", - 
"codec/std", - "frame-benchmarking/std", - "frame-support/std", - "frame-system/std", - "log/std", - "pallet-bridge-grandpa/std", - "scale-info/std", - "sp-runtime/std", - "sp-std/std", - "sp-trie/std", -] -runtime-benchmarks = [ - "frame-benchmarking/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "pallet-bridge-grandpa/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", -] -try-runtime = [ - "frame-support/try-runtime", - "frame-system/try-runtime", - "pallet-bridge-grandpa/try-runtime", - "sp-runtime/try-runtime", -] diff --git a/modules/parachains/README.md b/modules/parachains/README.md deleted file mode 100644 index 9ca6080383443df2a72041e9089c0eb5c861fbc0..0000000000000000000000000000000000000000 --- a/modules/parachains/README.md +++ /dev/null @@ -1,90 +0,0 @@ -# Bridge Parachains Pallet - -The bridge parachains pallet is a light client for one or several parachains of the bridged relay chain. -It serves as a source of finalized parachain headers and is used when you need to build a bridge with -a parachain. - -The pallet requires [bridge GRANDPA pallet](../grandpa/) to be deployed at the same chain - it is used -to verify storage proofs, generated at the bridged relay chain. - -## A Brief Introduction into Parachains Finality - -You can find detailed information on parachains finality in the -[Polkadot-SDK](https://github.com/paritytech/polkadot-sdk) repository. This section gives a brief overview of how the -parachain finality works and how to build a light client for a parachain. - -The main thing there is that the parachain generates blocks on its own, but it can't achieve finality without -help of its relay chain. Instead, the parachain collators create a block and hand it over to the relay chain -validators. 
Validators validate the block and register the new parachain head in the -[`Heads` map](https://github.com/paritytech/polkadot-sdk/blob/bc5005217a8c2e7c95b9011c96d7e619879b1200/polkadot/runtime/parachains/src/paras/mod.rs#L683-L686) -of the [`paras`](https://github.com/paritytech/polkadot-sdk/tree/master/polkadot/runtime/parachains/src/paras) pallet, -deployed at the relay chain. Keep in mind that this pallet, deployed at a relay chain, is **NOT** a bridge pallet, -even though the names are similar. - -And what the bridge parachains pallet does, is simply verifying storage proofs of parachain heads within that -`Heads` map. It does that using relay chain header, that has been previously imported by the -[bridge GRANDPA pallet](../grandpa/). Once the proof is verified, the pallet knows that the given parachain -header has been finalized by the relay chain. The parachain header fields may then be used to verify storage -proofs, coming from the parachain. This allows the pallet to be used e.g. as a source of finality for the messages -pallet. - -## Pallet Operations - -The main entrypoint of the pallet is the `submit_parachain_heads` call. It has three arguments: - -- storage proof of parachain heads from the `Heads` map; - -- parachain identifiers and hashes of their heads from the storage proof; - -- the relay block, at which the storage proof has been generated. - -The pallet may track multiple parachains. And the parachains may use different primitives - one may use 128-bit block -numbers, other - 32-bit. To avoid extra decode operations, the pallet is using relay chain block number to order -parachain headers. Any finalized descendant of finalized relay block `RB`, which has parachain block `PB` in -its `Heads` map, is guaranteed to have either `PB`, or its descendant. So parachain block number grows with relay -block number. - -The pallet may reject parachain head if it already knows better (or the same) head. 
In addition, pallet rejects -heads of untracked parachains. - -The pallet doesn't track anything behind parachain heads. So it requires no initialization - it is ready to accept -headers right after deployment. - -## Non-Essential Functionality - -There may be a special account in every runtime where the bridge parachains module is deployed. This -account, named 'module owner', is like a module-level sudo account - he's able to halt and -resume all module operations without requiring runtime upgrade. Calls that are related to this -account are: - -- `fn set_owner()`: current module owner may call it to transfer "ownership" to another account; - -- `fn set_operating_mode()`: the module owner (or sudo account) may call this function to stop all - module operations. After this call, all finality proofs will be rejected until further `set_operating_mode` call'. - This call may be used when something extraordinary happens with the bridge. - -If pallet owner is not defined, the governance may be used to make those calls. - -## Signed Extension to Reject Obsolete Headers - -It'd be better for anyone (for chain and for submitters) to reject all transactions that are submitting -already known parachain heads to the pallet. This way, we leave block space to other useful transactions and -we don't charge concurrent submitters for their honest actions. - -To deal with that, we have a [signed extension](./src/call_ext) that may be added to the runtime. -It does exactly what is required - rejects all transactions with already known heads. The submitter -pays nothing for such transactions - they're simply removed from the transaction pool, when the block -is built. - -The signed extension, however, is a bit limited - it only works with transactions that provide single -parachain head. So it won't work with multiple parachain heads transactions. This fits our needs -for [Kusama <> Polkadot bridge](../../docs/polkadot-kusama-bridge-overview.md). 
If you need to deal -with other transaction formats, you may implement similar extension for your runtime. - -You may also take a look at the [`generate_bridge_reject_obsolete_headers_and_messages`](../../bin/runtime-common/src/lib.rs) -macro that bundles several similar signed extensions in a single one. - -## Parachains Finality Relay - -We have an offchain actor, who is watching for new parachain heads and submits them to the bridged chain. -It is the parachains relay - you may look at the [crate level documentation and the code](../../relays/parachains/). diff --git a/modules/parachains/src/benchmarking.rs b/modules/parachains/src/benchmarking.rs deleted file mode 100644 index 27e06a12a1d93486d93aa258afc1d7de4713df2c..0000000000000000000000000000000000000000 --- a/modules/parachains/src/benchmarking.rs +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Parachains finality pallet benchmarking. 
- -use crate::{ - weights_ext::DEFAULT_PARACHAIN_HEAD_SIZE, Call, RelayBlockHash, RelayBlockHasher, - RelayBlockNumber, -}; - -use bp_polkadot_core::parachains::{ParaHash, ParaHeadsProof, ParaId}; -use bp_runtime::StorageProofSize; -use frame_benchmarking::{account, benchmarks_instance_pallet}; -use frame_system::RawOrigin; -use sp_std::prelude::*; - -/// Pallet we're benchmarking here. -pub struct Pallet, I: 'static = ()>(crate::Pallet); - -/// Trait that must be implemented by runtime to benchmark the parachains finality pallet. -pub trait Config: crate::Config { - /// Returns vector of supported parachains. - fn parachains() -> Vec; - /// Generate parachain heads proof and prepare environment for verifying this proof. - fn prepare_parachain_heads_proof( - parachains: &[ParaId], - parachain_head_size: u32, - proof_size: StorageProofSize, - ) -> (RelayBlockNumber, RelayBlockHash, ParaHeadsProof, Vec<(ParaId, ParaHash)>); -} - -benchmarks_instance_pallet! { - where_clause { - where - >::BridgedChain: - bp_runtime::Chain< - BlockNumber = RelayBlockNumber, - Hash = RelayBlockHash, - Hasher = RelayBlockHasher, - >, - } - - // Benchmark `submit_parachain_heads` extrinsic with different number of parachains. 
- submit_parachain_heads_with_n_parachains { - let p in 1..(T::parachains().len() + 1) as u32; - - let sender = account("sender", 0, 0); - let mut parachains = T::parachains(); - let _ = if p <= parachains.len() as u32 { - parachains.split_off(p as usize) - } else { - Default::default() - }; - log::trace!(target: crate::LOG_TARGET, "=== {:?}", parachains.len()); - let (relay_block_number, relay_block_hash, parachain_heads_proof, parachains_heads) = T::prepare_parachain_heads_proof( - ¶chains, - DEFAULT_PARACHAIN_HEAD_SIZE, - StorageProofSize::Minimal(0), - ); - let at_relay_block = (relay_block_number, relay_block_hash); - }: submit_parachain_heads(RawOrigin::Signed(sender), at_relay_block, parachains_heads, parachain_heads_proof) - verify { - for parachain in parachains { - assert!(crate::Pallet::::best_parachain_head(parachain).is_some()); - } - } - - // Benchmark `submit_parachain_heads` extrinsic with 1kb proof size. - submit_parachain_heads_with_1kb_proof { - let sender = account("sender", 0, 0); - let parachains = vec![T::parachains()[0]]; - let (relay_block_number, relay_block_hash, parachain_heads_proof, parachains_heads) = T::prepare_parachain_heads_proof( - ¶chains, - DEFAULT_PARACHAIN_HEAD_SIZE, - StorageProofSize::HasLargeLeaf(1024), - ); - let at_relay_block = (relay_block_number, relay_block_hash); - }: submit_parachain_heads(RawOrigin::Signed(sender), at_relay_block, parachains_heads, parachain_heads_proof) - verify { - for parachain in parachains { - assert!(crate::Pallet::::best_parachain_head(parachain).is_some()); - } - } - - // Benchmark `submit_parachain_heads` extrinsic with 16kb proof size. 
- submit_parachain_heads_with_16kb_proof { - let sender = account("sender", 0, 0); - let parachains = vec![T::parachains()[0]]; - let (relay_block_number, relay_block_hash, parachain_heads_proof, parachains_heads) = T::prepare_parachain_heads_proof( - ¶chains, - DEFAULT_PARACHAIN_HEAD_SIZE, - StorageProofSize::HasLargeLeaf(16 * 1024), - ); - let at_relay_block = (relay_block_number, relay_block_hash); - }: submit_parachain_heads(RawOrigin::Signed(sender), at_relay_block, parachains_heads, parachain_heads_proof) - verify { - for parachain in parachains { - assert!(crate::Pallet::::best_parachain_head(parachain).is_some()); - } - } - - impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::TestRuntime) -} diff --git a/modules/parachains/src/call_ext.rs b/modules/parachains/src/call_ext.rs deleted file mode 100644 index d54256a87dfa711845a3ab2e67292b47ac2a4f60..0000000000000000000000000000000000000000 --- a/modules/parachains/src/call_ext.rs +++ /dev/null @@ -1,457 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -use crate::{Config, GrandpaPalletOf, Pallet, RelayBlockHash, RelayBlockNumber}; -use bp_header_chain::HeaderChain; -use bp_parachains::BestParaHeadHash; -use bp_polkadot_core::parachains::{ParaHash, ParaId}; -use bp_runtime::OwnedBridgeModule; -use frame_support::{ - dispatch::CallableCallFor, - traits::{Get, IsSubType}, -}; -use pallet_bridge_grandpa::SubmitFinalityProofHelper; -use sp_runtime::{ - traits::Zero, - transaction_validity::{InvalidTransaction, TransactionValidityError}, - RuntimeDebug, -}; - -/// Info about a `SubmitParachainHeads` call which tries to update a single parachain. -#[derive(PartialEq, RuntimeDebug)] -pub struct SubmitParachainHeadsInfo { - /// Number and hash of the finalized relay block that has been used to prove parachain - /// finality. - pub at_relay_block: (RelayBlockNumber, RelayBlockHash), - /// Parachain identifier. - pub para_id: ParaId, - /// Hash of the bundled parachain head. - pub para_head_hash: ParaHash, - /// If `true`, then the call must be free (assuming that everything else is valid) to - /// be treated as valid. - pub is_free_execution_expected: bool, -} - -/// Verified `SubmitParachainHeadsInfo`. -#[derive(PartialEq, RuntimeDebug)] -pub struct VerifiedSubmitParachainHeadsInfo { - /// Base call information. - pub base: SubmitParachainHeadsInfo, - /// A difference between bundled bridged relay chain header and relay chain header number - /// used to prove best bridged parachain header, known to us before the call. - pub improved_by: RelayBlockNumber, -} - -/// Helper struct that provides methods for working with the `SubmitParachainHeads` call. -pub struct SubmitParachainHeadsHelper, I: 'static> { - _phantom_data: sp_std::marker::PhantomData<(T, I)>, -} - -impl, I: 'static> SubmitParachainHeadsHelper { - /// Check that is called from signed extension and takes the `is_free_execution_expected` - /// into account. 
- pub fn check_obsolete_from_extension( - update: &SubmitParachainHeadsInfo, - ) -> Result { - // first do all base checks - let improved_by = Self::check_obsolete(update)?; - - // if we don't expect free execution - no more checks - if !update.is_free_execution_expected { - return Ok(improved_by); - } - - // reject if no more free slots remaining in the block - if !SubmitFinalityProofHelper::::can_import_anything_for_free() { - log::trace!( - target: crate::LOG_TARGET, - "The free parachain {:?} head can't be updated: no more free slots \ - left in the block.", - update.para_id, - ); - - return Err(InvalidTransaction::Call.into()); - } - - // reject if we are importing parachain headers too often - if let Some(free_headers_interval) = T::FreeHeadersInterval::get() { - let reject = improved_by < free_headers_interval; - - if reject { - log::trace!( - target: crate::LOG_TARGET, - "The free parachain {:?} head can't be updated: it improves previous - best head by {} while at least {} is expected.", - update.para_id, - improved_by, - free_headers_interval, - ); - - return Err(InvalidTransaction::Stale.into()); - } - } else { - // free headers interval is not configured and call is expected to execute - // for free => it is a relayer error, it should've been able to detect that - } - - Ok(improved_by) - } - - /// Check if the para head provided by the `SubmitParachainHeads` is better than the best one - /// we know. - pub fn check_obsolete( - update: &SubmitParachainHeadsInfo, - ) -> Result { - // check if we know better parachain head already - let improved_by = match crate::ParasInfo::::get(update.para_id) { - Some(stored_best_head) => { - let improved_by = match update - .at_relay_block - .0 - .checked_sub(stored_best_head.best_head_hash.at_relay_block_number) - { - Some(improved_by) if improved_by > Zero::zero() => improved_by, - _ => { - log::trace!( - target: crate::LOG_TARGET, - "The parachain head can't be updated. 
The parachain head for {:?} \ - was already updated at better relay chain block {} >= {}.", - update.para_id, - stored_best_head.best_head_hash.at_relay_block_number, - update.at_relay_block.0 - ); - return Err(InvalidTransaction::Stale.into()) - }, - }; - - if stored_best_head.best_head_hash.head_hash == update.para_head_hash { - log::trace!( - target: crate::LOG_TARGET, - "The parachain head can't be updated. The parachain head hash for {:?} \ - was already updated to {} at block {} < {}.", - update.para_id, - update.para_head_hash, - stored_best_head.best_head_hash.at_relay_block_number, - update.at_relay_block.0 - ); - return Err(InvalidTransaction::Stale.into()) - } - - improved_by - }, - None => RelayBlockNumber::MAX, - }; - - // let's check if our chain had no reorgs and we still know the relay chain header - // used to craft the proof - if GrandpaPalletOf::::finalized_header_state_root(update.at_relay_block.1).is_none() { - log::trace!( - target: crate::LOG_TARGET, - "The parachain {:?} head can't be updated. Relay chain header {}/{} used to create \ - parachain proof is missing from the storage.", - update.para_id, - update.at_relay_block.0, - update.at_relay_block.1, - ); - - return Err(InvalidTransaction::Call.into()) - } - - Ok(improved_by) - } - - /// Check if the `SubmitParachainHeads` was successfully executed. - pub fn was_successful(update: &SubmitParachainHeadsInfo) -> bool { - match crate::ParasInfo::::get(update.para_id) { - Some(stored_best_head) => - stored_best_head.best_head_hash == - BestParaHeadHash { - at_relay_block_number: update.at_relay_block.0, - head_hash: update.para_head_hash, - }, - None => false, - } - } -} - -/// Trait representing a call that is a sub type of this pallet's call. -pub trait CallSubType, I: 'static>: - IsSubType, T>> -{ - /// Create a new instance of `SubmitParachainHeadsInfo` from a `SubmitParachainHeads` call with - /// one single parachain entry. 
- fn one_entry_submit_parachain_heads_info(&self) -> Option { - match self.is_sub_type() { - Some(crate::Call::::submit_parachain_heads { - ref at_relay_block, - ref parachains, - .. - }) => match ¶chains[..] { - &[(para_id, para_head_hash)] => Some(SubmitParachainHeadsInfo { - at_relay_block: *at_relay_block, - para_id, - para_head_hash, - is_free_execution_expected: false, - }), - _ => None, - }, - Some(crate::Call::::submit_parachain_heads_ex { - ref at_relay_block, - ref parachains, - is_free_execution_expected, - .. - }) => match ¶chains[..] { - &[(para_id, para_head_hash)] => Some(SubmitParachainHeadsInfo { - at_relay_block: *at_relay_block, - para_id, - para_head_hash, - is_free_execution_expected: *is_free_execution_expected, - }), - _ => None, - }, - _ => None, - } - } - - /// Create a new instance of `SubmitParachainHeadsInfo` from a `SubmitParachainHeads` call with - /// one single parachain entry, if the entry is for the provided parachain id. - fn submit_parachain_heads_info_for(&self, para_id: u32) -> Option { - self.one_entry_submit_parachain_heads_info() - .filter(|update| update.para_id.0 == para_id) - } - - /// Validate parachain heads in order to avoid "mining" transactions that provide - /// outdated bridged parachain heads. Without this validation, even honest relayers - /// may lose their funds if there are multiple relays running and submitting the - /// same information. - /// - /// This validation only works with transactions that are updating single parachain - /// head. We can't use unbounded validation - it may take too long and either break - /// block production, or "eat" significant portion of block production time literally - /// for nothing. In addition, the single-parachain-head-per-transaction is how the - /// pallet will be used in our environment. 
- fn check_obsolete_submit_parachain_heads( - &self, - ) -> Result, TransactionValidityError> - where - Self: Sized, - { - let update = match self.one_entry_submit_parachain_heads_info() { - Some(update) => update, - None => return Ok(None), - }; - - if Pallet::::ensure_not_halted().is_err() { - return Err(InvalidTransaction::Call.into()) - } - - SubmitParachainHeadsHelper::::check_obsolete_from_extension(&update) - .map(|improved_by| Some(VerifiedSubmitParachainHeadsInfo { base: update, improved_by })) - } -} - -impl CallSubType for T::RuntimeCall -where - T: Config, - T::RuntimeCall: IsSubType, T>>, -{ -} - -#[cfg(test)] -mod tests { - use crate::{ - mock::{run_test, FreeHeadersInterval, RuntimeCall, TestRuntime}, - CallSubType, PalletOperatingMode, ParaInfo, ParasInfo, RelayBlockHash, RelayBlockNumber, - }; - use bp_header_chain::StoredHeaderData; - use bp_parachains::BestParaHeadHash; - use bp_polkadot_core::parachains::{ParaHash, ParaHeadsProof, ParaId}; - use bp_runtime::BasicOperatingMode; - - fn validate_submit_parachain_heads( - num: RelayBlockNumber, - parachains: Vec<(ParaId, ParaHash)>, - ) -> bool { - RuntimeCall::Parachains(crate::Call::::submit_parachain_heads_ex { - at_relay_block: (num, [num as u8; 32].into()), - parachains, - parachain_heads_proof: ParaHeadsProof { storage_proof: Vec::new() }, - is_free_execution_expected: false, - }) - .check_obsolete_submit_parachain_heads() - .is_ok() - } - - fn validate_free_submit_parachain_heads( - num: RelayBlockNumber, - parachains: Vec<(ParaId, ParaHash)>, - ) -> bool { - RuntimeCall::Parachains(crate::Call::::submit_parachain_heads_ex { - at_relay_block: (num, [num as u8; 32].into()), - parachains, - parachain_heads_proof: ParaHeadsProof { storage_proof: Vec::new() }, - is_free_execution_expected: true, - }) - .check_obsolete_submit_parachain_heads() - .is_ok() - } - - fn insert_relay_block(num: RelayBlockNumber) { - pallet_bridge_grandpa::ImportedHeaders::::insert( - RelayBlockHash::from([num as u8; 
32]), - StoredHeaderData { number: num, state_root: RelayBlockHash::from([10u8; 32]) }, - ); - } - - fn sync_to_relay_header_10() { - ParasInfo::::insert( - ParaId(1), - ParaInfo { - best_head_hash: BestParaHeadHash { - at_relay_block_number: 10, - head_hash: [1u8; 32].into(), - }, - next_imported_hash_position: 0, - }, - ); - } - - #[test] - fn extension_rejects_header_from_the_obsolete_relay_block() { - run_test(|| { - // when current best finalized is #10 and we're trying to import header#5 => tx is - // rejected - sync_to_relay_header_10(); - assert!(!validate_submit_parachain_heads(5, vec![(ParaId(1), [1u8; 32].into())])); - }); - } - - #[test] - fn extension_rejects_header_from_the_same_relay_block() { - run_test(|| { - // when current best finalized is #10 and we're trying to import header#10 => tx is - // rejected - sync_to_relay_header_10(); - assert!(!validate_submit_parachain_heads(10, vec![(ParaId(1), [1u8; 32].into())])); - }); - } - - #[test] - fn extension_rejects_header_from_new_relay_block_with_same_hash() { - run_test(|| { - // when current best finalized is #10 and we're trying to import header#10 => tx is - // rejected - sync_to_relay_header_10(); - assert!(!validate_submit_parachain_heads(20, vec![(ParaId(1), [1u8; 32].into())])); - }); - } - - #[test] - fn extension_rejects_header_if_pallet_is_halted() { - run_test(|| { - // when pallet is halted => tx is rejected - sync_to_relay_header_10(); - PalletOperatingMode::::put(BasicOperatingMode::Halted); - - assert!(!validate_submit_parachain_heads(15, vec![(ParaId(1), [2u8; 32].into())])); - }); - } - - #[test] - fn extension_accepts_new_header() { - run_test(|| { - // when current best finalized is #10 and we're trying to import header#15 => tx is - // accepted - sync_to_relay_header_10(); - insert_relay_block(15); - assert!(validate_submit_parachain_heads(15, vec![(ParaId(1), [2u8; 32].into())])); - }); - } - - #[test] - fn extension_accepts_if_more_than_one_parachain_is_submitted() { - 
run_test(|| { - // when current best finalized is #10 and we're trying to import header#5, but another - // parachain head is also supplied => tx is accepted - sync_to_relay_header_10(); - assert!(validate_submit_parachain_heads( - 5, - vec![(ParaId(1), [1u8; 32].into()), (ParaId(2), [1u8; 32].into())] - )); - }); - } - - #[test] - fn extension_rejects_initial_parachain_head_if_missing_relay_chain_header() { - run_test(|| { - // when relay chain header is unknown => "obsolete" - assert!(!validate_submit_parachain_heads(10, vec![(ParaId(1), [1u8; 32].into())])); - // when relay chain header is unknown => "ok" - insert_relay_block(10); - assert!(validate_submit_parachain_heads(10, vec![(ParaId(1), [1u8; 32].into())])); - }); - } - - #[test] - fn extension_rejects_free_parachain_head_if_missing_relay_chain_header() { - run_test(|| { - sync_to_relay_header_10(); - // when relay chain header is unknown => "obsolete" - assert!(!validate_submit_parachain_heads(15, vec![(ParaId(2), [15u8; 32].into())])); - // when relay chain header is unknown => "ok" - insert_relay_block(15); - assert!(validate_submit_parachain_heads(15, vec![(ParaId(2), [15u8; 32].into())])); - }); - } - - #[test] - fn extension_rejects_free_parachain_head_if_no_free_slots_remaining() { - run_test(|| { - // when current best finalized is #10 and we're trying to import header#15 => tx should - // be accepted - sync_to_relay_header_10(); - insert_relay_block(15); - // ... but since we have specified `is_free_execution_expected = true`, it'll be - // rejected - assert!(!validate_free_submit_parachain_heads(15, vec![(ParaId(1), [2u8; 32].into())])); - // ... 
if we have specify `is_free_execution_expected = false`, it'll be accepted - assert!(validate_submit_parachain_heads(15, vec![(ParaId(1), [2u8; 32].into())])); - }); - } - - #[test] - fn extension_rejects_free_parachain_head_if_improves_by_is_below_expected() { - run_test(|| { - // when current best finalized is #10 and we're trying to import header#15 => tx should - // be accepted - sync_to_relay_header_10(); - insert_relay_block(10 + FreeHeadersInterval::get() - 1); - insert_relay_block(10 + FreeHeadersInterval::get()); - // try to submit at 10 + FreeHeadersInterval::get() - 1 => failure - let relay_header = 10 + FreeHeadersInterval::get() - 1; - assert!(!validate_free_submit_parachain_heads( - relay_header, - vec![(ParaId(1), [2u8; 32].into())] - )); - // try to submit at 10 + FreeHeadersInterval::get() => ok - let relay_header = 10 + FreeHeadersInterval::get(); - assert!(validate_free_submit_parachain_heads( - relay_header, - vec![(ParaId(1), [2u8; 32].into())] - )); - }); - } -} diff --git a/modules/parachains/src/lib.rs b/modules/parachains/src/lib.rs deleted file mode 100644 index f56df053ee114a7e3dcf2246ca55e83cb78bcf6d..0000000000000000000000000000000000000000 --- a/modules/parachains/src/lib.rs +++ /dev/null @@ -1,1898 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. 
If not, see . - -//! Parachains finality module. -//! -//! This module needs to be deployed with GRANDPA module, which is syncing relay -//! chain blocks. The main entry point of this module is `submit_parachain_heads`, which -//! accepts storage proof of some parachain `Heads` entries from bridged relay chain. -//! It requires corresponding relay headers to be already synced. - -#![warn(missing_docs)] -#![cfg_attr(not(feature = "std"), no_std)] - -pub use weights::WeightInfo; -pub use weights_ext::WeightInfoExt; - -use bp_header_chain::{HeaderChain, HeaderChainError}; -use bp_parachains::{parachain_head_storage_key_at_source, ParaInfo, ParaStoredHeaderData}; -use bp_polkadot_core::parachains::{ParaHash, ParaHead, ParaHeadsProof, ParaId}; -use bp_runtime::{Chain, HashOf, HeaderId, HeaderIdOf, Parachain, StorageProofError}; -use frame_support::{dispatch::PostDispatchInfo, DefaultNoBound}; -use pallet_bridge_grandpa::SubmitFinalityProofHelper; -use sp_std::{marker::PhantomData, vec::Vec}; - -#[cfg(feature = "runtime-benchmarks")] -use bp_parachains::ParaStoredHeaderDataBuilder; -#[cfg(feature = "runtime-benchmarks")] -use bp_runtime::HeaderOf; -#[cfg(feature = "runtime-benchmarks")] -use codec::Encode; - -// Re-export in crate namespace for `construct_runtime!`. -pub use call_ext::*; -pub use pallet::*; - -pub mod weights; -pub mod weights_ext; - -#[cfg(feature = "runtime-benchmarks")] -pub mod benchmarking; - -mod call_ext; -#[cfg(test)] -mod mock; - -/// The target that will be used when publishing logs related to this pallet. -pub const LOG_TARGET: &str = "runtime::bridge-parachains"; - -/// Block hash of the bridged relay chain. -pub type RelayBlockHash = bp_polkadot_core::Hash; -/// Block number of the bridged relay chain. -pub type RelayBlockNumber = bp_polkadot_core::BlockNumber; -/// Hasher of the bridged relay chain. -pub type RelayBlockHasher = bp_polkadot_core::Hasher; - -/// Artifacts of the parachains head update. 
-struct UpdateParachainHeadArtifacts { - /// New best head of the parachain. - pub best_head: ParaInfo, - /// If `true`, some old parachain head has been pruned during update. - pub prune_happened: bool, -} - -#[frame_support::pallet] -pub mod pallet { - use super::*; - use bp_parachains::{ - BestParaHeadHash, ImportedParaHeadsKeyProvider, ParaStoredHeaderDataBuilder, - ParasInfoKeyProvider, - }; - use bp_runtime::{ - BasicOperatingMode, BoundedStorageValue, OwnedBridgeModule, StorageDoubleMapKeyProvider, - StorageMapKeyProvider, - }; - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - - /// Stored parachain head data of given parachains pallet. - pub type StoredParaHeadDataOf = - BoundedStorageValue<>::MaxParaHeadDataSize, ParaStoredHeaderData>; - /// Weight info of the given parachains pallet. - pub type WeightInfoOf = >::WeightInfo; - /// Bridge GRANDPA pallet that is used to verify parachain proofs. - pub type GrandpaPalletOf = - pallet_bridge_grandpa::Pallet>::BridgesGrandpaPalletInstance>; - - #[pallet::event] - #[pallet::generate_deposit(pub(super) fn deposit_event)] - pub enum Event, I: 'static = ()> { - /// The caller has provided head of parachain that the pallet is not configured to track. - UntrackedParachainRejected { - /// Identifier of the parachain that is not tracked by the pallet. - parachain: ParaId, - }, - /// The caller has declared that he has provided given parachain head, but it is missing - /// from the storage proof. - MissingParachainHead { - /// Identifier of the parachain with missing head. - parachain: ParaId, - }, - /// The caller has provided parachain head hash that is not matching the hash read from the - /// storage proof. - IncorrectParachainHeadHash { - /// Identifier of the parachain with incorrect head hast. - parachain: ParaId, - /// Specified parachain head hash. - parachain_head_hash: ParaHash, - /// Actual parachain head hash. 
- actual_parachain_head_hash: ParaHash, - }, - /// The caller has provided obsolete parachain head, which is already known to the pallet. - RejectedObsoleteParachainHead { - /// Identifier of the parachain with obsolete head. - parachain: ParaId, - /// Obsolete parachain head hash. - parachain_head_hash: ParaHash, - }, - /// The caller has provided parachain head that exceeds the maximal configured head size. - RejectedLargeParachainHead { - /// Identifier of the parachain with rejected head. - parachain: ParaId, - /// Parachain head hash. - parachain_head_hash: ParaHash, - /// Parachain head size. - parachain_head_size: u32, - }, - /// Parachain head has been updated. - UpdatedParachainHead { - /// Identifier of the parachain that has been updated. - parachain: ParaId, - /// Parachain head hash. - parachain_head_hash: ParaHash, - }, - } - - #[pallet::error] - pub enum Error { - /// Relay chain block hash is unknown to us. - UnknownRelayChainBlock, - /// The number of stored relay block is different from what the relayer has provided. - InvalidRelayChainBlockNumber, - /// Parachain heads storage proof is invalid. - HeaderChainStorageProof(HeaderChainError), - /// Error generated by the `OwnedBridgeModule` trait. - BridgeModule(bp_runtime::OwnedBridgeModuleError), - } - - /// Convenience trait for defining `BridgedChain` bounds. - pub trait BoundedBridgeGrandpaConfig: - pallet_bridge_grandpa::Config - { - /// Type of the bridged relay chain. - type BridgedRelayChain: Chain< - BlockNumber = RelayBlockNumber, - Hash = RelayBlockHash, - Hasher = RelayBlockHasher, - >; - } - - impl BoundedBridgeGrandpaConfig for T - where - T: pallet_bridge_grandpa::Config, - T::BridgedChain: - Chain, - { - type BridgedRelayChain = T::BridgedChain; - } - - #[pallet::config] - #[pallet::disable_frame_system_supertrait_check] - pub trait Config: - BoundedBridgeGrandpaConfig - { - /// The overarching event type. 
- type RuntimeEvent: From> - + IsType<::RuntimeEvent>; - /// Benchmarks results from runtime we're plugged into. - type WeightInfo: WeightInfoExt; - - /// Instance of bridges GRANDPA pallet (within this runtime) that this pallet is linked to. - /// - /// The GRANDPA pallet instance must be configured to import headers of relay chain that - /// we're interested in. - /// - /// The associated GRANDPA pallet is also used to configure free parachain heads - /// submissions. The parachain head submission will be free if: - /// - /// 1) the submission contains exactly one parachain head update that succeeds; - /// - /// 2) the difference between relay chain block numbers, used to prove new parachain head - /// and previous best parachain head is larger than the `FreeHeadersInterval`, configured - /// at the associated GRANDPA pallet; - /// - /// 3) there are slots for free submissions, remaining at the block. This is also configured - /// at the associated GRANDPA pallet using `MaxFreeHeadersPerBlock` parameter. - /// - /// First parachain head submission is also free for the submitted, if free submissions - /// are yet accepted to this block. - type BridgesGrandpaPalletInstance: 'static; - - /// Name of the original `paras` pallet in the `construct_runtime!()` call at the bridged - /// chain. - /// - /// Please keep in mind that this should be the name of the `runtime_parachains::paras` - /// pallet from polkadot repository, not the `pallet-bridge-parachains`. - #[pallet::constant] - type ParasPalletName: Get<&'static str>; - - /// Parachain head data builder. - /// - /// We never store parachain heads here, since they may be too big (e.g. because of large - /// digest items). Instead we're using the same approach as `pallet-bridge-grandpa` - /// pallet - we are only storing `bp_messages::StoredHeaderData` (number and state root), - /// which is enough for our applications. 
However, we work with different parachains here - /// and they can use different primitives (for block numbers and hash). So we can't store - /// it directly. Instead, we're storing `bp_messages::StoredHeaderData` in SCALE-encoded - /// form, wrapping it into `bp_parachains::ParaStoredHeaderData`. - /// - /// This builder helps to convert from `HeadData` to `bp_parachains::ParaStoredHeaderData`. - type ParaStoredHeaderDataBuilder: ParaStoredHeaderDataBuilder; - - /// Maximal number of single parachain heads to keep in the storage. - /// - /// The setting is there to prevent growing the on-chain state indefinitely. Note - /// the setting does not relate to parachain block numbers - we will simply keep as much - /// items in the storage, so it doesn't guarantee any fixed timeframe for heads. - /// - /// Incautious change of this constant may lead to orphan entries in the runtime storage. - #[pallet::constant] - type HeadsToKeep: Get; - - /// Maximal size (in bytes) of the SCALE-encoded parachain head data - /// (`bp_parachains::ParaStoredHeaderData`). - /// - /// Keep in mind that the size of any tracked parachain header data must not exceed this - /// value. So if you're going to track multiple parachains, one of which is using large - /// hashes, you shall choose this maximal value. - /// - /// There's no mandatory headers in this pallet, so it can't stall if there's some header - /// that exceeds this bound. - #[pallet::constant] - type MaxParaHeadDataSize: Get; - } - - /// Optional pallet owner. - /// - /// Pallet owner has a right to halt all pallet operations and then resume them. If it is - /// `None`, then there are no direct ways to halt/resume pallet operations, but other - /// runtime methods may still be used to do that (i.e. democracy::referendum to update halt - /// flag directly or call the `halt_operations`). 
- #[pallet::storage] - pub type PalletOwner, I: 'static = ()> = - StorageValue<_, T::AccountId, OptionQuery>; - - /// The current operating mode of the pallet. - /// - /// Depending on the mode either all, or no transactions will be allowed. - #[pallet::storage] - pub type PalletOperatingMode, I: 'static = ()> = - StorageValue<_, BasicOperatingMode, ValueQuery>; - - /// Parachains info. - /// - /// Contains the following info: - /// - best parachain head hash - /// - the head of the `ImportedParaHashes` ring buffer - #[pallet::storage] - pub type ParasInfo, I: 'static = ()> = StorageMap< - Hasher = ::Hasher, - Key = ::Key, - Value = ::Value, - QueryKind = OptionQuery, - OnEmpty = GetDefault, - MaxValues = MaybeMaxParachains, - >; - - /// State roots of parachain heads which have been imported into the pallet. - #[pallet::storage] - pub type ImportedParaHeads, I: 'static = ()> = StorageDoubleMap< - Hasher1 = ::Hasher1, - Key1 = ::Key1, - Hasher2 = ::Hasher2, - Key2 = ::Key2, - Value = StoredParaHeadDataOf, - QueryKind = OptionQuery, - OnEmpty = GetDefault, - MaxValues = MaybeMaxTotalParachainHashes, - >; - - /// A ring buffer of imported parachain head hashes. Ordered by the insertion time. - #[pallet::storage] - pub(super) type ImportedParaHashes, I: 'static = ()> = StorageDoubleMap< - Hasher1 = Blake2_128Concat, - Key1 = ParaId, - Hasher2 = Twox64Concat, - Key2 = u32, - Value = ParaHash, - QueryKind = OptionQuery, - OnEmpty = GetDefault, - MaxValues = MaybeMaxTotalParachainHashes, - >; - - #[pallet::pallet] - pub struct Pallet(PhantomData<(T, I)>); - - impl, I: 'static> OwnedBridgeModule for Pallet { - const LOG_TARGET: &'static str = LOG_TARGET; - type OwnerStorage = PalletOwner; - type OperatingMode = BasicOperatingMode; - type OperatingModeStorage = PalletOperatingMode; - } - - #[pallet::call] - impl, I: 'static> Pallet { - /// Submit proof of one or several parachain heads. 
- /// - /// The proof is supposed to be proof of some `Heads` entries from the - /// `polkadot-runtime-parachains::paras` pallet instance, deployed at the bridged chain. - /// The proof is supposed to be crafted at the `relay_header_hash` that must already be - /// imported by corresponding GRANDPA pallet at this chain. - /// - /// The call fails if: - /// - /// - the pallet is halted; - /// - /// - the relay chain block `at_relay_block` is not imported by the associated bridge - /// GRANDPA pallet. - /// - /// The call may succeed, but some heads may not be updated e.g. because pallet knows - /// better head or it isn't tracked by the pallet. - #[pallet::call_index(0)] - #[pallet::weight(WeightInfoOf::::submit_parachain_heads_weight( - T::DbWeight::get(), - parachain_heads_proof, - parachains.len() as _, - ))] - pub fn submit_parachain_heads( - origin: OriginFor, - at_relay_block: (RelayBlockNumber, RelayBlockHash), - parachains: Vec<(ParaId, ParaHash)>, - parachain_heads_proof: ParaHeadsProof, - ) -> DispatchResultWithPostInfo { - Self::submit_parachain_heads_ex( - origin, - at_relay_block, - parachains, - parachain_heads_proof, - false, - ) - } - - /// Change `PalletOwner`. - /// - /// May only be called either by root, or by `PalletOwner`. - #[pallet::call_index(1)] - #[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] - pub fn set_owner(origin: OriginFor, new_owner: Option) -> DispatchResult { - >::set_owner(origin, new_owner) - } - - /// Halt or resume all pallet operations. - /// - /// May only be called either by root, or by `PalletOwner`. - #[pallet::call_index(2)] - #[pallet::weight((T::DbWeight::get().reads_writes(1, 1), DispatchClass::Operational))] - pub fn set_operating_mode( - origin: OriginFor, - operating_mode: BasicOperatingMode, - ) -> DispatchResult { - >::set_operating_mode(origin, operating_mode) - } - - /// Submit proof of one or several parachain heads. 
- /// - /// The proof is supposed to be proof of some `Heads` entries from the - /// `polkadot-runtime-parachains::paras` pallet instance, deployed at the bridged chain. - /// The proof is supposed to be crafted at the `relay_header_hash` that must already be - /// imported by corresponding GRANDPA pallet at this chain. - /// - /// The call fails if: - /// - /// - the pallet is halted; - /// - /// - the relay chain block `at_relay_block` is not imported by the associated bridge - /// GRANDPA pallet. - /// - /// The call may succeed, but some heads may not be updated e.g. because pallet knows - /// better head or it isn't tracked by the pallet. - /// - /// The `is_free_execution_expected` parameter is not really used inside the call. It is - /// used by the transaction extension, which should be registered at the runtime level. If - /// this parameter is `true`, the transaction will be treated as invalid, if the call won't - /// be executed for free. If transaction extension is not used by the runtime, this - /// parameter is not used at all. 
- #[pallet::call_index(3)] - #[pallet::weight(WeightInfoOf::::submit_parachain_heads_weight( - T::DbWeight::get(), - parachain_heads_proof, - parachains.len() as _, - ))] - pub fn submit_parachain_heads_ex( - origin: OriginFor, - at_relay_block: (RelayBlockNumber, RelayBlockHash), - parachains: Vec<(ParaId, ParaHash)>, - parachain_heads_proof: ParaHeadsProof, - _is_free_execution_expected: bool, - ) -> DispatchResultWithPostInfo { - Self::ensure_not_halted().map_err(Error::::BridgeModule)?; - ensure_signed(origin)?; - - let total_parachains = parachains.len(); - let free_headers_interval = - T::FreeHeadersInterval::get().unwrap_or(RelayBlockNumber::MAX); - // the pallet allows two kind of free submissions - // 1) if distance between all parachain heads is gte than the [`T::FreeHeadersInterval`] - // 2) if all heads are the first heads of their parachains - let mut free_parachain_heads = 0; - - // we'll need relay chain header to verify that parachains heads are always increasing. - let (relay_block_number, relay_block_hash) = at_relay_block; - let relay_block = pallet_bridge_grandpa::ImportedHeaders::< - T, - T::BridgesGrandpaPalletInstance, - >::get(relay_block_hash) - .ok_or(Error::::UnknownRelayChainBlock)?; - ensure!( - relay_block.number == relay_block_number, - Error::::InvalidRelayChainBlockNumber, - ); - - // now parse storage proof and read parachain heads - let mut actual_weight = WeightInfoOf::::submit_parachain_heads_weight( - T::DbWeight::get(), - ¶chain_heads_proof, - parachains.len() as _, - ); - - let mut is_updated_something = false; - let mut storage = GrandpaPalletOf::::storage_proof_checker( - relay_block_hash, - parachain_heads_proof.storage_proof, - ) - .map_err(Error::::HeaderChainStorageProof)?; - - for (parachain, parachain_head_hash) in parachains { - let parachain_head = match Self::read_parachain_head(&mut storage, parachain) { - Ok(Some(parachain_head)) => parachain_head, - Ok(None) => { - log::trace!( - target: LOG_TARGET, - "The head 
of parachain {:?} is None. {}", - parachain, - if ParasInfo::::contains_key(parachain) { - "Looks like it is not yet registered at the source relay chain" - } else { - "Looks like it has been deregistered from the source relay chain" - }, - ); - Self::deposit_event(Event::MissingParachainHead { parachain }); - continue - }, - Err(e) => { - log::trace!( - target: LOG_TARGET, - "The read of head of parachain {:?} has failed: {:?}", - parachain, - e, - ); - Self::deposit_event(Event::MissingParachainHead { parachain }); - continue - }, - }; - - // if relayer has specified invalid parachain head hash, ignore the head - // (this isn't strictly necessary, but better safe than sorry) - let actual_parachain_head_hash = parachain_head.hash(); - if parachain_head_hash != actual_parachain_head_hash { - log::trace!( - target: LOG_TARGET, - "The submitter has specified invalid parachain {:?} head hash: \ - {:?} vs {:?}", - parachain, - parachain_head_hash, - actual_parachain_head_hash, - ); - Self::deposit_event(Event::IncorrectParachainHeadHash { - parachain, - parachain_head_hash, - actual_parachain_head_hash, - }); - continue - } - - // convert from parachain head into stored parachain head data - let parachain_head_size = parachain_head.0.len(); - let parachain_head_data = - match T::ParaStoredHeaderDataBuilder::try_build(parachain, ¶chain_head) { - Some(parachain_head_data) => parachain_head_data, - None => { - log::trace!( - target: LOG_TARGET, - "The head of parachain {:?} has been provided, but it is not tracked by the pallet", - parachain, - ); - Self::deposit_event(Event::UntrackedParachainRejected { parachain }); - continue - }, - }; - - let update_result: Result<_, ()> = - ParasInfo::::try_mutate(parachain, |stored_best_head| { - let is_free = parachain_head_size < - T::ParaStoredHeaderDataBuilder::max_free_head_size() as usize && - match stored_best_head { - Some(ref best_head) - if at_relay_block.0.saturating_sub( - best_head.best_head_hash.at_relay_block_number, 
- ) >= free_headers_interval => - true, - Some(_) => false, - None => true, - }; - let artifacts = Pallet::::update_parachain_head( - parachain, - stored_best_head.take(), - (relay_block_number, relay_block_hash), - parachain_head_data, - parachain_head_hash, - )?; - - is_updated_something = true; - if is_free { - free_parachain_heads = free_parachain_heads + 1; - } - - *stored_best_head = Some(artifacts.best_head); - Ok(artifacts.prune_happened) - }); - - // we're refunding weight if update has not happened and if pruning has not happened - let is_update_happened = update_result.is_ok(); - if !is_update_happened { - actual_weight = actual_weight.saturating_sub( - WeightInfoOf::::parachain_head_storage_write_weight( - T::DbWeight::get(), - ), - ); - } - let is_prune_happened = matches!(update_result, Ok(true)); - if !is_prune_happened { - actual_weight = actual_weight.saturating_sub( - WeightInfoOf::::parachain_head_pruning_weight(T::DbWeight::get()), - ); - } - } - - // even though we may have accepted some parachain heads, we can't allow relayers to - // submit proof with unused trie nodes - // => treat this as an error - // - // (we can throw error here, because now all our calls are transactional) - storage.ensure_no_unused_nodes().map_err(|e| { - Error::::HeaderChainStorageProof(HeaderChainError::StorageProof(e)) - })?; - - // check if we allow this submission for free - let is_free = total_parachains == 1 - && free_parachain_heads == total_parachains - && SubmitFinalityProofHelper::::can_import_anything_for_free(); - let pays_fee = if is_free { - log::trace!(target: LOG_TARGET, "Parachain heads update transaction is free"); - pallet_bridge_grandpa::on_free_header_imported::( - ); - Pays::No - } else { - log::trace!(target: LOG_TARGET, "Parachain heads update transaction is paid"); - Pays::Yes - }; - - Ok(PostDispatchInfo { actual_weight: Some(actual_weight), pays_fee }) - } - } - - impl, I: 'static> Pallet { - /// Get stored parachain info. 
- pub fn best_parachain_info(parachain: ParaId) -> Option { - ParasInfo::::get(parachain) - } - - /// Get best finalized head data of the given parachain. - pub fn best_parachain_head(parachain: ParaId) -> Option { - let best_para_head_hash = ParasInfo::::get(parachain)?.best_head_hash.head_hash; - ImportedParaHeads::::get(parachain, best_para_head_hash).map(|h| h.into_inner()) - } - - /// Get best finalized head hash of the given parachain. - pub fn best_parachain_head_hash(parachain: ParaId) -> Option { - Some(ParasInfo::::get(parachain)?.best_head_hash.head_hash) - } - - /// Get best finalized head id of the given parachain. - pub fn best_parachain_head_id + Parachain>( - ) -> Result>, codec::Error> { - let parachain = ParaId(C::PARACHAIN_ID); - let best_head_hash = match Self::best_parachain_head_hash(parachain) { - Some(best_head_hash) => best_head_hash, - None => return Ok(None), - }; - let encoded_head = match Self::parachain_head(parachain, best_head_hash) { - Some(encoded_head) => encoded_head, - None => return Ok(None), - }; - encoded_head - .decode_parachain_head_data::() - .map(|data| Some(HeaderId(data.number, best_head_hash))) - } - - /// Get parachain head data with given hash. - pub fn parachain_head(parachain: ParaId, hash: ParaHash) -> Option { - ImportedParaHeads::::get(parachain, hash).map(|h| h.into_inner()) - } - - /// Read parachain head from storage proof. - fn read_parachain_head( - storage: &mut bp_runtime::StorageProofChecker, - parachain: ParaId, - ) -> Result, StorageProofError> { - let parachain_head_key = - parachain_head_storage_key_at_source(T::ParasPalletName::get(), parachain); - storage.read_and_decode_value(parachain_head_key.0.as_ref()) - } - - /// Try to update parachain head. 
- pub(super) fn update_parachain_head( - parachain: ParaId, - stored_best_head: Option, - new_at_relay_block: (RelayBlockNumber, RelayBlockHash), - new_head_data: ParaStoredHeaderData, - new_head_hash: ParaHash, - ) -> Result { - // check if head has been already updated at better relay chain block. Without this - // check, we may import heads in random order - let update = SubmitParachainHeadsInfo { - at_relay_block: new_at_relay_block, - para_id: parachain, - para_head_hash: new_head_hash, - // don't actually matter here - is_free_execution_expected: false, - }; - if SubmitParachainHeadsHelper::::check_obsolete(&update).is_err() { - Self::deposit_event(Event::RejectedObsoleteParachainHead { - parachain, - parachain_head_hash: new_head_hash, - }); - return Err(()) - } - - // verify that the parachain head data size is <= `MaxParaHeadDataSize` - let updated_head_data = - match StoredParaHeadDataOf::::try_from_inner(new_head_data) { - Ok(updated_head_data) => updated_head_data, - Err(e) => { - log::trace!( - target: LOG_TARGET, - "The parachain head can't be updated. The parachain head data size \ - for {:?} is {}. 
It exceeds maximal configured size {}.", - parachain, - e.value_size, - e.maximal_size, - ); - - Self::deposit_event(Event::RejectedLargeParachainHead { - parachain, - parachain_head_hash: new_head_hash, - parachain_head_size: e.value_size as _, - }); - - return Err(()) - }, - }; - - let next_imported_hash_position = stored_best_head - .map_or(0, |stored_best_head| stored_best_head.next_imported_hash_position); - - // insert updated best parachain head - let head_hash_to_prune = - ImportedParaHashes::::try_get(parachain, next_imported_hash_position); - let updated_best_para_head = ParaInfo { - best_head_hash: BestParaHeadHash { - at_relay_block_number: new_at_relay_block.0, - head_hash: new_head_hash, - }, - next_imported_hash_position: (next_imported_hash_position + 1) % - T::HeadsToKeep::get(), - }; - ImportedParaHashes::::insert( - parachain, - next_imported_hash_position, - new_head_hash, - ); - ImportedParaHeads::::insert(parachain, new_head_hash, updated_head_data); - log::trace!( - target: LOG_TARGET, - "Updated head of parachain {:?} to {} at relay block {}", - parachain, - new_head_hash, - new_at_relay_block.0, - ); - - // remove old head - let prune_happened = head_hash_to_prune.is_ok(); - if let Ok(head_hash_to_prune) = head_hash_to_prune { - log::trace!( - target: LOG_TARGET, - "Pruning old head of parachain {:?}: {}", - parachain, - head_hash_to_prune, - ); - ImportedParaHeads::::remove(parachain, head_hash_to_prune); - } - Self::deposit_event(Event::UpdatedParachainHead { - parachain, - parachain_head_hash: new_head_hash, - }); - - Ok(UpdateParachainHeadArtifacts { best_head: updated_best_para_head, prune_happened }) - } - } - - #[pallet::genesis_config] - #[derive(DefaultNoBound)] - pub struct GenesisConfig, I: 'static = ()> { - /// Initial pallet operating mode. - pub operating_mode: BasicOperatingMode, - /// Initial pallet owner. - pub owner: Option, - /// Dummy marker. 
- pub phantom: sp_std::marker::PhantomData, - } - - #[pallet::genesis_build] - impl, I: 'static> BuildGenesisConfig for GenesisConfig { - fn build(&self) { - PalletOperatingMode::::put(self.operating_mode); - if let Some(ref owner) = self.owner { - PalletOwner::::put(owner); - } - } - } - - /// Returns maximal number of parachains, supported by the pallet. - pub struct MaybeMaxParachains(PhantomData<(T, I)>); - - impl, I: 'static> Get> for MaybeMaxParachains { - fn get() -> Option { - Some(T::ParaStoredHeaderDataBuilder::supported_parachains()) - } - } - - /// Returns total number of all parachains hashes/heads, stored by the pallet. - pub struct MaybeMaxTotalParachainHashes(PhantomData<(T, I)>); - - impl, I: 'static> Get> for MaybeMaxTotalParachainHashes { - fn get() -> Option { - Some( - T::ParaStoredHeaderDataBuilder::supported_parachains() - .saturating_mul(T::HeadsToKeep::get()), - ) - } - } -} - -/// Single parachain header chain adapter. -pub struct ParachainHeaders(PhantomData<(T, I, C)>); - -impl, I: 'static, C: Parachain> HeaderChain - for ParachainHeaders -{ - fn finalized_header_state_root(hash: HashOf) -> Option> { - Pallet::::parachain_head(ParaId(C::PARACHAIN_ID), hash) - .and_then(|head| head.decode_parachain_head_data::().ok()) - .map(|h| h.state_root) - } -} - -/// (Re)initialize pallet with given header for using it in `pallet-bridge-messages` benchmarks. 
-#[cfg(feature = "runtime-benchmarks")] -pub fn initialize_for_benchmarks, I: 'static, PC: Parachain>( - header: HeaderOf, -) { - let parachain = ParaId(PC::PARACHAIN_ID); - let parachain_head = ParaHead(header.encode()); - let updated_head_data = T::ParaStoredHeaderDataBuilder::try_build(parachain, ¶chain_head) - .expect("failed to build stored parachain head in benchmarks"); - Pallet::::update_parachain_head( - parachain, - None, - (0, Default::default()), - updated_head_data, - parachain_head.hash(), - ) - .expect("failed to insert parachain head in benchmarks"); -} - -#[cfg(test)] -pub(crate) mod tests { - use super::*; - use crate::mock::{ - run_test, test_relay_header, BigParachain, BigParachainHeader, FreeHeadersInterval, - RegularParachainHasher, RegularParachainHeader, RelayBlockHeader, - RuntimeEvent as TestEvent, RuntimeOrigin, TestRuntime, UNTRACKED_PARACHAIN_ID, - }; - use bp_test_utils::prepare_parachain_heads_proof; - use codec::Encode; - - use bp_header_chain::{justification::GrandpaJustification, StoredHeaderGrandpaInfo}; - use bp_parachains::{ - BestParaHeadHash, BridgeParachainCall, ImportedParaHeadsKeyProvider, ParasInfoKeyProvider, - }; - use bp_runtime::{ - BasicOperatingMode, OwnedBridgeModuleError, StorageDoubleMapKeyProvider, - StorageMapKeyProvider, - }; - use bp_test_utils::{ - authority_list, generate_owned_bridge_module_tests, make_default_justification, - TEST_GRANDPA_SET_ID, - }; - use frame_support::{ - assert_noop, assert_ok, - dispatch::DispatchResultWithPostInfo, - pallet_prelude::Pays, - storage::generator::{StorageDoubleMap, StorageMap}, - traits::Get, - weights::Weight, - }; - use frame_system::{EventRecord, Pallet as System, Phase}; - use sp_core::Hasher; - use sp_runtime::{traits::Header as HeaderT, DispatchError}; - - type BridgesGrandpaPalletInstance = pallet_bridge_grandpa::Instance1; - type WeightInfo = ::WeightInfo; - type DbWeight = ::DbWeight; - - pub(crate) fn initialize(state_root: RelayBlockHash) -> RelayBlockHash { 
- pallet_bridge_grandpa::FreeHeadersRemaining::::set(Some(100)); - pallet_bridge_grandpa::Pallet::::initialize( - RuntimeOrigin::root(), - bp_header_chain::InitializationData { - header: Box::new(test_relay_header(0, state_root)), - authority_list: authority_list(), - set_id: 1, - operating_mode: BasicOperatingMode::Normal, - }, - ) - .unwrap(); - - System::::set_block_number(1); - System::::reset_events(); - - test_relay_header(0, state_root).hash() - } - - fn proceed( - num: RelayBlockNumber, - state_root: RelayBlockHash, - ) -> (ParaHash, GrandpaJustification) { - let header = test_relay_header(num, state_root); - let hash = header.hash(); - let justification = make_default_justification(&header); - assert_ok!( - pallet_bridge_grandpa::Pallet::::submit_finality_proof_ex( - RuntimeOrigin::signed(1), - Box::new(header), - justification.clone(), - TEST_GRANDPA_SET_ID, - false, - ) - ); - - (hash, justification) - } - - fn initial_best_head(parachain: u32) -> ParaInfo { - ParaInfo { - best_head_hash: BestParaHeadHash { - at_relay_block_number: 0, - head_hash: head_data(parachain, 0).hash(), - }, - next_imported_hash_position: 1, - } - } - - pub(crate) fn head_data(parachain: u32, head_number: u32) -> ParaHead { - ParaHead( - RegularParachainHeader::new( - head_number as _, - Default::default(), - RegularParachainHasher::hash(&(parachain, head_number).encode()), - Default::default(), - Default::default(), - ) - .encode(), - ) - } - - fn stored_head_data(parachain: u32, head_number: u32) -> ParaStoredHeaderData { - ParaStoredHeaderData( - (head_number as u64, RegularParachainHasher::hash(&(parachain, head_number).encode())) - .encode(), - ) - } - - fn big_head_data(parachain: u32, head_number: u32) -> ParaHead { - ParaHead( - BigParachainHeader::new( - head_number as _, - Default::default(), - RegularParachainHasher::hash(&(parachain, head_number).encode()), - Default::default(), - Default::default(), - ) - .encode(), - ) - } - - fn big_stored_head_data(parachain: 
u32, head_number: u32) -> ParaStoredHeaderData { - ParaStoredHeaderData( - (head_number as u128, RegularParachainHasher::hash(&(parachain, head_number).encode())) - .encode(), - ) - } - - fn head_hash(parachain: u32, head_number: u32) -> ParaHash { - head_data(parachain, head_number).hash() - } - - fn import_parachain_1_head( - relay_chain_block: RelayBlockNumber, - relay_state_root: RelayBlockHash, - parachains: Vec<(ParaId, ParaHash)>, - proof: ParaHeadsProof, - ) -> DispatchResultWithPostInfo { - Pallet::::submit_parachain_heads( - RuntimeOrigin::signed(1), - (relay_chain_block, test_relay_header(relay_chain_block, relay_state_root).hash()), - parachains, - proof, - ) - } - - fn weight_of_import_parachain_1_head(proof: &ParaHeadsProof, prune_expected: bool) -> Weight { - let db_weight = ::DbWeight::get(); - WeightInfoOf::::submit_parachain_heads_weight(db_weight, proof, 1) - .saturating_sub(if prune_expected { - Weight::zero() - } else { - WeightInfoOf::::parachain_head_pruning_weight(db_weight) - }) - } - - #[test] - fn submit_parachain_heads_checks_operating_mode() { - let (state_root, proof, parachains) = - prepare_parachain_heads_proof::(vec![(1, head_data(1, 0))]); - - run_test(|| { - initialize(state_root); - - // `submit_parachain_heads()` should fail when the pallet is halted. - PalletOperatingMode::::put(BasicOperatingMode::Halted); - assert_noop!( - Pallet::::submit_parachain_heads( - RuntimeOrigin::signed(1), - (0, test_relay_header(0, state_root).hash()), - parachains.clone(), - proof.clone(), - ), - Error::::BridgeModule(OwnedBridgeModuleError::Halted) - ); - - // `submit_parachain_heads()` should succeed now that the pallet is resumed. 
- PalletOperatingMode::::put(BasicOperatingMode::Normal); - assert_ok!(Pallet::::submit_parachain_heads( - RuntimeOrigin::signed(1), - (0, test_relay_header(0, state_root).hash()), - parachains, - proof, - ),); - }); - } - - #[test] - fn imports_initial_parachain_heads() { - let (state_root, proof, parachains) = - prepare_parachain_heads_proof::(vec![ - (1, head_data(1, 0)), - (3, head_data(3, 10)), - ]); - run_test(|| { - initialize(state_root); - - // we're trying to update heads of parachains 1 and 3 - let expected_weight = - WeightInfo::submit_parachain_heads_weight(DbWeight::get(), &proof, 2); - let result = Pallet::::submit_parachain_heads( - RuntimeOrigin::signed(1), - (0, test_relay_header(0, state_root).hash()), - parachains, - proof, - ); - assert_ok!(result); - assert_eq!(result.expect("checked above").pays_fee, Pays::Yes); - assert_eq!(result.expect("checked above").actual_weight, Some(expected_weight)); - - // 1 and 3 are updated, because proof is missing head of parachain#2 - assert_eq!(ParasInfo::::get(ParaId(1)), Some(initial_best_head(1))); - assert_eq!(ParasInfo::::get(ParaId(2)), None); - assert_eq!( - ParasInfo::::get(ParaId(3)), - Some(ParaInfo { - best_head_hash: BestParaHeadHash { - at_relay_block_number: 0, - head_hash: head_data(3, 10).hash() - }, - next_imported_hash_position: 1, - }) - ); - - assert_eq!( - ImportedParaHeads::::get( - ParaId(1), - initial_best_head(1).best_head_hash.head_hash - ) - .map(|h| h.into_inner()), - Some(stored_head_data(1, 0)) - ); - assert_eq!( - ImportedParaHeads::::get( - ParaId(2), - initial_best_head(2).best_head_hash.head_hash - ) - .map(|h| h.into_inner()), - None - ); - assert_eq!( - ImportedParaHeads::::get(ParaId(3), head_hash(3, 10)) - .map(|h| h.into_inner()), - Some(stored_head_data(3, 10)) - ); - - assert_eq!( - System::::events(), - vec![ - EventRecord { - phase: Phase::Initialization, - event: TestEvent::Parachains(Event::UpdatedParachainHead { - parachain: ParaId(1), - parachain_head_hash: 
initial_best_head(1).best_head_hash.head_hash, - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: TestEvent::Parachains(Event::UpdatedParachainHead { - parachain: ParaId(3), - parachain_head_hash: head_data(3, 10).hash(), - }), - topics: vec![], - } - ], - ); - }); - } - - #[test] - fn imports_parachain_heads_is_able_to_progress() { - let (state_root_5, proof_5, parachains_5) = - prepare_parachain_heads_proof::(vec![(1, head_data(1, 5))]); - let (state_root_10, proof_10, parachains_10) = - prepare_parachain_heads_proof::(vec![(1, head_data(1, 10))]); - run_test(|| { - // start with relay block #0 and import head#5 of parachain#1 - initialize(state_root_5); - let result = import_parachain_1_head(0, state_root_5, parachains_5, proof_5); - // first parachain head is imported for free - assert_eq!(result.unwrap().pays_fee, Pays::No); - assert_eq!( - ParasInfo::::get(ParaId(1)), - Some(ParaInfo { - best_head_hash: BestParaHeadHash { - at_relay_block_number: 0, - head_hash: head_data(1, 5).hash() - }, - next_imported_hash_position: 1, - }) - ); - assert_eq!( - ImportedParaHeads::::get(ParaId(1), head_data(1, 5).hash()) - .map(|h| h.into_inner()), - Some(stored_head_data(1, 5)) - ); - assert_eq!( - ImportedParaHeads::::get(ParaId(1), head_data(1, 10).hash()) - .map(|h| h.into_inner()), - None - ); - assert_eq!( - System::::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: TestEvent::Parachains(Event::UpdatedParachainHead { - parachain: ParaId(1), - parachain_head_hash: head_data(1, 5).hash(), - }), - topics: vec![], - }], - ); - - // import head#10 of parachain#1 at relay block #1 - let (relay_1_hash, justification) = proceed(1, state_root_10); - let result = import_parachain_1_head(1, state_root_10, parachains_10, proof_10); - // second parachain head is imported for fee - assert_eq!(result.unwrap().pays_fee, Pays::Yes); - assert_eq!( - ParasInfo::::get(ParaId(1)), - Some(ParaInfo { - best_head_hash: 
BestParaHeadHash { - at_relay_block_number: 1, - head_hash: head_data(1, 10).hash() - }, - next_imported_hash_position: 2, - }) - ); - assert_eq!( - ImportedParaHeads::::get(ParaId(1), head_data(1, 5).hash()) - .map(|h| h.into_inner()), - Some(stored_head_data(1, 5)) - ); - assert_eq!( - ImportedParaHeads::::get(ParaId(1), head_data(1, 10).hash()) - .map(|h| h.into_inner()), - Some(stored_head_data(1, 10)) - ); - assert_eq!( - System::::events(), - vec![ - EventRecord { - phase: Phase::Initialization, - event: TestEvent::Parachains(Event::UpdatedParachainHead { - parachain: ParaId(1), - parachain_head_hash: head_data(1, 5).hash(), - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: TestEvent::Grandpa1( - pallet_bridge_grandpa::Event::UpdatedBestFinalizedHeader { - number: 1, - hash: relay_1_hash, - grandpa_info: StoredHeaderGrandpaInfo { - finality_proof: justification, - new_verification_context: None, - }, - } - ), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: TestEvent::Parachains(Event::UpdatedParachainHead { - parachain: ParaId(1), - parachain_head_hash: head_data(1, 10).hash(), - }), - topics: vec![], - } - ], - ); - }); - } - - #[test] - fn ignores_untracked_parachain() { - let (state_root, proof, parachains) = - prepare_parachain_heads_proof::(vec![ - (1, head_data(1, 5)), - (UNTRACKED_PARACHAIN_ID, head_data(1, 5)), - (2, head_data(1, 5)), - ]); - run_test(|| { - // start with relay block #0 and try to import head#5 of parachain#1 and untracked - // parachain - let expected_weight = - WeightInfo::submit_parachain_heads_weight(DbWeight::get(), &proof, 3) - .saturating_sub(WeightInfo::parachain_head_storage_write_weight( - DbWeight::get(), - )); - initialize(state_root); - let result = Pallet::::submit_parachain_heads( - RuntimeOrigin::signed(1), - (0, test_relay_header(0, state_root).hash()), - parachains, - proof, - ); - assert_ok!(result); - assert_eq!(result.expect("checked 
above").actual_weight, Some(expected_weight)); - assert_eq!( - ParasInfo::::get(ParaId(1)), - Some(ParaInfo { - best_head_hash: BestParaHeadHash { - at_relay_block_number: 0, - head_hash: head_data(1, 5).hash() - }, - next_imported_hash_position: 1, - }) - ); - assert_eq!(ParasInfo::::get(ParaId(UNTRACKED_PARACHAIN_ID)), None,); - assert_eq!( - ParasInfo::::get(ParaId(2)), - Some(ParaInfo { - best_head_hash: BestParaHeadHash { - at_relay_block_number: 0, - head_hash: head_data(1, 5).hash() - }, - next_imported_hash_position: 1, - }) - ); - assert_eq!( - System::::events(), - vec![ - EventRecord { - phase: Phase::Initialization, - event: TestEvent::Parachains(Event::UpdatedParachainHead { - parachain: ParaId(1), - parachain_head_hash: head_data(1, 5).hash(), - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: TestEvent::Parachains(Event::UntrackedParachainRejected { - parachain: ParaId(UNTRACKED_PARACHAIN_ID), - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: TestEvent::Parachains(Event::UpdatedParachainHead { - parachain: ParaId(2), - parachain_head_hash: head_data(1, 5).hash(), - }), - topics: vec![], - } - ], - ); - }); - } - - #[test] - fn does_nothing_when_already_imported_this_head_at_previous_relay_header() { - let (state_root, proof, parachains) = - prepare_parachain_heads_proof::(vec![(1, head_data(1, 0))]); - run_test(|| { - // import head#0 of parachain#1 at relay block#0 - initialize(state_root); - assert_ok!(import_parachain_1_head(0, state_root, parachains.clone(), proof.clone())); - assert_eq!(ParasInfo::::get(ParaId(1)), Some(initial_best_head(1))); - assert_eq!( - System::::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: TestEvent::Parachains(Event::UpdatedParachainHead { - parachain: ParaId(1), - parachain_head_hash: initial_best_head(1).best_head_hash.head_hash, - }), - topics: vec![], - }], - ); - - // try to import head#0 of parachain#1 at relay block#1 
- // => call succeeds, but nothing is changed - let (relay_1_hash, justification) = proceed(1, state_root); - assert_ok!(import_parachain_1_head(1, state_root, parachains, proof)); - assert_eq!(ParasInfo::::get(ParaId(1)), Some(initial_best_head(1))); - assert_eq!( - System::::events(), - vec![ - EventRecord { - phase: Phase::Initialization, - event: TestEvent::Parachains(Event::UpdatedParachainHead { - parachain: ParaId(1), - parachain_head_hash: initial_best_head(1).best_head_hash.head_hash, - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: TestEvent::Grandpa1( - pallet_bridge_grandpa::Event::UpdatedBestFinalizedHeader { - number: 1, - hash: relay_1_hash, - grandpa_info: StoredHeaderGrandpaInfo { - finality_proof: justification, - new_verification_context: None, - } - } - ), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: TestEvent::Parachains(Event::RejectedObsoleteParachainHead { - parachain: ParaId(1), - parachain_head_hash: initial_best_head(1).best_head_hash.head_hash, - }), - topics: vec![], - } - ], - ); - }); - } - - #[test] - fn does_nothing_when_already_imported_head_at_better_relay_header() { - let (state_root_5, proof_5, parachains_5) = - prepare_parachain_heads_proof::(vec![(1, head_data(1, 5))]); - let (state_root_10, proof_10, parachains_10) = - prepare_parachain_heads_proof::(vec![(1, head_data(1, 10))]); - run_test(|| { - // start with relay block #0 - initialize(state_root_5); - - // head#10 of parachain#1 at relay block#1 - let (relay_1_hash, justification) = proceed(1, state_root_10); - assert_ok!(import_parachain_1_head(1, state_root_10, parachains_10, proof_10)); - assert_eq!( - ParasInfo::::get(ParaId(1)), - Some(ParaInfo { - best_head_hash: BestParaHeadHash { - at_relay_block_number: 1, - head_hash: head_data(1, 10).hash() - }, - next_imported_hash_position: 1, - }) - ); - assert_eq!( - System::::events(), - vec![ - EventRecord { - phase: Phase::Initialization, - event: 
TestEvent::Grandpa1( - pallet_bridge_grandpa::Event::UpdatedBestFinalizedHeader { - number: 1, - hash: relay_1_hash, - grandpa_info: StoredHeaderGrandpaInfo { - finality_proof: justification.clone(), - new_verification_context: None, - } - } - ), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: TestEvent::Parachains(Event::UpdatedParachainHead { - parachain: ParaId(1), - parachain_head_hash: head_data(1, 10).hash(), - }), - topics: vec![], - } - ], - ); - - // now try to import head#5 at relay block#0 - // => nothing is changed, because better head has already been imported - assert_ok!(import_parachain_1_head(0, state_root_5, parachains_5, proof_5)); - assert_eq!( - ParasInfo::::get(ParaId(1)), - Some(ParaInfo { - best_head_hash: BestParaHeadHash { - at_relay_block_number: 1, - head_hash: head_data(1, 10).hash() - }, - next_imported_hash_position: 1, - }) - ); - assert_eq!( - System::::events(), - vec![ - EventRecord { - phase: Phase::Initialization, - event: TestEvent::Grandpa1( - pallet_bridge_grandpa::Event::UpdatedBestFinalizedHeader { - number: 1, - hash: relay_1_hash, - grandpa_info: StoredHeaderGrandpaInfo { - finality_proof: justification, - new_verification_context: None, - } - } - ), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: TestEvent::Parachains(Event::UpdatedParachainHead { - parachain: ParaId(1), - parachain_head_hash: head_data(1, 10).hash(), - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: TestEvent::Parachains(Event::RejectedObsoleteParachainHead { - parachain: ParaId(1), - parachain_head_hash: head_data(1, 5).hash(), - }), - topics: vec![], - } - ], - ); - }); - } - - #[test] - fn does_nothing_when_parachain_head_is_too_large() { - let (state_root, proof, parachains) = - prepare_parachain_heads_proof::(vec![ - (1, head_data(1, 5)), - (4, big_head_data(1, 5)), - ]); - run_test(|| { - // start with relay block #0 and try to import head#5 of 
parachain#1 and big parachain - initialize(state_root); - let result = Pallet::::submit_parachain_heads( - RuntimeOrigin::signed(1), - (0, test_relay_header(0, state_root).hash()), - parachains, - proof, - ); - assert_ok!(result); - assert_eq!( - ParasInfo::::get(ParaId(1)), - Some(ParaInfo { - best_head_hash: BestParaHeadHash { - at_relay_block_number: 0, - head_hash: head_data(1, 5).hash() - }, - next_imported_hash_position: 1, - }) - ); - assert_eq!(ParasInfo::::get(ParaId(4)), None); - assert_eq!( - System::::events(), - vec![ - EventRecord { - phase: Phase::Initialization, - event: TestEvent::Parachains(Event::UpdatedParachainHead { - parachain: ParaId(1), - parachain_head_hash: head_data(1, 5).hash(), - }), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: TestEvent::Parachains(Event::RejectedLargeParachainHead { - parachain: ParaId(4), - parachain_head_hash: big_head_data(1, 5).hash(), - parachain_head_size: big_stored_head_data(1, 5).encoded_size() as u32, - }), - topics: vec![], - }, - ], - ); - }); - } - - #[test] - fn prunes_old_heads() { - run_test(|| { - let heads_to_keep = crate::mock::HeadsToKeep::get(); - - // import exactly `HeadsToKeep` headers - for i in 0..heads_to_keep { - let (state_root, proof, parachains) = prepare_parachain_heads_proof::< - RegularParachainHeader, - >(vec![(1, head_data(1, i))]); - if i == 0 { - initialize(state_root); - } else { - proceed(i, state_root); - } - - let expected_weight = weight_of_import_parachain_1_head(&proof, false); - let result = import_parachain_1_head(i, state_root, parachains, proof); - assert_ok!(result); - assert_eq!(result.expect("checked above").actual_weight, Some(expected_weight)); - } - - // nothing is pruned yet - for i in 0..heads_to_keep { - assert!(ImportedParaHeads::::get(ParaId(1), head_data(1, i).hash()) - .is_some()); - } - - // import next relay chain header and next parachain head - let (state_root, proof, parachains) = prepare_parachain_heads_proof::< - 
RegularParachainHeader, - >(vec![(1, head_data(1, heads_to_keep))]); - proceed(heads_to_keep, state_root); - let expected_weight = weight_of_import_parachain_1_head(&proof, true); - let result = import_parachain_1_head(heads_to_keep, state_root, parachains, proof); - assert_ok!(result); - assert_eq!(result.expect("checked above").actual_weight, Some(expected_weight)); - - // and the head#0 is pruned - assert!( - ImportedParaHeads::::get(ParaId(1), head_data(1, 0).hash()).is_none() - ); - for i in 1..=heads_to_keep { - assert!(ImportedParaHeads::::get(ParaId(1), head_data(1, i).hash()) - .is_some()); - } - }); - } - - #[test] - fn fails_on_unknown_relay_chain_block() { - let (state_root, proof, parachains) = - prepare_parachain_heads_proof::(vec![(1, head_data(1, 5))]); - run_test(|| { - // start with relay block #0 - initialize(state_root); - - // try to import head#5 of parachain#1 at unknown relay chain block #1 - assert_noop!( - import_parachain_1_head(1, state_root, parachains, proof), - Error::::UnknownRelayChainBlock - ); - }); - } - - #[test] - fn fails_on_invalid_storage_proof() { - let (_state_root, proof, parachains) = - prepare_parachain_heads_proof::(vec![(1, head_data(1, 5))]); - run_test(|| { - // start with relay block #0 - initialize(Default::default()); - - // try to import head#5 of parachain#1 at relay chain block #0 - assert_noop!( - import_parachain_1_head(0, Default::default(), parachains, proof), - Error::::HeaderChainStorageProof(HeaderChainError::StorageProof( - StorageProofError::StorageRootMismatch - )) - ); - }); - } - - #[test] - fn is_not_rewriting_existing_head_if_failed_to_read_updated_head() { - let (state_root_5, proof_5, parachains_5) = - prepare_parachain_heads_proof::(vec![(1, head_data(1, 5))]); - let (state_root_10_at_20, proof_10_at_20, parachains_10_at_20) = - prepare_parachain_heads_proof::(vec![(2, head_data(2, 10))]); - let (state_root_10_at_30, proof_10_at_30, parachains_10_at_30) = - 
prepare_parachain_heads_proof::(vec![(1, head_data(1, 10))]); - run_test(|| { - // we've already imported head#5 of parachain#1 at relay block#10 - initialize(state_root_5); - import_parachain_1_head(0, state_root_5, parachains_5, proof_5).expect("ok"); - assert_eq!( - Pallet::::best_parachain_head(ParaId(1)), - Some(stored_head_data(1, 5)) - ); - - // then if someone is pretending to provide updated head#10 of parachain#1 at relay - // block#20, but fails to do that - // - // => we'll leave previous value - proceed(20, state_root_10_at_20); - assert_ok!(Pallet::::submit_parachain_heads( - RuntimeOrigin::signed(1), - (20, test_relay_header(20, state_root_10_at_20).hash()), - parachains_10_at_20, - proof_10_at_20, - ),); - assert_eq!( - Pallet::::best_parachain_head(ParaId(1)), - Some(stored_head_data(1, 5)) - ); - - // then if someone is pretending to provide updated head#10 of parachain#1 at relay - // block#30, and actually provides it - // - // => we'll update value - proceed(30, state_root_10_at_30); - assert_ok!(Pallet::::submit_parachain_heads( - RuntimeOrigin::signed(1), - (30, test_relay_header(30, state_root_10_at_30).hash()), - parachains_10_at_30, - proof_10_at_30, - ),); - assert_eq!( - Pallet::::best_parachain_head(ParaId(1)), - Some(stored_head_data(1, 10)) - ); - }); - } - - #[test] - fn storage_keys_computed_properly() { - assert_eq!( - ParasInfo::::storage_map_final_key(ParaId(42)).to_vec(), - ParasInfoKeyProvider::final_key("Parachains", &ParaId(42)).0 - ); - - assert_eq!( - ImportedParaHeads::::storage_double_map_final_key( - ParaId(42), - ParaHash::from([21u8; 32]) - ) - .to_vec(), - ImportedParaHeadsKeyProvider::final_key( - "Parachains", - &ParaId(42), - &ParaHash::from([21u8; 32]) - ) - .0, - ); - } - - #[test] - fn ignores_parachain_head_if_it_is_missing_from_storage_proof() { - let (state_root, proof, _) = - prepare_parachain_heads_proof::(vec![]); - let parachains = vec![(ParaId(2), Default::default())]; - run_test(|| { - 
initialize(state_root); - assert_ok!(Pallet::::submit_parachain_heads( - RuntimeOrigin::signed(1), - (0, test_relay_header(0, state_root).hash()), - parachains, - proof, - )); - assert_eq!( - System::::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: TestEvent::Parachains(Event::MissingParachainHead { - parachain: ParaId(2), - }), - topics: vec![], - }], - ); - }); - } - - #[test] - fn ignores_parachain_head_if_parachain_head_hash_is_wrong() { - let (state_root, proof, _) = - prepare_parachain_heads_proof::(vec![(1, head_data(1, 0))]); - let parachains = vec![(ParaId(1), head_data(1, 10).hash())]; - run_test(|| { - initialize(state_root); - assert_ok!(Pallet::::submit_parachain_heads( - RuntimeOrigin::signed(1), - (0, test_relay_header(0, state_root).hash()), - parachains, - proof, - )); - assert_eq!( - System::::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: TestEvent::Parachains(Event::IncorrectParachainHeadHash { - parachain: ParaId(1), - parachain_head_hash: head_data(1, 10).hash(), - actual_parachain_head_hash: head_data(1, 0).hash(), - }), - topics: vec![], - }], - ); - }); - } - - #[test] - fn test_bridge_parachain_call_is_correctly_defined() { - let (state_root, proof, _) = - prepare_parachain_heads_proof::(vec![(1, head_data(1, 0))]); - let parachains = vec![(ParaId(2), Default::default())]; - let relay_header_id = (0, test_relay_header(0, state_root).hash()); - - let direct_submit_parachain_heads_call = Call::::submit_parachain_heads { - at_relay_block: relay_header_id, - parachains: parachains.clone(), - parachain_heads_proof: proof.clone(), - }; - let indirect_submit_parachain_heads_call = BridgeParachainCall::submit_parachain_heads { - at_relay_block: relay_header_id, - parachains, - parachain_heads_proof: proof, - }; - assert_eq!( - direct_submit_parachain_heads_call.encode(), - indirect_submit_parachain_heads_call.encode() - ); - } - - generate_owned_bridge_module_tests!(BasicOperatingMode::Normal, 
BasicOperatingMode::Halted); - - #[test] - fn maybe_max_parachains_returns_correct_value() { - assert_eq!(MaybeMaxParachains::::get(), Some(mock::TOTAL_PARACHAINS)); - } - - #[test] - fn maybe_max_total_parachain_hashes_returns_correct_value() { - assert_eq!( - MaybeMaxTotalParachainHashes::::get(), - Some(mock::TOTAL_PARACHAINS * mock::HeadsToKeep::get()), - ); - } - - #[test] - fn submit_finality_proof_requires_signed_origin() { - run_test(|| { - let (state_root, proof, parachains) = - prepare_parachain_heads_proof::(vec![(1, head_data(1, 0))]); - - initialize(state_root); - - // `submit_parachain_heads()` should fail when the pallet is halted. - assert_noop!( - Pallet::::submit_parachain_heads( - RuntimeOrigin::root(), - (0, test_relay_header(0, state_root).hash()), - parachains, - proof, - ), - DispatchError::BadOrigin - ); - }) - } - - #[test] - fn may_be_free_for_submitting_filtered_heads() { - run_test(|| { - let (state_root, proof, parachains) = - prepare_parachain_heads_proof::(vec![(2, head_data(2, 5))]); - // start with relay block #0 and import head#5 of parachain#2 - initialize(state_root); - // first submission is free - let result = Pallet::::submit_parachain_heads( - RuntimeOrigin::signed(1), - (0, test_relay_header(0, state_root).hash()), - parachains.clone(), - proof.clone(), - ); - assert_eq!(result.unwrap().pays_fee, Pays::No); - // next submission is NOT free, because we haven't updated anything - let result = Pallet::::submit_parachain_heads( - RuntimeOrigin::signed(1), - (0, test_relay_header(0, state_root).hash()), - parachains, - proof, - ); - assert_eq!(result.unwrap().pays_fee, Pays::Yes); - // then we submit new head, proved at relay block `FreeHeadersInterval - 1` => Pays::Yes - let (state_root, proof, parachains) = prepare_parachain_heads_proof::< - RegularParachainHeader, - >(vec![(2, head_data(2, 50))]); - let relay_block_number = FreeHeadersInterval::get() - 1; - proceed(relay_block_number, state_root); - let result = 
Pallet::::submit_parachain_heads( - RuntimeOrigin::signed(1), - (relay_block_number, test_relay_header(relay_block_number, state_root).hash()), - parachains, - proof, - ); - assert_eq!(result.unwrap().pays_fee, Pays::Yes); - // then we submit new head, proved after `FreeHeadersInterval` => Pays::No - let (state_root, proof, parachains) = prepare_parachain_heads_proof::< - RegularParachainHeader, - >(vec![(2, head_data(2, 100))]); - let relay_block_number = relay_block_number + FreeHeadersInterval::get(); - proceed(relay_block_number, state_root); - let result = Pallet::::submit_parachain_heads( - RuntimeOrigin::signed(1), - (relay_block_number, test_relay_header(relay_block_number, state_root).hash()), - parachains, - proof, - ); - assert_eq!(result.unwrap().pays_fee, Pays::No); - // then we submit new BIG head, proved after `FreeHeadersInterval` => Pays::Yes - // then we submit new head, proved after `FreeHeadersInterval` => Pays::No - let mut large_head = head_data(2, 100); - large_head.0.extend(&[42u8; BigParachain::MAX_HEADER_SIZE as _]); - let (state_root, proof, parachains) = - prepare_parachain_heads_proof::(vec![(2, large_head)]); - let relay_block_number = relay_block_number + FreeHeadersInterval::get(); - proceed(relay_block_number, state_root); - let result = Pallet::::submit_parachain_heads( - RuntimeOrigin::signed(1), - (relay_block_number, test_relay_header(relay_block_number, state_root).hash()), - parachains, - proof, - ); - assert_eq!(result.unwrap().pays_fee, Pays::Yes); - }) - } - - #[test] - fn grandpa_and_parachain_pallets_share_free_headers_counter() { - run_test(|| { - initialize(Default::default()); - // set free headers limit to `4` - let mut free_headers_remaining = 4; - pallet_bridge_grandpa::FreeHeadersRemaining::::set( - Some(free_headers_remaining), - ); - // import free GRANDPA and parachain headers - let mut relay_block_number = 0; - for i in 0..2 { - // import free GRANDPA header - let (state_root, proof, parachains) = 
prepare_parachain_heads_proof::< - RegularParachainHeader, - >(vec![(2, head_data(2, 5 + i))]); - relay_block_number = relay_block_number + FreeHeadersInterval::get(); - proceed(relay_block_number, state_root); - assert_eq!( - pallet_bridge_grandpa::FreeHeadersRemaining::< - TestRuntime, - BridgesGrandpaPalletInstance, - >::get(), - Some(free_headers_remaining - 1), - ); - free_headers_remaining = free_headers_remaining - 1; - // import free parachain header - assert_ok!(Pallet::::submit_parachain_heads( - RuntimeOrigin::signed(1), - (relay_block_number, test_relay_header(relay_block_number, state_root).hash()), - parachains, - proof, - ),); - assert_eq!( - pallet_bridge_grandpa::FreeHeadersRemaining::< - TestRuntime, - BridgesGrandpaPalletInstance, - >::get(), - Some(free_headers_remaining - 1), - ); - free_headers_remaining = free_headers_remaining - 1; - } - // try to import free GRANDPA header => non-free execution - let (state_root, proof, parachains) = - prepare_parachain_heads_proof::(vec![(2, head_data(2, 7))]); - relay_block_number = relay_block_number + FreeHeadersInterval::get(); - let result = pallet_bridge_grandpa::Pallet::::submit_finality_proof_ex( - RuntimeOrigin::signed(1), - Box::new(test_relay_header(relay_block_number, state_root)), - make_default_justification(&test_relay_header(relay_block_number, state_root)), - TEST_GRANDPA_SET_ID, - false, - ); - assert_eq!(result.unwrap().pays_fee, Pays::Yes); - // try to import free parachain header => non-free execution - let result = Pallet::::submit_parachain_heads( - RuntimeOrigin::signed(1), - (relay_block_number, test_relay_header(relay_block_number, state_root).hash()), - parachains, - proof, - ); - assert_eq!(result.unwrap().pays_fee, Pays::Yes); - assert_eq!( - pallet_bridge_grandpa::FreeHeadersRemaining::< - TestRuntime, - BridgesGrandpaPalletInstance, - >::get(), - Some(0), - ); - }); - } -} diff --git a/modules/parachains/src/mock.rs b/modules/parachains/src/mock.rs deleted file mode 100644 
index 8e0fa1de15b8b77766f992c9a464af4958201a6a..0000000000000000000000000000000000000000 --- a/modules/parachains/src/mock.rs +++ /dev/null @@ -1,335 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use bp_header_chain::ChainWithGrandpa; -use bp_polkadot_core::parachains::ParaId; -use bp_runtime::{Chain, ChainId, Parachain}; -use frame_support::{ - construct_runtime, derive_impl, parameter_types, traits::ConstU32, weights::Weight, -}; -use sp_runtime::{ - testing::H256, - traits::{BlakeTwo256, Header as HeaderT}, - MultiSignature, -}; - -use crate as pallet_bridge_parachains; - -pub type AccountId = u64; - -pub type RelayBlockHeader = - sp_runtime::generic::Header; - -type Block = frame_system::mocking::MockBlock; - -pub const PARAS_PALLET_NAME: &str = "Paras"; -pub const UNTRACKED_PARACHAIN_ID: u32 = 10; -// use exact expected encoded size: `vec_len_size + header_number_size + state_root_hash_size` -pub const MAXIMAL_PARACHAIN_HEAD_DATA_SIZE: u32 = 1 + 8 + 32; -// total parachains that we use in tests -pub const TOTAL_PARACHAINS: u32 = 4; - -pub type RegularParachainHeader = sp_runtime::testing::Header; -pub type RegularParachainHasher = BlakeTwo256; -pub type BigParachainHeader = sp_runtime::generic::Header; - -pub struct Parachain1; - -impl Chain for Parachain1 { - 
const ID: ChainId = *b"pch1"; - - type BlockNumber = u64; - type Hash = H256; - type Hasher = RegularParachainHasher; - type Header = RegularParachainHeader; - type AccountId = u64; - type Balance = u64; - type Nonce = u64; - type Signature = MultiSignature; - - fn max_extrinsic_size() -> u32 { - 0 - } - fn max_extrinsic_weight() -> Weight { - Weight::zero() - } -} - -impl Parachain for Parachain1 { - const PARACHAIN_ID: u32 = 1; - const MAX_HEADER_SIZE: u32 = 1_024; -} - -pub struct Parachain2; - -impl Chain for Parachain2 { - const ID: ChainId = *b"pch2"; - - type BlockNumber = u64; - type Hash = H256; - type Hasher = RegularParachainHasher; - type Header = RegularParachainHeader; - type AccountId = u64; - type Balance = u64; - type Nonce = u64; - type Signature = MultiSignature; - - fn max_extrinsic_size() -> u32 { - 0 - } - fn max_extrinsic_weight() -> Weight { - Weight::zero() - } -} - -impl Parachain for Parachain2 { - const PARACHAIN_ID: u32 = 2; - const MAX_HEADER_SIZE: u32 = 1_024; -} - -pub struct Parachain3; - -impl Chain for Parachain3 { - const ID: ChainId = *b"pch3"; - - type BlockNumber = u64; - type Hash = H256; - type Hasher = RegularParachainHasher; - type Header = RegularParachainHeader; - type AccountId = u64; - type Balance = u64; - type Nonce = u64; - type Signature = MultiSignature; - - fn max_extrinsic_size() -> u32 { - 0 - } - fn max_extrinsic_weight() -> Weight { - Weight::zero() - } -} - -impl Parachain for Parachain3 { - const PARACHAIN_ID: u32 = 3; - const MAX_HEADER_SIZE: u32 = 1_024; -} - -// this parachain is using u128 as block number and stored head data size exceeds limit -pub struct BigParachain; - -impl Chain for BigParachain { - const ID: ChainId = *b"bpch"; - - type BlockNumber = u128; - type Hash = H256; - type Hasher = RegularParachainHasher; - type Header = BigParachainHeader; - type AccountId = u64; - type Balance = u64; - type Nonce = u64; - type Signature = MultiSignature; - - fn max_extrinsic_size() -> u32 { - 0 - } - 
fn max_extrinsic_weight() -> Weight { - Weight::zero() - } -} - -impl Parachain for BigParachain { - const PARACHAIN_ID: u32 = 4; - const MAX_HEADER_SIZE: u32 = 2_048; -} - -construct_runtime! { - pub enum TestRuntime - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Grandpa1: pallet_bridge_grandpa::::{Pallet, Event}, - Grandpa2: pallet_bridge_grandpa::::{Pallet, Event}, - Parachains: pallet_bridge_parachains::{Call, Pallet, Event}, - } -} - -#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] -impl frame_system::Config for TestRuntime { - type Block = Block; -} - -parameter_types! { - pub const HeadersToKeep: u32 = 5; - pub const FreeHeadersInterval: u32 = 15; -} - -impl pallet_bridge_grandpa::Config for TestRuntime { - type RuntimeEvent = RuntimeEvent; - type BridgedChain = TestBridgedChain; - type MaxFreeHeadersPerBlock = ConstU32<2>; - type FreeHeadersInterval = FreeHeadersInterval; - type HeadersToKeep = HeadersToKeep; - type WeightInfo = (); -} - -impl pallet_bridge_grandpa::Config for TestRuntime { - type RuntimeEvent = RuntimeEvent; - type BridgedChain = TestBridgedChain; - type MaxFreeHeadersPerBlock = ConstU32<2>; - type FreeHeadersInterval = FreeHeadersInterval; - type HeadersToKeep = HeadersToKeep; - type WeightInfo = (); -} - -parameter_types! 
{ - pub const HeadsToKeep: u32 = 4; - pub const ParasPalletName: &'static str = PARAS_PALLET_NAME; - pub GetTenFirstParachains: Vec = (0..10).map(ParaId).collect(); -} - -impl pallet_bridge_parachains::Config for TestRuntime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = (); - type BridgesGrandpaPalletInstance = pallet_bridge_grandpa::Instance1; - type ParasPalletName = ParasPalletName; - type ParaStoredHeaderDataBuilder = (Parachain1, Parachain2, Parachain3, BigParachain); - type HeadsToKeep = HeadsToKeep; - type MaxParaHeadDataSize = ConstU32; -} - -#[cfg(feature = "runtime-benchmarks")] -impl pallet_bridge_parachains::benchmarking::Config<()> for TestRuntime { - fn parachains() -> Vec { - vec![ - ParaId(Parachain1::PARACHAIN_ID), - ParaId(Parachain2::PARACHAIN_ID), - ParaId(Parachain3::PARACHAIN_ID), - ] - } - - fn prepare_parachain_heads_proof( - parachains: &[ParaId], - _parachain_head_size: u32, - _proof_size: bp_runtime::StorageProofSize, - ) -> ( - crate::RelayBlockNumber, - crate::RelayBlockHash, - bp_polkadot_core::parachains::ParaHeadsProof, - Vec<(ParaId, bp_polkadot_core::parachains::ParaHash)>, - ) { - // in mock run we only care about benchmarks correctness, not the benchmark results - // => ignore size related arguments - let (state_root, proof, parachains) = - bp_test_utils::prepare_parachain_heads_proof::( - parachains.iter().map(|p| (p.0, crate::tests::head_data(p.0, 1))).collect(), - ); - let relay_genesis_hash = crate::tests::initialize(state_root); - (0, relay_genesis_hash, proof, parachains) - } -} - -#[derive(Debug)] -pub struct TestBridgedChain; - -impl Chain for TestBridgedChain { - const ID: ChainId = *b"tbch"; - - type BlockNumber = crate::RelayBlockNumber; - type Hash = crate::RelayBlockHash; - type Hasher = crate::RelayBlockHasher; - type Header = RelayBlockHeader; - - type AccountId = AccountId; - type Balance = u32; - type Nonce = u32; - type Signature = sp_runtime::testing::TestSignature; - - fn max_extrinsic_size() -> 
u32 { - unreachable!() - } - - fn max_extrinsic_weight() -> Weight { - unreachable!() - } -} - -impl ChainWithGrandpa for TestBridgedChain { - const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = ""; - const MAX_AUTHORITIES_COUNT: u32 = 16; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 8; - const MAX_MANDATORY_HEADER_SIZE: u32 = 256; - const AVERAGE_HEADER_SIZE: u32 = 64; -} - -#[derive(Debug)] -pub struct OtherBridgedChain; - -impl Chain for OtherBridgedChain { - const ID: ChainId = *b"obch"; - - type BlockNumber = u64; - type Hash = crate::RelayBlockHash; - type Hasher = crate::RelayBlockHasher; - type Header = sp_runtime::generic::Header; - - type AccountId = AccountId; - type Balance = u32; - type Nonce = u32; - type Signature = sp_runtime::testing::TestSignature; - - fn max_extrinsic_size() -> u32 { - unreachable!() - } - - fn max_extrinsic_weight() -> Weight { - unreachable!() - } -} - -impl ChainWithGrandpa for OtherBridgedChain { - const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = ""; - const MAX_AUTHORITIES_COUNT: u32 = 16; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 8; - const MAX_MANDATORY_HEADER_SIZE: u32 = 256; - const AVERAGE_HEADER_SIZE: u32 = 64; -} - -/// Return test externalities to use in tests. -pub fn new_test_ext() -> sp_io::TestExternalities { - sp_io::TestExternalities::new(Default::default()) -} - -/// Run pallet test. -pub fn run_test(test: impl FnOnce() -> T) -> T { - new_test_ext().execute_with(|| { - System::set_block_number(1); - System::reset_events(); - test() - }) -} - -/// Return test relay chain header with given number. 
-pub fn test_relay_header( - num: crate::RelayBlockNumber, - state_root: crate::RelayBlockHash, -) -> RelayBlockHeader { - RelayBlockHeader::new( - num, - Default::default(), - state_root, - Default::default(), - Default::default(), - ) -} diff --git a/modules/parachains/src/weights.rs b/modules/parachains/src/weights.rs deleted file mode 100644 index abddc8768947006e574bf6bca4d2301c2047199a..0000000000000000000000000000000000000000 --- a/modules/parachains/src/weights.rs +++ /dev/null @@ -1,273 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Autogenerated weights for pallet_bridge_parachains -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-03-02, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `covid`, CPU: `11th Gen Intel(R) Core(TM) i7-11800H @ 2.30GHz` -//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 - -// Executed Command: -// target/release/unknown-bridge-node -// benchmark -// pallet -// --chain=dev -// --steps=50 -// --repeat=20 -// --pallet=pallet_bridge_parachains -// --extrinsic=* -// --execution=wasm -// --wasm-execution=Compiled -// --heap-pages=4096 -// --output=./modules/parachains/src/weights.rs -// --template=./.maintain/bridge-weight-template.hbs - -#![allow(clippy::all)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{ - traits::Get, - weights::{constants::RocksDbWeight, Weight}, -}; -use sp_std::marker::PhantomData; - -/// Weight functions needed for pallet_bridge_parachains. -pub trait WeightInfo { - fn submit_parachain_heads_with_n_parachains(p: u32) -> Weight; - fn submit_parachain_heads_with_1kb_proof() -> Weight; - fn submit_parachain_heads_with_16kb_proof() -> Weight; -} - -/// Weights for `pallet_bridge_parachains` that are generated using one of the Bridge testnets. -/// -/// Those weights are test only and must never be used in production. 
-pub struct BridgeWeight(PhantomData); -impl WeightInfo for BridgeWeight { - /// Storage: BridgeUnknownParachains PalletOperatingMode (r:1 w:0) - /// - /// Proof: BridgeUnknownParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), - /// added: 496, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) - /// - /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), - /// added: 2048, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownParachains ParasInfo (r:1 w:1) - /// - /// Proof: BridgeUnknownParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: - /// 555, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownParachains ImportedParaHashes (r:1 w:1) - /// - /// Proof: BridgeUnknownParachains ImportedParaHashes (max_values: Some(1024), max_size: - /// Some(64), added: 1549, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownParachains ImportedParaHeads (r:0 w:1) - /// - /// Proof: BridgeUnknownParachains ImportedParaHeads (max_values: Some(1024), max_size: - /// Some(196), added: 1681, mode: MaxEncodedLen) - /// - /// The range of component `p` is `[1, 2]`. - fn submit_parachain_heads_with_n_parachains(p: u32) -> Weight { - // Proof Size summary in bytes: - // Measured: `366` - // Estimated: `4648` - // Minimum execution time: 36_701 nanoseconds. 
- Weight::from_parts(38_597_828, 4648) - // Standard Error: 190_859 - .saturating_add(Weight::from_parts(60_685, 0).saturating_mul(p.into())) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - } - /// Storage: BridgeUnknownParachains PalletOperatingMode (r:1 w:0) - /// - /// Proof: BridgeUnknownParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), - /// added: 496, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) - /// - /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), - /// added: 2048, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownParachains ParasInfo (r:1 w:1) - /// - /// Proof: BridgeUnknownParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: - /// 555, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownParachains ImportedParaHashes (r:1 w:1) - /// - /// Proof: BridgeUnknownParachains ImportedParaHashes (max_values: Some(1024), max_size: - /// Some(64), added: 1549, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownParachains ImportedParaHeads (r:0 w:1) - /// - /// Proof: BridgeUnknownParachains ImportedParaHeads (max_values: Some(1024), max_size: - /// Some(196), added: 1681, mode: MaxEncodedLen) - fn submit_parachain_heads_with_1kb_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `366` - // Estimated: `4648` - // Minimum execution time: 38_189 nanoseconds. 
- Weight::from_parts(39_252_000, 4648) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - } - /// Storage: BridgeUnknownParachains PalletOperatingMode (r:1 w:0) - /// - /// Proof: BridgeUnknownParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), - /// added: 496, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) - /// - /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), - /// added: 2048, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownParachains ParasInfo (r:1 w:1) - /// - /// Proof: BridgeUnknownParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: - /// 555, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownParachains ImportedParaHashes (r:1 w:1) - /// - /// Proof: BridgeUnknownParachains ImportedParaHashes (max_values: Some(1024), max_size: - /// Some(64), added: 1549, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownParachains ImportedParaHeads (r:0 w:1) - /// - /// Proof: BridgeUnknownParachains ImportedParaHeads (max_values: Some(1024), max_size: - /// Some(196), added: 1681, mode: MaxEncodedLen) - fn submit_parachain_heads_with_16kb_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `366` - // Estimated: `4648` - // Minimum execution time: 62_868 nanoseconds. 
- Weight::from_parts(63_581_000, 4648) - .saturating_add(T::DbWeight::get().reads(4_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - } -} - -// For backwards compatibility and tests -impl WeightInfo for () { - /// Storage: BridgeUnknownParachains PalletOperatingMode (r:1 w:0) - /// - /// Proof: BridgeUnknownParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), - /// added: 496, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) - /// - /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), - /// added: 2048, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownParachains ParasInfo (r:1 w:1) - /// - /// Proof: BridgeUnknownParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: - /// 555, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownParachains ImportedParaHashes (r:1 w:1) - /// - /// Proof: BridgeUnknownParachains ImportedParaHashes (max_values: Some(1024), max_size: - /// Some(64), added: 1549, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownParachains ImportedParaHeads (r:0 w:1) - /// - /// Proof: BridgeUnknownParachains ImportedParaHeads (max_values: Some(1024), max_size: - /// Some(196), added: 1681, mode: MaxEncodedLen) - /// - /// The range of component `p` is `[1, 2]`. - fn submit_parachain_heads_with_n_parachains(p: u32) -> Weight { - // Proof Size summary in bytes: - // Measured: `366` - // Estimated: `4648` - // Minimum execution time: 36_701 nanoseconds. 
- Weight::from_parts(38_597_828, 4648) - // Standard Error: 190_859 - .saturating_add(Weight::from_parts(60_685, 0).saturating_mul(p.into())) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - } - /// Storage: BridgeUnknownParachains PalletOperatingMode (r:1 w:0) - /// - /// Proof: BridgeUnknownParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), - /// added: 496, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) - /// - /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), - /// added: 2048, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownParachains ParasInfo (r:1 w:1) - /// - /// Proof: BridgeUnknownParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: - /// 555, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownParachains ImportedParaHashes (r:1 w:1) - /// - /// Proof: BridgeUnknownParachains ImportedParaHashes (max_values: Some(1024), max_size: - /// Some(64), added: 1549, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownParachains ImportedParaHeads (r:0 w:1) - /// - /// Proof: BridgeUnknownParachains ImportedParaHeads (max_values: Some(1024), max_size: - /// Some(196), added: 1681, mode: MaxEncodedLen) - fn submit_parachain_heads_with_1kb_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `366` - // Estimated: `4648` - // Minimum execution time: 38_189 nanoseconds. 
- Weight::from_parts(39_252_000, 4648) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - } - /// Storage: BridgeUnknownParachains PalletOperatingMode (r:1 w:0) - /// - /// Proof: BridgeUnknownParachains PalletOperatingMode (max_values: Some(1), max_size: Some(1), - /// added: 496, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownGrandpa ImportedHeaders (r:1 w:0) - /// - /// Proof: BridgeUnknownGrandpa ImportedHeaders (max_values: Some(14400), max_size: Some(68), - /// added: 2048, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownParachains ParasInfo (r:1 w:1) - /// - /// Proof: BridgeUnknownParachains ParasInfo (max_values: Some(1), max_size: Some(60), added: - /// 555, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownParachains ImportedParaHashes (r:1 w:1) - /// - /// Proof: BridgeUnknownParachains ImportedParaHashes (max_values: Some(1024), max_size: - /// Some(64), added: 1549, mode: MaxEncodedLen) - /// - /// Storage: BridgeUnknownParachains ImportedParaHeads (r:0 w:1) - /// - /// Proof: BridgeUnknownParachains ImportedParaHeads (max_values: Some(1024), max_size: - /// Some(196), added: 1681, mode: MaxEncodedLen) - fn submit_parachain_heads_with_16kb_proof() -> Weight { - // Proof Size summary in bytes: - // Measured: `366` - // Estimated: `4648` - // Minimum execution time: 62_868 nanoseconds. - Weight::from_parts(63_581_000, 4648) - .saturating_add(RocksDbWeight::get().reads(4_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - } -} diff --git a/modules/parachains/src/weights_ext.rs b/modules/parachains/src/weights_ext.rs deleted file mode 100644 index 64dad625de08b3fd0cd96c255ee80fafa8df2be9..0000000000000000000000000000000000000000 --- a/modules/parachains/src/weights_ext.rs +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Weight-related utilities. - -use crate::weights::{BridgeWeight, WeightInfo}; - -use bp_runtime::Size; -use frame_support::weights::{RuntimeDbWeight, Weight}; - -/// Size of the regular parachain head. -/// -/// It's not that we are expecting all parachain heads to share the same size or that we would -/// reject all heads that have larger/lesser size. It is about head size that we use in benchmarks. -/// Relayer would need to pay additional fee for extra bytes. -/// -/// 384 is a bit larger (1.3 times) than the size of the randomly chosen Polkadot block. -pub const DEFAULT_PARACHAIN_HEAD_SIZE: u32 = 384; - -/// Number of extra bytes (excluding size of storage value itself) of storage proof, built at -/// some generic chain. -pub const EXTRA_STORAGE_PROOF_SIZE: u32 = 1024; - -/// Extended weight info. -pub trait WeightInfoExt: WeightInfo { - // Our configuration assumes that the runtime has special signed extensions used to: - // - // 1) boost priority of `submit_parachain_heads` transactions; - // - // 2) slash relayer if he submits an invalid transaction. - // - // We read and update storage values of other pallets (`pallet-bridge-relayers` and - // balances/assets pallet). So we need to add this weight to the weight of our call. - // Hence two following methods. 
- - /// Extra weight that is added to the `submit_finality_proof` call weight by signed extensions - /// that are declared at runtime level. - fn submit_parachain_heads_overhead_from_runtime() -> Weight; - - /// Storage proof overhead, that is included in every storage proof. - /// - /// The relayer would pay some extra fee for additional proof bytes, since they mean - /// more hashing operations. - fn expected_extra_storage_proof_size() -> u32; - - /// Weight of the parachain heads delivery extrinsic. - fn submit_parachain_heads_weight( - db_weight: RuntimeDbWeight, - proof: &impl Size, - parachains_count: u32, - ) -> Weight { - // weight of the `submit_parachain_heads` with exactly `parachains_count` parachain - // heads of the default size (`DEFAULT_PARACHAIN_HEAD_SIZE`) - let base_weight = Self::submit_parachain_heads_with_n_parachains(parachains_count); - - // overhead because of extra storage proof bytes - let expected_proof_size = parachains_count - .saturating_mul(DEFAULT_PARACHAIN_HEAD_SIZE) - .saturating_add(Self::expected_extra_storage_proof_size()); - let actual_proof_size = proof.size(); - let proof_size_overhead = Self::storage_proof_size_overhead( - actual_proof_size.saturating_sub(expected_proof_size), - ); - - // potential pruning weight (refunded if hasn't happened) - let pruning_weight = - Self::parachain_head_pruning_weight(db_weight).saturating_mul(parachains_count as u64); - - base_weight - .saturating_add(proof_size_overhead) - .saturating_add(pruning_weight) - .saturating_add(Self::submit_parachain_heads_overhead_from_runtime()) - } - - /// Returns weight of single parachain head storage update. - /// - /// This weight only includes db write operations that happens if parachain head is actually - /// updated. All extra weights (weight of storage proof validation, additional checks, ...) is - /// not included. 
- fn parachain_head_storage_write_weight(db_weight: RuntimeDbWeight) -> Weight { - // it's just a couple of operations - we need to write the hash (`ImportedParaHashes`) and - // the head itself (`ImportedParaHeads`. Pruning is not included here - db_weight.writes(2) - } - - /// Returns weight of single parachain head pruning. - fn parachain_head_pruning_weight(db_weight: RuntimeDbWeight) -> Weight { - // it's just one write operation, we don't want any benchmarks for that - db_weight.writes(1) - } - - /// Returns weight that needs to be accounted when storage proof of given size is received. - fn storage_proof_size_overhead(extra_proof_bytes: u32) -> Weight { - let extra_byte_weight = (Self::submit_parachain_heads_with_16kb_proof() - - Self::submit_parachain_heads_with_1kb_proof()) / - (15 * 1024); - extra_byte_weight.saturating_mul(extra_proof_bytes as u64) - } -} - -impl WeightInfoExt for () { - fn submit_parachain_heads_overhead_from_runtime() -> Weight { - Weight::zero() - } - - fn expected_extra_storage_proof_size() -> u32 { - EXTRA_STORAGE_PROOF_SIZE - } -} - -impl WeightInfoExt for BridgeWeight { - fn submit_parachain_heads_overhead_from_runtime() -> Weight { - Weight::zero() - } - - fn expected_extra_storage_proof_size() -> u32 { - EXTRA_STORAGE_PROOF_SIZE - } -} diff --git a/modules/relayers/Cargo.toml b/modules/relayers/Cargo.toml deleted file mode 100644 index 5e93e31965b39f5d2db2f624552b25376d31f135..0000000000000000000000000000000000000000 --- a/modules/relayers/Cargo.toml +++ /dev/null @@ -1,70 +0,0 @@ -[package] -name = "pallet-bridge-relayers" -description = "Module used to store relayer rewards and coordinate relayers set." 
-version = "0.7.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } -log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } - -# Bridge dependencies - -bp-messages = { path = "../../primitives/messages", default-features = false } -bp-relayers = { path = "../../primitives/relayers", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } -pallet-bridge-messages = { path = "../messages", default-features = false } - -# Substrate Dependencies - -frame-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false, optional = true } -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -frame-system = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-arithmetic = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } - -[dev-dependencies] -bp-runtime = { path = "../../primitives/runtime" } -pallet-balances = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-io = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } - -[features] -default = ["std"] -std = [ - "bp-messages/std", - "bp-relayers/std", - "bp-runtime/std", - "codec/std", - "frame-benchmarking/std", - "frame-support/std", - "frame-system/std", - 
"log/std", - "pallet-bridge-messages/std", - "scale-info/std", - "sp-arithmetic/std", - "sp-runtime/std", - "sp-std/std", -] -runtime-benchmarks = [ - "frame-benchmarking/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "pallet-balances/runtime-benchmarks", - "pallet-bridge-messages/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", -] -try-runtime = [ - "frame-support/try-runtime", - "frame-system/try-runtime", - "pallet-balances/try-runtime", - "pallet-bridge-messages/try-runtime", - "sp-runtime/try-runtime", -] diff --git a/modules/relayers/README.md b/modules/relayers/README.md deleted file mode 100644 index 656200f448651436d5f583acbf13a79860766321..0000000000000000000000000000000000000000 --- a/modules/relayers/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# Bridge Relayers Pallet - -The pallet serves as a storage for pending bridge relayer rewards. Any runtime component may register reward -to some relayer for doing some useful job at some messages lane. Later, the relayer may claim its rewards -using the `claim_rewards` call. - -The reward payment procedure is abstracted from the pallet code. One of possible implementations, is the -[`PayLaneRewardFromAccount`](../../primitives/relayers/src/lib.rs), which just does a `Currency::transfer` -call to relayer account from the relayer-rewards account, determined by the message lane id. - -We have two examples of how this pallet is used in production. Rewards are registered at the target chain to -compensate fees of message delivery transactions (and linked finality delivery calls). At the source chain, rewards -are registered during delivery confirmation transactions. You may find more information about that in the -[Kusama <> Polkadot bridge](../../docs/polkadot-kusama-bridge-overview.md) documentation. 
diff --git a/modules/relayers/src/benchmarking.rs b/modules/relayers/src/benchmarking.rs deleted file mode 100644 index ca312d44edfddd286eae1715655d538b6b00f070..0000000000000000000000000000000000000000 --- a/modules/relayers/src/benchmarking.rs +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Benchmarks for the relayers Pallet. - -#![cfg(feature = "runtime-benchmarks")] - -use crate::*; - -use bp_messages::LaneId; -use bp_relayers::RewardsAccountOwner; -use frame_benchmarking::{benchmarks, whitelisted_caller}; -use frame_system::RawOrigin; -use sp_runtime::traits::One; - -/// Reward amount that is (hopefully) is larger than existential deposit across all chains. -const REWARD_AMOUNT: u32 = u32::MAX; - -/// Pallet we're benchmarking here. -pub struct Pallet(crate::Pallet); - -/// Trait that must be implemented by runtime. -pub trait Config: crate::Config { - /// Prepare environment for paying given reward for serving given lane. - fn prepare_rewards_account(account_params: RewardsAccountParams, reward: Self::Reward); - /// Give enough balance to given account. - fn deposit_account(account: Self::AccountId, balance: Self::Reward); -} - -benchmarks! { - // Benchmark `claim_rewards` call. 
- claim_rewards { - let lane = LaneId([0, 0, 0, 0]); - let account_params = - RewardsAccountParams::new(lane, *b"test", RewardsAccountOwner::ThisChain); - let relayer: T::AccountId = whitelisted_caller(); - let reward = T::Reward::from(REWARD_AMOUNT); - - T::prepare_rewards_account(account_params, reward); - RelayerRewards::::insert(&relayer, account_params, reward); - }: _(RawOrigin::Signed(relayer), account_params) - verify { - // we can't check anything here, because `PaymentProcedure` is responsible for - // payment logic, so we assume that if call has succeeded, the procedure has - // also completed successfully - } - - // Benchmark `register` call. - register { - let relayer: T::AccountId = whitelisted_caller(); - let valid_till = frame_system::Pallet::::block_number() - .saturating_add(crate::Pallet::::required_registration_lease()) - .saturating_add(One::one()) - .saturating_add(One::one()); - - T::deposit_account(relayer.clone(), crate::Pallet::::required_stake()); - }: _(RawOrigin::Signed(relayer.clone()), valid_till) - verify { - assert!(crate::Pallet::::is_registration_active(&relayer)); - } - - // Benchmark `deregister` call. - deregister { - let relayer: T::AccountId = whitelisted_caller(); - let valid_till = frame_system::Pallet::::block_number() - .saturating_add(crate::Pallet::::required_registration_lease()) - .saturating_add(One::one()) - .saturating_add(One::one()); - T::deposit_account(relayer.clone(), crate::Pallet::::required_stake()); - crate::Pallet::::register(RawOrigin::Signed(relayer.clone()).into(), valid_till).unwrap(); - - frame_system::Pallet::::set_block_number(valid_till.saturating_add(One::one())); - }: _(RawOrigin::Signed(relayer.clone())) - verify { - assert!(!crate::Pallet::::is_registration_active(&relayer)); - } - - // Benchmark `slash_and_deregister` method of the pallet. 
We are adding this weight to - // the weight of message delivery call if `RefundBridgedParachainMessages` signed extension - // is deployed at runtime level. - slash_and_deregister { - // prepare and register relayer account - let relayer: T::AccountId = whitelisted_caller(); - let valid_till = frame_system::Pallet::::block_number() - .saturating_add(crate::Pallet::::required_registration_lease()) - .saturating_add(One::one()) - .saturating_add(One::one()); - T::deposit_account(relayer.clone(), crate::Pallet::::required_stake()); - crate::Pallet::::register(RawOrigin::Signed(relayer.clone()).into(), valid_till).unwrap(); - - // create slash destination account - let lane = LaneId([0, 0, 0, 0]); - let slash_destination = RewardsAccountParams::new(lane, *b"test", RewardsAccountOwner::ThisChain); - T::prepare_rewards_account(slash_destination, Zero::zero()); - }: { - crate::Pallet::::slash_and_deregister(&relayer, slash_destination.into()) - } - verify { - assert!(!crate::Pallet::::is_registration_active(&relayer)); - } - - // Benchmark `register_relayer_reward` method of the pallet. We are adding this weight to - // the weight of message delivery call if `RefundBridgedParachainMessages` signed extension - // is deployed at runtime level. 
- register_relayer_reward { - let lane = LaneId([0, 0, 0, 0]); - let relayer: T::AccountId = whitelisted_caller(); - let account_params = - RewardsAccountParams::new(lane, *b"test", RewardsAccountOwner::ThisChain); - }: { - crate::Pallet::::register_relayer_reward(account_params, &relayer, One::one()); - } - verify { - assert_eq!(RelayerRewards::::get(relayer, &account_params), Some(One::one())); - } - - impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::TestRuntime) -} diff --git a/modules/relayers/src/lib.rs b/modules/relayers/src/lib.rs deleted file mode 100644 index 7a3a0f9ea94cbe5768bf6ee8c850355193ea44f0..0000000000000000000000000000000000000000 --- a/modules/relayers/src/lib.rs +++ /dev/null @@ -1,923 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Runtime module that is used to store relayer rewards and (in the future) to -//! coordinate relations between relayers. 
- -#![cfg_attr(not(feature = "std"), no_std)] -#![warn(missing_docs)] - -use bp_relayers::{ - ExplicitOrAccountParams, PaymentProcedure, Registration, RelayerRewardsKeyProvider, - RewardsAccountParams, StakeAndSlash, -}; -use bp_runtime::StorageDoubleMapKeyProvider; -use frame_support::fail; -use sp_arithmetic::traits::{AtLeast32BitUnsigned, Zero}; -use sp_runtime::{traits::CheckedSub, Saturating}; -use sp_std::marker::PhantomData; - -pub use pallet::*; -pub use payment_adapter::DeliveryConfirmationPaymentsAdapter; -pub use stake_adapter::StakeAndSlashNamed; -pub use weights::WeightInfo; -pub use weights_ext::WeightInfoExt; - -pub mod benchmarking; - -mod mock; -mod payment_adapter; -mod stake_adapter; -mod weights_ext; - -pub mod weights; - -/// The target that will be used when publishing logs related to this pallet. -pub const LOG_TARGET: &str = "runtime::bridge-relayers"; - -#[frame_support::pallet] -pub mod pallet { - use super::*; - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - - /// `RelayerRewardsKeyProvider` for given configuration. - type RelayerRewardsKeyProviderOf = - RelayerRewardsKeyProvider<::AccountId, ::Reward>; - - #[pallet::config] - pub trait Config: frame_system::Config { - /// The overarching event type. - type RuntimeEvent: From> + IsType<::RuntimeEvent>; - /// Type of relayer reward. - type Reward: AtLeast32BitUnsigned + Copy + Parameter + MaxEncodedLen; - /// Pay rewards scheme. - type PaymentProcedure: PaymentProcedure; - /// Stake and slash scheme. - type StakeAndSlash: StakeAndSlash, Self::Reward>; - /// Pallet call weights. - type WeightInfo: WeightInfoExt; - } - - #[pallet::pallet] - pub struct Pallet(PhantomData); - - #[pallet::call] - impl Pallet { - /// Claim accumulated rewards. 
- #[pallet::call_index(0)] - #[pallet::weight(T::WeightInfo::claim_rewards())] - pub fn claim_rewards( - origin: OriginFor, - rewards_account_params: RewardsAccountParams, - ) -> DispatchResult { - let relayer = ensure_signed(origin)?; - - RelayerRewards::::try_mutate_exists( - &relayer, - rewards_account_params, - |maybe_reward| -> DispatchResult { - let reward = maybe_reward.take().ok_or(Error::::NoRewardForRelayer)?; - T::PaymentProcedure::pay_reward(&relayer, rewards_account_params, reward) - .map_err(|e| { - log::trace!( - target: LOG_TARGET, - "Failed to pay {:?} rewards to {:?}: {:?}", - rewards_account_params, - relayer, - e, - ); - Error::::FailedToPayReward - })?; - - Self::deposit_event(Event::::RewardPaid { - relayer: relayer.clone(), - rewards_account_params, - reward, - }); - Ok(()) - }, - ) - } - - /// Register relayer or update its registration. - /// - /// Registration allows relayer to get priority boost for its message delivery transactions. - #[pallet::call_index(1)] - #[pallet::weight(T::WeightInfo::register())] - pub fn register(origin: OriginFor, valid_till: BlockNumberFor) -> DispatchResult { - let relayer = ensure_signed(origin)?; - - // valid till must be larger than the current block number and the lease must be larger - // than the `RequiredRegistrationLease` - let lease = valid_till.saturating_sub(frame_system::Pallet::::block_number()); - ensure!( - lease > Pallet::::required_registration_lease(), - Error::::InvalidRegistrationLease - ); - - RegisteredRelayers::::try_mutate(&relayer, |maybe_registration| -> DispatchResult { - let mut registration = maybe_registration - .unwrap_or_else(|| Registration { valid_till, stake: Zero::zero() }); - - // new `valid_till` must be larger (or equal) than the old one - ensure!( - valid_till >= registration.valid_till, - Error::::CannotReduceRegistrationLease, - ); - registration.valid_till = valid_till; - - // regarding stake, there are three options: - // - if relayer stake is larger than required 
stake, we may do unreserve - // - if relayer stake equals to required stake, we do nothing - // - if relayer stake is smaller than required stake, we do additional reserve - let required_stake = Pallet::::required_stake(); - if let Some(to_unreserve) = registration.stake.checked_sub(&required_stake) { - Self::do_unreserve(&relayer, to_unreserve)?; - } else if let Some(to_reserve) = required_stake.checked_sub(®istration.stake) { - T::StakeAndSlash::reserve(&relayer, to_reserve).map_err(|e| { - log::trace!( - target: LOG_TARGET, - "Failed to reserve {:?} on relayer {:?} account: {:?}", - to_reserve, - relayer, - e, - ); - - Error::::FailedToReserve - })?; - } - registration.stake = required_stake; - - log::trace!(target: LOG_TARGET, "Successfully registered relayer: {:?}", relayer); - Self::deposit_event(Event::::RegistrationUpdated { - relayer: relayer.clone(), - registration, - }); - - *maybe_registration = Some(registration); - - Ok(()) - }) - } - - /// `Deregister` relayer. - /// - /// After this call, message delivery transactions of the relayer won't get any priority - /// boost. 
- #[pallet::call_index(2)] - #[pallet::weight(T::WeightInfo::deregister())] - pub fn deregister(origin: OriginFor) -> DispatchResult { - let relayer = ensure_signed(origin)?; - - RegisteredRelayers::::try_mutate(&relayer, |maybe_registration| -> DispatchResult { - let registration = match maybe_registration.take() { - Some(registration) => registration, - None => fail!(Error::::NotRegistered), - }; - - // we can't deregister until `valid_till + 1` - ensure!( - registration.valid_till < frame_system::Pallet::::block_number(), - Error::::RegistrationIsStillActive, - ); - - // if stake is non-zero, we should do unreserve - if !registration.stake.is_zero() { - Self::do_unreserve(&relayer, registration.stake)?; - } - - log::trace!(target: LOG_TARGET, "Successfully deregistered relayer: {:?}", relayer); - Self::deposit_event(Event::::Deregistered { relayer: relayer.clone() }); - - *maybe_registration = None; - - Ok(()) - }) - } - } - - impl Pallet { - /// Returns true if given relayer registration is active at current block. - /// - /// This call respects both `RequiredStake` and `RequiredRegistrationLease`, meaning that - /// it'll return false if registered stake is lower than required or if remaining lease - /// is less than `RequiredRegistrationLease`. - pub fn is_registration_active(relayer: &T::AccountId) -> bool { - let registration = match Self::registered_relayer(relayer) { - Some(registration) => registration, - None => return false, - }; - - // registration is inactive if relayer stake is less than required - if registration.stake < Self::required_stake() { - return false - } - - // registration is inactive if it ends soon - let remaining_lease = registration - .valid_till - .saturating_sub(frame_system::Pallet::::block_number()); - if remaining_lease <= Self::required_registration_lease() { - return false - } - - true - } - - /// Slash and `deregister` relayer. This function slashes all staked balance. 
- /// - /// It may fail inside, but error is swallowed and we only log it. - pub fn slash_and_deregister( - relayer: &T::AccountId, - slash_destination: ExplicitOrAccountParams, - ) { - let registration = match RegisteredRelayers::::take(relayer) { - Some(registration) => registration, - None => { - log::trace!( - target: crate::LOG_TARGET, - "Cannot slash unregistered relayer {:?}", - relayer, - ); - - return - }, - }; - - match T::StakeAndSlash::repatriate_reserved( - relayer, - slash_destination.clone(), - registration.stake, - ) { - Ok(failed_to_slash) if failed_to_slash.is_zero() => { - log::trace!( - target: crate::LOG_TARGET, - "Relayer account {:?} has been slashed for {:?}. Funds were deposited to {:?}", - relayer, - registration.stake, - slash_destination, - ); - }, - Ok(failed_to_slash) => { - log::trace!( - target: crate::LOG_TARGET, - "Relayer account {:?} has been partially slashed for {:?}. Funds were deposited to {:?}. \ - Failed to slash: {:?}", - relayer, - registration.stake, - slash_destination, - failed_to_slash, - ); - }, - Err(e) => { - // TODO: document this. Where? - - // it may fail if there's no beneficiary account. For us it means that this - // account must exists before we'll deploy the bridge - log::debug!( - target: crate::LOG_TARGET, - "Failed to slash relayer account {:?}: {:?}. Maybe beneficiary account doesn't exist? \ - Beneficiary: {:?}, amount: {:?}, failed to slash: {:?}", - relayer, - e, - slash_destination, - registration.stake, - registration.stake, - ); - }, - } - } - - /// Register reward for given relayer. 
- pub fn register_relayer_reward( - rewards_account_params: RewardsAccountParams, - relayer: &T::AccountId, - reward: T::Reward, - ) { - if reward.is_zero() { - return - } - - RelayerRewards::::mutate( - relayer, - rewards_account_params, - |old_reward: &mut Option| { - let new_reward = old_reward.unwrap_or_else(Zero::zero).saturating_add(reward); - *old_reward = Some(new_reward); - - log::trace!( - target: crate::LOG_TARGET, - "Relayer {:?} can now claim reward for serving payer {:?}: {:?}", - relayer, - rewards_account_params, - new_reward, - ); - - Self::deposit_event(Event::::RewardRegistered { - relayer: relayer.clone(), - rewards_account_params, - reward, - }); - }, - ); - } - - /// Return required registration lease. - pub(crate) fn required_registration_lease() -> BlockNumberFor { - , - T::Reward, - >>::RequiredRegistrationLease::get() - } - - /// Return required stake. - pub(crate) fn required_stake() -> T::Reward { - , - T::Reward, - >>::RequiredStake::get() - } - - /// `Unreserve` given amount on relayer account. - fn do_unreserve(relayer: &T::AccountId, amount: T::Reward) -> DispatchResult { - let failed_to_unreserve = T::StakeAndSlash::unreserve(relayer, amount); - if !failed_to_unreserve.is_zero() { - log::trace!( - target: LOG_TARGET, - "Failed to unreserve {:?}/{:?} on relayer {:?} account", - failed_to_unreserve, - amount, - relayer, - ); - - fail!(Error::::FailedToUnreserve) - } - - Ok(()) - } - } - - #[pallet::event] - #[pallet::generate_deposit(pub(super) fn deposit_event)] - pub enum Event { - /// Relayer reward has been registered and may be claimed later. - RewardRegistered { - /// Relayer account that can claim reward. - relayer: T::AccountId, - /// Relayer can claim reward from this account. - rewards_account_params: RewardsAccountParams, - /// Reward amount. - reward: T::Reward, - }, - /// Reward has been paid to the relayer. - RewardPaid { - /// Relayer account that has been rewarded. 
- relayer: T::AccountId, - /// Relayer has received reward from this account. - rewards_account_params: RewardsAccountParams, - /// Reward amount. - reward: T::Reward, - }, - /// Relayer registration has been added or updated. - RegistrationUpdated { - /// Relayer account that has been registered. - relayer: T::AccountId, - /// Relayer registration. - registration: Registration, T::Reward>, - }, - /// Relayer has been `deregistered`. - Deregistered { - /// Relayer account that has been `deregistered`. - relayer: T::AccountId, - }, - /// Relayer has been slashed and `deregistered`. - SlashedAndDeregistered { - /// Relayer account that has been `deregistered`. - relayer: T::AccountId, - /// Registration that was removed. - registration: Registration, T::Reward>, - }, - } - - #[pallet::error] - pub enum Error { - /// No reward can be claimed by given relayer. - NoRewardForRelayer, - /// Reward payment procedure has failed. - FailedToPayReward, - /// The relayer has tried to register for past block or registration lease - /// is too short. - InvalidRegistrationLease, - /// New registration lease is less than the previous one. - CannotReduceRegistrationLease, - /// Failed to reserve enough funds on relayer account. - FailedToReserve, - /// Failed to `unreserve` enough funds on relayer account. - FailedToUnreserve, - /// Cannot `deregister` if not registered. - NotRegistered, - /// Failed to `deregister` relayer, because lease is still active. - RegistrationIsStillActive, - } - - /// Map of the relayer => accumulated reward. 
- #[pallet::storage] - #[pallet::getter(fn relayer_reward)] - pub type RelayerRewards = StorageDoubleMap< - _, - as StorageDoubleMapKeyProvider>::Hasher1, - as StorageDoubleMapKeyProvider>::Key1, - as StorageDoubleMapKeyProvider>::Hasher2, - as StorageDoubleMapKeyProvider>::Key2, - as StorageDoubleMapKeyProvider>::Value, - OptionQuery, - >; - - /// Relayers that have reserved some of their balance to get free priority boost - /// for their message delivery transactions. - /// - /// Other relayers may submit transactions as well, but they will have default - /// priority and will be rejected (without significant tip) in case if registered - /// relayer is present. - #[pallet::storage] - #[pallet::getter(fn registered_relayer)] - pub type RegisteredRelayers = StorageMap< - _, - Blake2_128Concat, - T::AccountId, - Registration, T::Reward>, - OptionQuery, - >; -} - -#[cfg(test)] -mod tests { - use super::*; - use mock::{RuntimeEvent as TestEvent, *}; - - use crate::Event::{RewardPaid, RewardRegistered}; - use bp_messages::LaneId; - use bp_relayers::RewardsAccountOwner; - use frame_support::{ - assert_noop, assert_ok, - traits::fungible::{Inspect, Mutate}, - }; - use frame_system::{EventRecord, Pallet as System, Phase}; - use sp_runtime::DispatchError; - - fn get_ready_for_events() { - System::::set_block_number(1); - System::::reset_events(); - } - - #[test] - fn register_relayer_reward_emit_event() { - run_test(|| { - get_ready_for_events(); - - Pallet::::register_relayer_reward( - TEST_REWARDS_ACCOUNT_PARAMS, - ®ULAR_RELAYER, - 100, - ); - - // Check if the `RewardRegistered` event was emitted. 
- assert_eq!( - System::::events().last(), - Some(&EventRecord { - phase: Phase::Initialization, - event: TestEvent::Relayers(RewardRegistered { - relayer: REGULAR_RELAYER, - rewards_account_params: TEST_REWARDS_ACCOUNT_PARAMS, - reward: 100 - }), - topics: vec![], - }), - ); - }); - } - - #[test] - fn root_cant_claim_anything() { - run_test(|| { - assert_noop!( - Pallet::::claim_rewards( - RuntimeOrigin::root(), - TEST_REWARDS_ACCOUNT_PARAMS - ), - DispatchError::BadOrigin, - ); - }); - } - - #[test] - fn relayer_cant_claim_if_no_reward_exists() { - run_test(|| { - assert_noop!( - Pallet::::claim_rewards( - RuntimeOrigin::signed(REGULAR_RELAYER), - TEST_REWARDS_ACCOUNT_PARAMS - ), - Error::::NoRewardForRelayer, - ); - }); - } - - #[test] - fn relayer_cant_claim_if_payment_procedure_fails() { - run_test(|| { - RelayerRewards::::insert( - FAILING_RELAYER, - TEST_REWARDS_ACCOUNT_PARAMS, - 100, - ); - assert_noop!( - Pallet::::claim_rewards( - RuntimeOrigin::signed(FAILING_RELAYER), - TEST_REWARDS_ACCOUNT_PARAMS - ), - Error::::FailedToPayReward, - ); - }); - } - - #[test] - fn relayer_can_claim_reward() { - run_test(|| { - get_ready_for_events(); - - RelayerRewards::::insert( - REGULAR_RELAYER, - TEST_REWARDS_ACCOUNT_PARAMS, - 100, - ); - assert_ok!(Pallet::::claim_rewards( - RuntimeOrigin::signed(REGULAR_RELAYER), - TEST_REWARDS_ACCOUNT_PARAMS - )); - assert_eq!( - RelayerRewards::::get(REGULAR_RELAYER, TEST_REWARDS_ACCOUNT_PARAMS), - None - ); - - // Check if the `RewardPaid` event was emitted. 
- assert_eq!( - System::::events().last(), - Some(&EventRecord { - phase: Phase::Initialization, - event: TestEvent::Relayers(RewardPaid { - relayer: REGULAR_RELAYER, - rewards_account_params: TEST_REWARDS_ACCOUNT_PARAMS, - reward: 100 - }), - topics: vec![], - }), - ); - }); - } - - #[test] - fn pay_reward_from_account_actually_pays_reward() { - type Balances = pallet_balances::Pallet; - type PayLaneRewardFromAccount = bp_relayers::PayRewardFromAccount; - - run_test(|| { - let in_lane_0 = RewardsAccountParams::new( - LaneId([0, 0, 0, 0]), - *b"test", - RewardsAccountOwner::ThisChain, - ); - let out_lane_1 = RewardsAccountParams::new( - LaneId([0, 0, 0, 1]), - *b"test", - RewardsAccountOwner::BridgedChain, - ); - - let in_lane0_rewards_account = PayLaneRewardFromAccount::rewards_account(in_lane_0); - let out_lane1_rewards_account = PayLaneRewardFromAccount::rewards_account(out_lane_1); - - Balances::mint_into(&in_lane0_rewards_account, 100).unwrap(); - Balances::mint_into(&out_lane1_rewards_account, 100).unwrap(); - assert_eq!(Balances::balance(&in_lane0_rewards_account), 100); - assert_eq!(Balances::balance(&out_lane1_rewards_account), 100); - assert_eq!(Balances::balance(&1), 0); - - PayLaneRewardFromAccount::pay_reward(&1, in_lane_0, 100).unwrap(); - assert_eq!(Balances::balance(&in_lane0_rewards_account), 0); - assert_eq!(Balances::balance(&out_lane1_rewards_account), 100); - assert_eq!(Balances::balance(&1), 100); - - PayLaneRewardFromAccount::pay_reward(&1, out_lane_1, 100).unwrap(); - assert_eq!(Balances::balance(&in_lane0_rewards_account), 0); - assert_eq!(Balances::balance(&out_lane1_rewards_account), 0); - assert_eq!(Balances::balance(&1), 200); - }); - } - - #[test] - fn register_fails_if_valid_till_is_a_past_block() { - run_test(|| { - System::::set_block_number(100); - - assert_noop!( - Pallet::::register(RuntimeOrigin::signed(REGISTER_RELAYER), 50), - Error::::InvalidRegistrationLease, - ); - }); - } - - #[test] - fn 
register_fails_if_valid_till_lease_is_less_than_required() { - run_test(|| { - System::::set_block_number(100); - - assert_noop!( - Pallet::::register( - RuntimeOrigin::signed(REGISTER_RELAYER), - 99 + Lease::get() - ), - Error::::InvalidRegistrationLease, - ); - }); - } - - #[test] - fn register_works() { - run_test(|| { - get_ready_for_events(); - - assert_ok!(Pallet::::register( - RuntimeOrigin::signed(REGISTER_RELAYER), - 150 - )); - assert_eq!(Balances::reserved_balance(REGISTER_RELAYER), Stake::get()); - assert_eq!( - Pallet::::registered_relayer(REGISTER_RELAYER), - Some(Registration { valid_till: 150, stake: Stake::get() }), - ); - - assert_eq!( - System::::events().last(), - Some(&EventRecord { - phase: Phase::Initialization, - event: TestEvent::Relayers(Event::RegistrationUpdated { - relayer: REGISTER_RELAYER, - registration: Registration { valid_till: 150, stake: Stake::get() }, - }), - topics: vec![], - }), - ); - }); - } - - #[test] - fn register_fails_if_new_valid_till_is_lesser_than_previous() { - run_test(|| { - assert_ok!(Pallet::::register( - RuntimeOrigin::signed(REGISTER_RELAYER), - 150 - )); - - assert_noop!( - Pallet::::register(RuntimeOrigin::signed(REGISTER_RELAYER), 125), - Error::::CannotReduceRegistrationLease, - ); - }); - } - - #[test] - fn register_fails_if_it_cant_unreserve_some_balance_if_required_stake_decreases() { - run_test(|| { - RegisteredRelayers::::insert( - REGISTER_RELAYER, - Registration { valid_till: 150, stake: Stake::get() + 1 }, - ); - - assert_noop!( - Pallet::::register(RuntimeOrigin::signed(REGISTER_RELAYER), 150), - Error::::FailedToUnreserve, - ); - }); - } - - #[test] - fn register_unreserves_some_balance_if_required_stake_decreases() { - run_test(|| { - get_ready_for_events(); - - RegisteredRelayers::::insert( - REGISTER_RELAYER, - Registration { valid_till: 150, stake: Stake::get() + 1 }, - ); - TestStakeAndSlash::reserve(®ISTER_RELAYER, Stake::get() + 1).unwrap(); - 
assert_eq!(Balances::reserved_balance(REGISTER_RELAYER), Stake::get() + 1); - let free_balance = Balances::free_balance(REGISTER_RELAYER); - - assert_ok!(Pallet::::register( - RuntimeOrigin::signed(REGISTER_RELAYER), - 150 - )); - assert_eq!(Balances::reserved_balance(REGISTER_RELAYER), Stake::get()); - assert_eq!(Balances::free_balance(REGISTER_RELAYER), free_balance + 1); - assert_eq!( - Pallet::::registered_relayer(REGISTER_RELAYER), - Some(Registration { valid_till: 150, stake: Stake::get() }), - ); - - assert_eq!( - System::::events().last(), - Some(&EventRecord { - phase: Phase::Initialization, - event: TestEvent::Relayers(Event::RegistrationUpdated { - relayer: REGISTER_RELAYER, - registration: Registration { valid_till: 150, stake: Stake::get() } - }), - topics: vec![], - }), - ); - }); - } - - #[test] - fn register_fails_if_it_cant_reserve_some_balance() { - run_test(|| { - Balances::set_balance(®ISTER_RELAYER, 0); - assert_noop!( - Pallet::::register(RuntimeOrigin::signed(REGISTER_RELAYER), 150), - Error::::FailedToReserve, - ); - }); - } - - #[test] - fn register_fails_if_it_cant_reserve_some_balance_if_required_stake_increases() { - run_test(|| { - RegisteredRelayers::::insert( - REGISTER_RELAYER, - Registration { valid_till: 150, stake: Stake::get() - 1 }, - ); - Balances::set_balance(®ISTER_RELAYER, 0); - - assert_noop!( - Pallet::::register(RuntimeOrigin::signed(REGISTER_RELAYER), 150), - Error::::FailedToReserve, - ); - }); - } - - #[test] - fn register_reserves_some_balance_if_required_stake_increases() { - run_test(|| { - get_ready_for_events(); - - RegisteredRelayers::::insert( - REGISTER_RELAYER, - Registration { valid_till: 150, stake: Stake::get() - 1 }, - ); - TestStakeAndSlash::reserve(®ISTER_RELAYER, Stake::get() - 1).unwrap(); - - let free_balance = Balances::free_balance(REGISTER_RELAYER); - assert_ok!(Pallet::::register( - RuntimeOrigin::signed(REGISTER_RELAYER), - 150 - )); - assert_eq!(Balances::reserved_balance(REGISTER_RELAYER), 
Stake::get()); - assert_eq!(Balances::free_balance(REGISTER_RELAYER), free_balance - 1); - assert_eq!( - Pallet::::registered_relayer(REGISTER_RELAYER), - Some(Registration { valid_till: 150, stake: Stake::get() }), - ); - - assert_eq!( - System::::events().last(), - Some(&EventRecord { - phase: Phase::Initialization, - event: TestEvent::Relayers(Event::RegistrationUpdated { - relayer: REGISTER_RELAYER, - registration: Registration { valid_till: 150, stake: Stake::get() } - }), - topics: vec![], - }), - ); - }); - } - - #[test] - fn deregister_fails_if_not_registered() { - run_test(|| { - assert_noop!( - Pallet::::deregister(RuntimeOrigin::signed(REGISTER_RELAYER)), - Error::::NotRegistered, - ); - }); - } - - #[test] - fn deregister_fails_if_registration_is_still_active() { - run_test(|| { - assert_ok!(Pallet::::register( - RuntimeOrigin::signed(REGISTER_RELAYER), - 150 - )); - - System::::set_block_number(100); - - assert_noop!( - Pallet::::deregister(RuntimeOrigin::signed(REGISTER_RELAYER)), - Error::::RegistrationIsStillActive, - ); - }); - } - - #[test] - fn deregister_works() { - run_test(|| { - get_ready_for_events(); - - assert_ok!(Pallet::::register( - RuntimeOrigin::signed(REGISTER_RELAYER), - 150 - )); - - System::::set_block_number(151); - - let reserved_balance = Balances::reserved_balance(REGISTER_RELAYER); - let free_balance = Balances::free_balance(REGISTER_RELAYER); - assert_ok!(Pallet::::deregister(RuntimeOrigin::signed(REGISTER_RELAYER))); - assert_eq!( - Balances::reserved_balance(REGISTER_RELAYER), - reserved_balance - Stake::get() - ); - assert_eq!(Balances::free_balance(REGISTER_RELAYER), free_balance + Stake::get()); - - assert_eq!( - System::::events().last(), - Some(&EventRecord { - phase: Phase::Initialization, - event: TestEvent::Relayers(Event::Deregistered { relayer: REGISTER_RELAYER }), - topics: vec![], - }), - ); - }); - } - - #[test] - fn is_registration_active_is_false_for_unregistered_relayer() { - run_test(|| { - 
assert!(!Pallet::::is_registration_active(®ISTER_RELAYER)); - }); - } - - #[test] - fn is_registration_active_is_false_when_stake_is_too_low() { - run_test(|| { - RegisteredRelayers::::insert( - REGISTER_RELAYER, - Registration { valid_till: 150, stake: Stake::get() - 1 }, - ); - assert!(!Pallet::::is_registration_active(®ISTER_RELAYER)); - }); - } - - #[test] - fn is_registration_active_is_false_when_remaining_lease_is_too_low() { - run_test(|| { - System::::set_block_number(150 - Lease::get()); - - RegisteredRelayers::::insert( - REGISTER_RELAYER, - Registration { valid_till: 150, stake: Stake::get() }, - ); - assert!(!Pallet::::is_registration_active(®ISTER_RELAYER)); - }); - } - - #[test] - fn is_registration_active_is_true_when_relayer_is_properly_registeered() { - run_test(|| { - System::::set_block_number(150 - Lease::get()); - - RegisteredRelayers::::insert( - REGISTER_RELAYER, - Registration { valid_till: 151, stake: Stake::get() }, - ); - assert!(Pallet::::is_registration_active(®ISTER_RELAYER)); - }); - } -} diff --git a/modules/relayers/src/mock.rs b/modules/relayers/src/mock.rs deleted file mode 100644 index 667b10e5c125ed74bad2aa7796756f372578c2ce..0000000000000000000000000000000000000000 --- a/modules/relayers/src/mock.rs +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -#![cfg(test)] - -use crate as pallet_bridge_relayers; - -use bp_messages::LaneId; -use bp_relayers::{ - PayRewardFromAccount, PaymentProcedure, RewardsAccountOwner, RewardsAccountParams, -}; -use frame_support::{ - derive_impl, parameter_types, traits::fungible::Mutate, weights::RuntimeDbWeight, -}; -use sp_runtime::BuildStorage; - -pub type AccountId = u64; -pub type Balance = u64; -pub type BlockNumber = u64; - -pub type TestStakeAndSlash = pallet_bridge_relayers::StakeAndSlashNamed< - AccountId, - BlockNumber, - Balances, - ReserveId, - Stake, - Lease, ->; - -type Block = frame_system::mocking::MockBlock; - -frame_support::construct_runtime! { - pub enum TestRuntime - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Balances: pallet_balances::{Pallet, Event}, - Relayers: pallet_bridge_relayers::{Pallet, Call, Event}, - } -} - -parameter_types! 
{ - pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { read: 1, write: 2 }; - pub const ExistentialDeposit: Balance = 1; - pub const ReserveId: [u8; 8] = *b"brdgrlrs"; - pub const Stake: Balance = 1_000; - pub const Lease: BlockNumber = 8; -} - -#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] -impl frame_system::Config for TestRuntime { - type Block = Block; - type AccountData = pallet_balances::AccountData; - type DbWeight = DbWeight; -} - -#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] -impl pallet_balances::Config for TestRuntime { - type ReserveIdentifier = [u8; 8]; - type AccountStore = System; -} - -impl pallet_bridge_relayers::Config for TestRuntime { - type RuntimeEvent = RuntimeEvent; - type Reward = Balance; - type PaymentProcedure = TestPaymentProcedure; - type StakeAndSlash = TestStakeAndSlash; - type WeightInfo = (); -} - -#[cfg(feature = "runtime-benchmarks")] -impl pallet_bridge_relayers::benchmarking::Config for TestRuntime { - fn prepare_rewards_account(account_params: RewardsAccountParams, reward: Balance) { - let rewards_account = - bp_relayers::PayRewardFromAccount::::rewards_account( - account_params, - ); - Self::deposit_account(rewards_account, reward); - } - - fn deposit_account(account: Self::AccountId, balance: Self::Reward) { - Balances::mint_into(&account, balance.saturating_add(ExistentialDeposit::get())).unwrap(); - } -} - -/// Message lane that we're using in tests. -pub const TEST_REWARDS_ACCOUNT_PARAMS: RewardsAccountParams = - RewardsAccountParams::new(LaneId([0, 0, 0, 0]), *b"test", RewardsAccountOwner::ThisChain); - -/// Regular relayer that may receive rewards. -pub const REGULAR_RELAYER: AccountId = 1; - -/// Relayer that can't receive rewards. -pub const FAILING_RELAYER: AccountId = 2; - -/// Relayer that is able to register. 
-pub const REGISTER_RELAYER: AccountId = 42; - -/// Payment procedure that rejects payments to the `FAILING_RELAYER`. -pub struct TestPaymentProcedure; - -impl TestPaymentProcedure { - pub fn rewards_account(params: RewardsAccountParams) -> AccountId { - PayRewardFromAccount::<(), AccountId>::rewards_account(params) - } -} - -impl PaymentProcedure for TestPaymentProcedure { - type Error = (); - - fn pay_reward( - relayer: &AccountId, - _lane_id: RewardsAccountParams, - _reward: Balance, - ) -> Result<(), Self::Error> { - match *relayer { - FAILING_RELAYER => Err(()), - _ => Ok(()), - } - } -} - -/// Return test externalities to use in tests. -pub fn new_test_ext() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - sp_io::TestExternalities::new(t) -} - -/// Run pallet test. -pub fn run_test(test: impl FnOnce() -> T) -> T { - new_test_ext().execute_with(|| { - Balances::mint_into(®ISTER_RELAYER, ExistentialDeposit::get() + 10 * Stake::get()) - .unwrap(); - - test() - }) -} diff --git a/modules/relayers/src/payment_adapter.rs b/modules/relayers/src/payment_adapter.rs deleted file mode 100644 index b2d9c676bddc493700a45fc957235dbb9516296b..0000000000000000000000000000000000000000 --- a/modules/relayers/src/payment_adapter.rs +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Code that allows relayers pallet to be used as a payment mechanism for the messages pallet. - -use crate::{Config, Pallet}; - -use bp_messages::{ - source_chain::{DeliveryConfirmationPayments, RelayersRewards}, - LaneId, MessageNonce, -}; -use bp_relayers::{RewardsAccountOwner, RewardsAccountParams}; -use frame_support::{sp_runtime::SaturatedConversion, traits::Get}; -use sp_arithmetic::traits::{Saturating, Zero}; -use sp_std::{collections::vec_deque::VecDeque, marker::PhantomData, ops::RangeInclusive}; - -/// Adapter that allows relayers pallet to be used as a delivery+dispatch payment mechanism -/// for the messages pallet. -pub struct DeliveryConfirmationPaymentsAdapter( - PhantomData<(T, MI, DeliveryReward)>, -); - -impl DeliveryConfirmationPayments - for DeliveryConfirmationPaymentsAdapter -where - T: Config + pallet_bridge_messages::Config, - MI: 'static, - DeliveryReward: Get, -{ - type Error = &'static str; - - fn pay_reward( - lane_id: LaneId, - messages_relayers: VecDeque>, - confirmation_relayer: &T::AccountId, - received_range: &RangeInclusive, - ) -> MessageNonce { - let relayers_rewards = - bp_messages::calc_relayers_rewards::(messages_relayers, received_range); - let rewarded_relayers = relayers_rewards.len(); - - register_relayers_rewards::( - confirmation_relayer, - relayers_rewards, - RewardsAccountParams::new( - lane_id, - T::BridgedChainId::get(), - RewardsAccountOwner::BridgedChain, - ), - DeliveryReward::get(), - ); - - rewarded_relayers as _ - } -} - -// Update rewards to given relayers, optionally rewarding confirmation relayer. 
-fn register_relayers_rewards( - confirmation_relayer: &T::AccountId, - relayers_rewards: RelayersRewards, - lane_id: RewardsAccountParams, - delivery_fee: T::Reward, -) { - // reward every relayer except `confirmation_relayer` - let mut confirmation_relayer_reward = T::Reward::zero(); - for (relayer, messages) in relayers_rewards { - // sane runtime configurations guarantee that the number of messages will be below - // `u32::MAX` - let relayer_reward = T::Reward::saturated_from(messages).saturating_mul(delivery_fee); - - if relayer != *confirmation_relayer { - Pallet::::register_relayer_reward(lane_id, &relayer, relayer_reward); - } else { - confirmation_relayer_reward = - confirmation_relayer_reward.saturating_add(relayer_reward); - } - } - - // finally - pay reward to confirmation relayer - Pallet::::register_relayer_reward( - lane_id, - confirmation_relayer, - confirmation_relayer_reward, - ); -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{mock::*, RelayerRewards}; - - const RELAYER_1: AccountId = 1; - const RELAYER_2: AccountId = 2; - const RELAYER_3: AccountId = 3; - - fn relayers_rewards() -> RelayersRewards { - vec![(RELAYER_1, 2), (RELAYER_2, 3)].into_iter().collect() - } - - #[test] - fn confirmation_relayer_is_rewarded_if_it_has_also_delivered_messages() { - run_test(|| { - register_relayers_rewards::( - &RELAYER_2, - relayers_rewards(), - TEST_REWARDS_ACCOUNT_PARAMS, - 50, - ); - - assert_eq!( - RelayerRewards::::get(RELAYER_1, TEST_REWARDS_ACCOUNT_PARAMS), - Some(100) - ); - assert_eq!( - RelayerRewards::::get(RELAYER_2, TEST_REWARDS_ACCOUNT_PARAMS), - Some(150) - ); - }); - } - - #[test] - fn confirmation_relayer_is_not_rewarded_if_it_has_not_delivered_any_messages() { - run_test(|| { - register_relayers_rewards::( - &RELAYER_3, - relayers_rewards(), - TEST_REWARDS_ACCOUNT_PARAMS, - 50, - ); - - assert_eq!( - RelayerRewards::::get(RELAYER_1, TEST_REWARDS_ACCOUNT_PARAMS), - Some(100) - ); - assert_eq!( - RelayerRewards::::get(RELAYER_2, 
TEST_REWARDS_ACCOUNT_PARAMS), - Some(150) - ); - assert_eq!( - RelayerRewards::::get(RELAYER_3, TEST_REWARDS_ACCOUNT_PARAMS), - None - ); - }); - } -} diff --git a/modules/relayers/src/stake_adapter.rs b/modules/relayers/src/stake_adapter.rs deleted file mode 100644 index 7ba90d91dfd94e49bf0ff6ee8fcc06f80e287c41..0000000000000000000000000000000000000000 --- a/modules/relayers/src/stake_adapter.rs +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Code that allows `NamedReservableCurrency` to be used as a `StakeAndSlash` -//! mechanism of the relayers pallet. - -use bp_relayers::{ExplicitOrAccountParams, PayRewardFromAccount, StakeAndSlash}; -use codec::Codec; -use frame_support::traits::{tokens::BalanceStatus, NamedReservableCurrency}; -use sp_runtime::{traits::Get, DispatchError, DispatchResult}; -use sp_std::{fmt::Debug, marker::PhantomData}; - -/// `StakeAndSlash` that works with `NamedReservableCurrency` and uses named -/// reservations. -/// -/// **WARNING**: this implementation assumes that the relayers pallet is configured to -/// use the [`bp_relayers::PayRewardFromAccount`] as its relayers payment scheme. 
-pub struct StakeAndSlashNamed( - PhantomData<(AccountId, BlockNumber, Currency, ReserveId, Stake, Lease)>, -); - -impl - StakeAndSlash - for StakeAndSlashNamed -where - AccountId: Codec + Debug, - Currency: NamedReservableCurrency, - ReserveId: Get, - Stake: Get, - Lease: Get, -{ - type RequiredStake = Stake; - type RequiredRegistrationLease = Lease; - - fn reserve(relayer: &AccountId, amount: Currency::Balance) -> DispatchResult { - Currency::reserve_named(&ReserveId::get(), relayer, amount) - } - - fn unreserve(relayer: &AccountId, amount: Currency::Balance) -> Currency::Balance { - Currency::unreserve_named(&ReserveId::get(), relayer, amount) - } - - fn repatriate_reserved( - relayer: &AccountId, - beneficiary: ExplicitOrAccountParams, - amount: Currency::Balance, - ) -> Result { - let beneficiary_account = match beneficiary { - ExplicitOrAccountParams::Explicit(account) => account, - ExplicitOrAccountParams::Params(params) => - PayRewardFromAccount::<(), AccountId>::rewards_account(params), - }; - Currency::repatriate_reserved_named( - &ReserveId::get(), - relayer, - &beneficiary_account, - amount, - BalanceStatus::Free, - ) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::*; - - use frame_support::traits::fungible::Mutate; - - fn test_stake() -> Balance { - Stake::get() - } - - #[test] - fn reserve_works() { - run_test(|| { - assert!(TestStakeAndSlash::reserve(&1, test_stake()).is_err()); - assert_eq!(Balances::free_balance(1), 0); - assert_eq!(Balances::reserved_balance(1), 0); - - Balances::mint_into(&2, test_stake() - 1).unwrap(); - assert!(TestStakeAndSlash::reserve(&2, test_stake()).is_err()); - assert_eq!(Balances::free_balance(2), test_stake() - 1); - assert_eq!(Balances::reserved_balance(2), 0); - - Balances::mint_into(&3, test_stake() * 2).unwrap(); - assert_eq!(TestStakeAndSlash::reserve(&3, test_stake()), Ok(())); - assert_eq!(Balances::free_balance(3), test_stake()); - assert_eq!(Balances::reserved_balance(3), test_stake()); 
- }) - } - - #[test] - fn unreserve_works() { - run_test(|| { - assert_eq!(TestStakeAndSlash::unreserve(&1, test_stake()), test_stake()); - assert_eq!(Balances::free_balance(1), 0); - assert_eq!(Balances::reserved_balance(1), 0); - - Balances::mint_into(&2, test_stake() * 2).unwrap(); - TestStakeAndSlash::reserve(&2, test_stake() / 3).unwrap(); - assert_eq!( - TestStakeAndSlash::unreserve(&2, test_stake()), - test_stake() - test_stake() / 3 - ); - assert_eq!(Balances::free_balance(2), test_stake() * 2); - assert_eq!(Balances::reserved_balance(2), 0); - - Balances::mint_into(&3, test_stake() * 2).unwrap(); - TestStakeAndSlash::reserve(&3, test_stake()).unwrap(); - assert_eq!(TestStakeAndSlash::unreserve(&3, test_stake()), 0); - assert_eq!(Balances::free_balance(3), test_stake() * 2); - assert_eq!(Balances::reserved_balance(3), 0); - }) - } - - #[test] - fn repatriate_reserved_works() { - run_test(|| { - let beneficiary = TEST_REWARDS_ACCOUNT_PARAMS; - let beneficiary_account = TestPaymentProcedure::rewards_account(beneficiary); - - let mut expected_balance = ExistentialDeposit::get(); - Balances::mint_into(&beneficiary_account, expected_balance).unwrap(); - - assert_eq!( - TestStakeAndSlash::repatriate_reserved( - &1, - ExplicitOrAccountParams::Params(beneficiary), - test_stake() - ), - Ok(test_stake()) - ); - assert_eq!(Balances::free_balance(1), 0); - assert_eq!(Balances::reserved_balance(1), 0); - assert_eq!(Balances::free_balance(beneficiary_account), expected_balance); - assert_eq!(Balances::reserved_balance(beneficiary_account), 0); - - expected_balance += test_stake() / 3; - Balances::mint_into(&2, test_stake() * 2).unwrap(); - TestStakeAndSlash::reserve(&2, test_stake() / 3).unwrap(); - assert_eq!( - TestStakeAndSlash::repatriate_reserved( - &2, - ExplicitOrAccountParams::Params(beneficiary), - test_stake() - ), - Ok(test_stake() - test_stake() / 3) - ); - assert_eq!(Balances::free_balance(2), test_stake() * 2 - test_stake() / 3); - 
assert_eq!(Balances::reserved_balance(2), 0); - assert_eq!(Balances::free_balance(beneficiary_account), expected_balance); - assert_eq!(Balances::reserved_balance(beneficiary_account), 0); - - expected_balance += test_stake(); - Balances::mint_into(&3, test_stake() * 2).unwrap(); - TestStakeAndSlash::reserve(&3, test_stake()).unwrap(); - assert_eq!( - TestStakeAndSlash::repatriate_reserved( - &3, - ExplicitOrAccountParams::Params(beneficiary), - test_stake() - ), - Ok(0) - ); - assert_eq!(Balances::free_balance(3), test_stake()); - assert_eq!(Balances::reserved_balance(3), 0); - assert_eq!(Balances::free_balance(beneficiary_account), expected_balance); - assert_eq!(Balances::reserved_balance(beneficiary_account), 0); - }) - } - - #[test] - fn repatriate_reserved_doesnt_work_when_beneficiary_account_is_missing() { - run_test(|| { - let beneficiary = TEST_REWARDS_ACCOUNT_PARAMS; - let beneficiary_account = TestPaymentProcedure::rewards_account(beneficiary); - - Balances::mint_into(&3, test_stake() * 2).unwrap(); - TestStakeAndSlash::reserve(&3, test_stake()).unwrap(); - assert!(TestStakeAndSlash::repatriate_reserved( - &3, - ExplicitOrAccountParams::Params(beneficiary), - test_stake() - ) - .is_err()); - assert_eq!(Balances::free_balance(3), test_stake()); - assert_eq!(Balances::reserved_balance(3), test_stake()); - assert_eq!(Balances::free_balance(beneficiary_account), 0); - assert_eq!(Balances::reserved_balance(beneficiary_account), 0); - }); - } -} diff --git a/modules/relayers/src/weights.rs b/modules/relayers/src/weights.rs deleted file mode 100644 index c2c065b0c0a270a254a60dccb62465d6c2fa4aa6..0000000000000000000000000000000000000000 --- a/modules/relayers/src/weights.rs +++ /dev/null @@ -1,259 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Autogenerated weights for pallet_bridge_relayers -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-04-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `covid`, CPU: `11th Gen Intel(R) Core(TM) i7-11800H @ 2.30GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 - -// Executed Command: -// target/release/rip-bridge-node -// benchmark -// pallet -// --chain=dev -// --steps=50 -// --repeat=20 -// --pallet=pallet_bridge_relayers -// --extrinsic=* -// --execution=wasm -// --wasm-execution=Compiled -// --heap-pages=4096 -// --output=./modules/relayers/src/weights.rs -// --template=./.maintain/bridge-weight-template.hbs - -#![allow(clippy::all)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{ - traits::Get, - weights::{constants::RocksDbWeight, Weight}, -}; -use sp_std::marker::PhantomData; - -/// Weight functions needed for pallet_bridge_relayers. 
-pub trait WeightInfo { - fn claim_rewards() -> Weight; - fn register() -> Weight; - fn deregister() -> Weight; - fn slash_and_deregister() -> Weight; - fn register_relayer_reward() -> Weight; -} - -/// Weights for `pallet_bridge_relayers` that are generated using one of the Bridge testnets. -/// -/// Those weights are test only and must never be used in production. -pub struct BridgeWeight(PhantomData); -impl WeightInfo for BridgeWeight { - /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) - /// - /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(65), added: 2540, - /// mode: MaxEncodedLen) - /// - /// Storage: Balances TotalIssuance (r:1 w:0) - /// - /// Proof: Balances TotalIssuance (max_values: Some(1), max_size: Some(8), added: 503, mode: - /// MaxEncodedLen) - /// - /// Storage: System Account (r:1 w:1) - /// - /// Proof: System Account (max_values: None, max_size: Some(104), added: 2579, mode: - /// MaxEncodedLen) - fn claim_rewards() -> Weight { - // Proof Size summary in bytes: - // Measured: `294` - // Estimated: `8592` - // Minimum execution time: 77_614 nanoseconds. - Weight::from_parts(79_987_000, 8592) - .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) - } - /// Storage: BridgeRelayers RegisteredRelayers (r:1 w:1) - /// - /// Proof: BridgeRelayers RegisteredRelayers (max_values: None, max_size: Some(64), added: 2539, - /// mode: MaxEncodedLen) - /// - /// Storage: Balances Reserves (r:1 w:1) - /// - /// Proof: Balances Reserves (max_values: None, max_size: Some(849), added: 3324, mode: - /// MaxEncodedLen) - fn register() -> Weight { - // Proof Size summary in bytes: - // Measured: `87` - // Estimated: `7843` - // Minimum execution time: 39_590 nanoseconds. 
- Weight::from_parts(40_546_000, 7843) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) - } - /// Storage: BridgeRelayers RegisteredRelayers (r:1 w:1) - /// - /// Proof: BridgeRelayers RegisteredRelayers (max_values: None, max_size: Some(64), added: 2539, - /// mode: MaxEncodedLen) - /// - /// Storage: Balances Reserves (r:1 w:1) - /// - /// Proof: Balances Reserves (max_values: None, max_size: Some(849), added: 3324, mode: - /// MaxEncodedLen) - fn deregister() -> Weight { - // Proof Size summary in bytes: - // Measured: `264` - // Estimated: `7843` - // Minimum execution time: 43_332 nanoseconds. - Weight::from_parts(45_087_000, 7843) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(2_u64)) - } - /// Storage: BridgeRelayers RegisteredRelayers (r:1 w:1) - /// - /// Proof: BridgeRelayers RegisteredRelayers (max_values: None, max_size: Some(64), added: 2539, - /// mode: MaxEncodedLen) - /// - /// Storage: Balances Reserves (r:1 w:1) - /// - /// Proof: Balances Reserves (max_values: None, max_size: Some(849), added: 3324, mode: - /// MaxEncodedLen) - /// - /// Storage: System Account (r:1 w:1) - /// - /// Proof: System Account (max_values: None, max_size: Some(104), added: 2579, mode: - /// MaxEncodedLen) - fn slash_and_deregister() -> Weight { - // Proof Size summary in bytes: - // Measured: `380` - // Estimated: `11412` - // Minimum execution time: 42_358 nanoseconds. - Weight::from_parts(43_539_000, 11412) - .saturating_add(T::DbWeight::get().reads(3_u64)) - .saturating_add(T::DbWeight::get().writes(3_u64)) - } - /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) - /// - /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(65), added: 2540, - /// mode: MaxEncodedLen) - fn register_relayer_reward() -> Weight { - // Proof Size summary in bytes: - // Measured: `12` - // Estimated: `3530` - // Minimum execution time: 6_338 nanoseconds. 
- Weight::from_parts(6_526_000, 3530) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } -} - -// For backwards compatibility and tests -impl WeightInfo for () { - /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) - /// - /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(65), added: 2540, - /// mode: MaxEncodedLen) - /// - /// Storage: Balances TotalIssuance (r:1 w:0) - /// - /// Proof: Balances TotalIssuance (max_values: Some(1), max_size: Some(8), added: 503, mode: - /// MaxEncodedLen) - /// - /// Storage: System Account (r:1 w:1) - /// - /// Proof: System Account (max_values: None, max_size: Some(104), added: 2579, mode: - /// MaxEncodedLen) - fn claim_rewards() -> Weight { - // Proof Size summary in bytes: - // Measured: `294` - // Estimated: `8592` - // Minimum execution time: 77_614 nanoseconds. - Weight::from_parts(79_987_000, 8592) - .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - } - /// Storage: BridgeRelayers RegisteredRelayers (r:1 w:1) - /// - /// Proof: BridgeRelayers RegisteredRelayers (max_values: None, max_size: Some(64), added: 2539, - /// mode: MaxEncodedLen) - /// - /// Storage: Balances Reserves (r:1 w:1) - /// - /// Proof: Balances Reserves (max_values: None, max_size: Some(849), added: 3324, mode: - /// MaxEncodedLen) - fn register() -> Weight { - // Proof Size summary in bytes: - // Measured: `87` - // Estimated: `7843` - // Minimum execution time: 39_590 nanoseconds. 
- Weight::from_parts(40_546_000, 7843) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - } - /// Storage: BridgeRelayers RegisteredRelayers (r:1 w:1) - /// - /// Proof: BridgeRelayers RegisteredRelayers (max_values: None, max_size: Some(64), added: 2539, - /// mode: MaxEncodedLen) - /// - /// Storage: Balances Reserves (r:1 w:1) - /// - /// Proof: Balances Reserves (max_values: None, max_size: Some(849), added: 3324, mode: - /// MaxEncodedLen) - fn deregister() -> Weight { - // Proof Size summary in bytes: - // Measured: `264` - // Estimated: `7843` - // Minimum execution time: 43_332 nanoseconds. - Weight::from_parts(45_087_000, 7843) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(2_u64)) - } - /// Storage: BridgeRelayers RegisteredRelayers (r:1 w:1) - /// - /// Proof: BridgeRelayers RegisteredRelayers (max_values: None, max_size: Some(64), added: 2539, - /// mode: MaxEncodedLen) - /// - /// Storage: Balances Reserves (r:1 w:1) - /// - /// Proof: Balances Reserves (max_values: None, max_size: Some(849), added: 3324, mode: - /// MaxEncodedLen) - /// - /// Storage: System Account (r:1 w:1) - /// - /// Proof: System Account (max_values: None, max_size: Some(104), added: 2579, mode: - /// MaxEncodedLen) - fn slash_and_deregister() -> Weight { - // Proof Size summary in bytes: - // Measured: `380` - // Estimated: `11412` - // Minimum execution time: 42_358 nanoseconds. 
- Weight::from_parts(43_539_000, 11412) - .saturating_add(RocksDbWeight::get().reads(3_u64)) - .saturating_add(RocksDbWeight::get().writes(3_u64)) - } - /// Storage: BridgeRelayers RelayerRewards (r:1 w:1) - /// - /// Proof: BridgeRelayers RelayerRewards (max_values: None, max_size: Some(65), added: 2540, - /// mode: MaxEncodedLen) - fn register_relayer_reward() -> Weight { - // Proof Size summary in bytes: - // Measured: `12` - // Estimated: `3530` - // Minimum execution time: 6_338 nanoseconds. - Weight::from_parts(6_526_000, 3530) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } -} diff --git a/modules/relayers/src/weights_ext.rs b/modules/relayers/src/weights_ext.rs deleted file mode 100644 index 9cd25c47c3782f709249dd9b1fd24e4c6ec8ab7f..0000000000000000000000000000000000000000 --- a/modules/relayers/src/weights_ext.rs +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Weight-related utilities. - -use crate::weights::WeightInfo; - -use frame_support::pallet_prelude::Weight; - -/// Extended weight info. 
-pub trait WeightInfoExt: WeightInfo { - /// Returns weight, that needs to be added to the pre-dispatch weight of message delivery call, - /// if `RefundBridgedParachainMessages` signed extension is deployed at runtime level. - fn receive_messages_proof_overhead_from_runtime() -> Weight { - Self::slash_and_deregister().max(Self::register_relayer_reward()) - } - - /// Returns weight, that needs to be added to the pre-dispatch weight of message delivery - /// confirmation call, if `RefundBridgedParachainMessages` signed extension is deployed at - /// runtime level. - fn receive_messages_delivery_proof_overhead_from_runtime() -> Weight { - Self::register_relayer_reward() - } - - /// Returns weight that we need to deduct from the message delivery call weight that has - /// completed successfully. - /// - /// Usually, the weight of `slash_and_deregister` is larger than the weight of the - /// `register_relayer_reward`. So if relayer has been rewarded, we want to deduct the difference - /// to get the actual post-dispatch weight. - fn extra_weight_of_successful_receive_messages_proof_call() -> Weight { - Self::slash_and_deregister().saturating_sub(Self::register_relayer_reward()) - } -} - -impl WeightInfoExt for T {} diff --git a/modules/xcm-bridge-hub-router/Cargo.toml b/modules/xcm-bridge-hub-router/Cargo.toml deleted file mode 100644 index 280eeac942bae791a6a7ba8ac217d9b2903cb223..0000000000000000000000000000000000000000 --- a/modules/xcm-bridge-hub-router/Cargo.toml +++ /dev/null @@ -1,66 +0,0 @@ -[package] -name = "pallet-xcm-bridge-hub-router" -description = "Bridge hub interface for sibling/parent chains with dynamic fees support." 
-version = "0.5.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } -log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive", "serde"] } - -# Bridge dependencies - -bp-xcm-bridge-hub-router = { path = "../../primitives/xcm-bridge-hub-router", default-features = false } - -# Substrate Dependencies - -frame-benchmarking = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false, optional = true } -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -frame-system = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } - -# Polkadot Dependencies - -xcm = { package = "staging-xcm", git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -xcm-builder = { package = "staging-xcm-builder", git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } - -[dev-dependencies] -sp-io = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } - -[features] -default = ["std"] -std = [ - "bp-xcm-bridge-hub-router/std", - "codec/std", - "frame-benchmarking/std", - "frame-support/std", - "frame-system/std", - "log/std", - "scale-info/std", - "sp-core/std", - 
"sp-runtime/std", - "sp-std/std", - "xcm-builder/std", - "xcm/std", -] -runtime-benchmarks = [ - "frame-benchmarking/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", - "xcm-builder/runtime-benchmarks", -] -try-runtime = [ - "frame-support/try-runtime", - "frame-system/try-runtime", - "sp-runtime/try-runtime", -] diff --git a/modules/xcm-bridge-hub-router/src/benchmarking.rs b/modules/xcm-bridge-hub-router/src/benchmarking.rs deleted file mode 100644 index c4f9f534c1a479cd7dc4ba545353b9d92c45d2c8..0000000000000000000000000000000000000000 --- a/modules/xcm-bridge-hub-router/src/benchmarking.rs +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! XCM bridge hub router pallet benchmarks. - -#![cfg(feature = "runtime-benchmarks")] - -use crate::{Bridge, Call}; - -use bp_xcm_bridge_hub_router::{BridgeState, MINIMAL_DELIVERY_FEE_FACTOR}; -use frame_benchmarking::{benchmarks_instance_pallet, BenchmarkError}; -use frame_support::traits::{EnsureOrigin, Get, Hooks, UnfilteredDispatchable}; -use sp_runtime::traits::Zero; -use xcm::prelude::*; - -/// Pallet we're benchmarking here. 
-pub struct Pallet, I: 'static = ()>(crate::Pallet); - -/// Trait that must be implemented by runtime to be able to benchmark pallet properly. -pub trait Config: crate::Config { - /// Fill up queue so it becomes congested. - fn make_congested(); - - /// Returns destination which is valid for this router instance. - /// (Needs to pass `T::Bridges`) - /// Make sure that `SendXcm` will pass. - fn ensure_bridged_target_destination() -> Result { - Ok(Location::new( - Self::UniversalLocation::get().len() as u8, - [GlobalConsensus(Self::BridgedNetworkId::get().unwrap())], - )) - } -} - -benchmarks_instance_pallet! { - on_initialize_when_non_congested { - Bridge::::put(BridgeState { - is_congested: false, - delivery_fee_factor: MINIMAL_DELIVERY_FEE_FACTOR + MINIMAL_DELIVERY_FEE_FACTOR, - }); - }: { - crate::Pallet::::on_initialize(Zero::zero()) - } - - on_initialize_when_congested { - Bridge::::put(BridgeState { - is_congested: false, - delivery_fee_factor: MINIMAL_DELIVERY_FEE_FACTOR + MINIMAL_DELIVERY_FEE_FACTOR, - }); - - let _ = T::ensure_bridged_target_destination()?; - T::make_congested(); - }: { - crate::Pallet::::on_initialize(Zero::zero()) - } - - report_bridge_status { - Bridge::::put(BridgeState::default()); - - let origin: T::RuntimeOrigin = T::BridgeHubOrigin::try_successful_origin().expect("expected valid BridgeHubOrigin"); - let bridge_id = Default::default(); - let is_congested = true; - - let call = Call::::report_bridge_status { bridge_id, is_congested }; - }: { call.dispatch_bypass_filter(origin)? 
} - verify { - assert!(Bridge::::get().is_congested); - } - - send_message { - let dest = T::ensure_bridged_target_destination()?; - let xcm = sp_std::vec![].into(); - - // make local queue congested, because it means additional db write - T::make_congested(); - }: { - send_xcm::>(dest, xcm).expect("message is sent") - } - verify { - assert!(Bridge::::get().delivery_fee_factor > MINIMAL_DELIVERY_FEE_FACTOR); - } -} diff --git a/modules/xcm-bridge-hub-router/src/lib.rs b/modules/xcm-bridge-hub-router/src/lib.rs deleted file mode 100644 index f219be78f9e1b5469fb752eed3f662c954d0ec42..0000000000000000000000000000000000000000 --- a/modules/xcm-bridge-hub-router/src/lib.rs +++ /dev/null @@ -1,568 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Pallet that may be used instead of `SovereignPaidRemoteExporter` in the XCM router -//! configuration. The main thing that the pallet offers is the dynamic message fee, -//! that is computed based on the bridge queues state. It starts exponentially increasing -//! if the queue between this chain and the sibling/child bridge hub is congested. -//! -//! All other bridge hub queues offer some backpressure mechanisms. So if at least one -//! of all queues is congested, it will eventually lead to the growth of the queue at -//! 
this chain. -//! -//! **A note on terminology**: when we mention the bridge hub here, we mean the chain that -//! has the messages pallet deployed (`pallet-bridge-grandpa`, `pallet-bridge-messages`, -//! `pallet-xcm-bridge-hub`, ...). It may be the system bridge hub parachain or any other -//! chain. - -#![cfg_attr(not(feature = "std"), no_std)] - -use bp_xcm_bridge_hub_router::{ - BridgeState, XcmChannelStatusProvider, MINIMAL_DELIVERY_FEE_FACTOR, -}; -use codec::Encode; -use frame_support::traits::Get; -use sp_core::H256; -use sp_runtime::{FixedPointNumber, FixedU128, Saturating}; -use xcm::prelude::*; -use xcm_builder::{ExporterFor, SovereignPaidRemoteExporter}; - -pub use pallet::*; -pub use weights::WeightInfo; - -pub mod benchmarking; -pub mod weights; - -mod mock; - -/// The factor that is used to increase current message fee factor when bridge experiencing -/// some lags. -const EXPONENTIAL_FEE_BASE: FixedU128 = FixedU128::from_rational(105, 100); // 1.05 -/// The factor that is used to increase current message fee factor for every sent kilobyte. -const MESSAGE_SIZE_FEE_BASE: FixedU128 = FixedU128::from_rational(1, 1000); // 0.001 - -/// Maximal size of the XCM message that may be sent over bridge. -/// -/// This should be less than the maximal size, allowed by the messages pallet, because -/// the message itself is wrapped in other structs and is double encoded. -pub const HARD_MESSAGE_SIZE_LIMIT: u32 = 32 * 1024; - -/// The target that will be used when publishing logs related to this pallet. -/// -/// This doesn't match the pattern used by other bridge pallets (`runtime::bridge-*`). But this -/// pallet has significant differences with those pallets. The main one is that is intended to -/// be deployed at sending chains. Other bridge pallets are likely to be deployed at the separate -/// bridge hub parachain. 
-pub const LOG_TARGET: &str = "xcm::bridge-hub-router"; - -#[frame_support::pallet] -pub mod pallet { - use super::*; - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::*; - - #[pallet::config] - pub trait Config: frame_system::Config { - /// Benchmarks results from runtime we're plugged into. - type WeightInfo: WeightInfo; - - /// Universal location of this runtime. - type UniversalLocation: Get; - /// The bridged network that this config is for if specified. - /// Also used for filtering `Bridges` by `BridgedNetworkId`. - /// If not specified, allows all networks pass through. - type BridgedNetworkId: Get>; - /// Configuration for supported **bridged networks/locations** with **bridge location** and - /// **possible fee**. Allows to externalize better control over allowed **bridged - /// networks/locations**. - type Bridges: ExporterFor; - /// Checks the XCM version for the destination. - type DestinationVersion: GetVersion; - - /// Origin of the sibling bridge hub that is allowed to report bridge status. - type BridgeHubOrigin: EnsureOrigin; - /// Actual message sender (`HRMP` or `DMP`) to the sibling bridge hub location. - type ToBridgeHubSender: SendXcm; - /// Underlying channel with the sibling bridge hub. It must match the channel, used - /// by the `Self::ToBridgeHubSender`. - type WithBridgeHubChannel: XcmChannelStatusProvider; - - /// Additional fee that is paid for every byte of the outbound message. - type ByteFee: Get; - /// Asset that is used to paid bridge fee. - type FeeAsset: Get; - } - - #[pallet::pallet] - pub struct Pallet(PhantomData<(T, I)>); - - #[pallet::hooks] - impl, I: 'static> Hooks> for Pallet { - fn on_initialize(_n: BlockNumberFor) -> Weight { - // TODO: make sure that `WithBridgeHubChannel::is_congested` returns true if either - // of XCM channels (outbound/inbound) is suspended. Because if outbound is suspended - // that is definitely congestion. 
If inbound is suspended, then we are not able to - // receive the "report_bridge_status" signal (that maybe sent by the bridge hub). - - // if the channel with sibling/child bridge hub is suspended, we don't change - // anything - if T::WithBridgeHubChannel::is_congested() { - return T::WeightInfo::on_initialize_when_congested() - } - - // if bridge has reported congestion, we don't change anything - let mut bridge = Self::bridge(); - if bridge.is_congested { - return T::WeightInfo::on_initialize_when_congested() - } - - // if fee factor is already minimal, we don't change anything - if bridge.delivery_fee_factor == MINIMAL_DELIVERY_FEE_FACTOR { - return T::WeightInfo::on_initialize_when_congested() - } - - let previous_factor = bridge.delivery_fee_factor; - bridge.delivery_fee_factor = - MINIMAL_DELIVERY_FEE_FACTOR.max(bridge.delivery_fee_factor / EXPONENTIAL_FEE_BASE); - log::info!( - target: LOG_TARGET, - "Bridge queue is uncongested. Decreased fee factor from {} to {}", - previous_factor, - bridge.delivery_fee_factor, - ); - - Bridge::::put(bridge); - T::WeightInfo::on_initialize_when_non_congested() - } - } - - #[pallet::call] - impl, I: 'static> Pallet { - /// Notification about congested bridge queue. - #[pallet::call_index(0)] - #[pallet::weight(T::WeightInfo::report_bridge_status())] - pub fn report_bridge_status( - origin: OriginFor, - // this argument is not currently used, but to ease future migration, we'll keep it - // here - bridge_id: H256, - is_congested: bool, - ) -> DispatchResult { - let _ = T::BridgeHubOrigin::ensure_origin(origin)?; - - log::info!( - target: LOG_TARGET, - "Received bridge status from {:?}: congested = {}", - bridge_id, - is_congested, - ); - - Bridge::::mutate(|bridge| { - bridge.is_congested = is_congested; - }); - Ok(()) - } - } - - /// Bridge that we are using. - /// - /// **bridges-v1** assumptions: all outbound messages through this router are using single lane - /// and to single remote consensus. 
If there is some other remote consensus that uses the same - /// bridge hub, the separate pallet instance shall be used, In `v2` we'll have all required - /// primitives (lane-id aka bridge-id, derived from XCM locations) to support multiple bridges - /// by the same pallet instance. - #[pallet::storage] - #[pallet::getter(fn bridge)] - pub type Bridge, I: 'static = ()> = StorageValue<_, BridgeState, ValueQuery>; - - impl, I: 'static> Pallet { - /// Called when new message is sent (queued to local outbound XCM queue) over the bridge. - pub(crate) fn on_message_sent_to_bridge(message_size: u32) { - let _ = Bridge::::try_mutate(|bridge| { - let is_channel_with_bridge_hub_congested = T::WithBridgeHubChannel::is_congested(); - let is_bridge_congested = bridge.is_congested; - - // if outbound queue is not congested AND bridge has not reported congestion, do - // nothing - if !is_channel_with_bridge_hub_congested && !is_bridge_congested { - return Err(()) - } - - // ok - we need to increase the fee factor, let's do that - let message_size_factor = FixedU128::from_u32(message_size.saturating_div(1024)) - .saturating_mul(MESSAGE_SIZE_FEE_BASE); - let total_factor = EXPONENTIAL_FEE_BASE.saturating_add(message_size_factor); - let previous_factor = bridge.delivery_fee_factor; - bridge.delivery_fee_factor = - bridge.delivery_fee_factor.saturating_mul(total_factor); - - log::info!( - target: LOG_TARGET, - "Bridge channel is congested. Increased fee factor from {} to {}", - previous_factor, - bridge.delivery_fee_factor, - ); - - Ok(()) - }); - } - } -} - -/// We'll be using `SovereignPaidRemoteExporter` to send remote messages over the sibling/child -/// bridge hub. -type ViaBridgeHubExporter = SovereignPaidRemoteExporter< - Pallet, - >::ToBridgeHubSender, - >::UniversalLocation, ->; - -// This pallet acts as the `ExporterFor` for the `SovereignPaidRemoteExporter` to compute -// message fee using fee factor. 
-impl, I: 'static> ExporterFor for Pallet { - fn exporter_for( - network: &NetworkId, - remote_location: &InteriorLocation, - message: &Xcm<()>, - ) -> Option<(Location, Option)> { - // ensure that the message is sent to the expected bridged network (if specified). - if let Some(bridged_network) = T::BridgedNetworkId::get() { - if *network != bridged_network { - log::trace!( - target: LOG_TARGET, - "Router with bridged_network_id {:?} does not support bridging to network {:?}!", - bridged_network, - network, - ); - return None - } - } - - // ensure that the message is sent to the expected bridged network and location. - let Some((bridge_hub_location, maybe_payment)) = - T::Bridges::exporter_for(network, remote_location, message) - else { - log::trace!( - target: LOG_TARGET, - "Router with bridged_network_id {:?} does not support bridging to network {:?} and remote_location {:?}!", - T::BridgedNetworkId::get(), - network, - remote_location, - ); - return None - }; - - // take `base_fee` from `T::Brides`, but it has to be the same `T::FeeAsset` - let base_fee = match maybe_payment { - Some(payment) => match payment { - Asset { fun: Fungible(amount), id } if id.eq(&T::FeeAsset::get()) => amount, - invalid_asset => { - log::error!( - target: LOG_TARGET, - "Router with bridged_network_id {:?} is configured for `T::FeeAsset` {:?} which is not \ - compatible with {:?} for bridge_hub_location: {:?} for bridging to {:?}/{:?}!", - T::BridgedNetworkId::get(), - T::FeeAsset::get(), - invalid_asset, - bridge_hub_location, - network, - remote_location, - ); - return None - }, - }, - None => 0, - }; - - // compute fee amount. Keep in mind that this is only the bridge fee. 
The fee for sending - // message from this chain to child/sibling bridge hub is determined by the - // `Config::ToBridgeHubSender` - let message_size = message.encoded_size(); - let message_fee = (message_size as u128).saturating_mul(T::ByteFee::get()); - let fee_sum = base_fee.saturating_add(message_fee); - let fee_factor = Self::bridge().delivery_fee_factor; - let fee = fee_factor.saturating_mul_int(fee_sum); - - let fee = if fee > 0 { Some((T::FeeAsset::get(), fee).into()) } else { None }; - - log::info!( - target: LOG_TARGET, - "Going to send message to {:?} ({} bytes) over bridge. Computed bridge fee {:?} using fee factor {}", - (network, remote_location), - message_size, - fee, - fee_factor - ); - - Some((bridge_hub_location, fee)) - } -} - -// This pallet acts as the `SendXcm` to the sibling/child bridge hub instead of regular -// XCMP/DMP transport. This allows injecting dynamic message fees into XCM programs that -// are going to the bridged network. -impl, I: 'static> SendXcm for Pallet { - type Ticket = (u32, ::Ticket); - - fn validate( - dest: &mut Option, - xcm: &mut Option>, - ) -> SendResult { - // `dest` and `xcm` are required here - let dest_ref = dest.as_ref().ok_or(SendError::MissingArgument)?; - let xcm_ref = xcm.as_ref().ok_or(SendError::MissingArgument)?; - - // we won't have an access to `dest` and `xcm` in the `deliver` method, so precompute - // everything required here - let message_size = xcm_ref.encoded_size() as _; - - // bridge doesn't support oversized/overweight messages now. So it is better to drop such - // messages here than at the bridge hub. Let's check the message size. - if message_size > HARD_MESSAGE_SIZE_LIMIT { - return Err(SendError::ExceedsMaxMessageSize) - } - - // We need to ensure that the known `dest`'s XCM version can comprehend the current `xcm` - // program. This may seem like an additional, unnecessary check, but it is not. 
A similar - // check is probably performed by the `ViaBridgeHubExporter`, which attempts to send a - // versioned message to the sibling bridge hub. However, the local bridge hub may have a - // higher XCM version than the remote `dest`. Once again, it is better to discard such - // messages here than at the bridge hub (e.g., to avoid losing funds). - let destination_version = T::DestinationVersion::get_version_for(dest_ref) - .ok_or(SendError::DestinationUnsupported)?; - let _ = VersionedXcm::from(xcm_ref.clone()) - .into_version(destination_version) - .map_err(|()| SendError::DestinationUnsupported)?; - - // just use exporter to validate destination and insert instructions to pay message fee - // at the sibling/child bridge hub - // - // the cost will include both cost of: (1) to-sibling bridge hub delivery (returned by - // the `Config::ToBridgeHubSender`) and (2) to-bridged bridge hub delivery (returned by - // `Self::exporter_for`) - ViaBridgeHubExporter::::validate(dest, xcm) - .map(|(ticket, cost)| ((message_size, ticket), cost)) - } - - fn deliver(ticket: Self::Ticket) -> Result { - // use router to enqueue message to the sibling/child bridge hub. This also should handle - // payment for passing through this queue. 
- let (message_size, ticket) = ticket; - let xcm_hash = ViaBridgeHubExporter::::deliver(ticket)?; - - // increase delivery fee factor if required - Self::on_message_sent_to_bridge(message_size); - - Ok(xcm_hash) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use frame_support::assert_ok; - use mock::*; - - use frame_support::traits::Hooks; - use sp_runtime::traits::One; - - fn congested_bridge(delivery_fee_factor: FixedU128) -> BridgeState { - BridgeState { is_congested: true, delivery_fee_factor } - } - - fn uncongested_bridge(delivery_fee_factor: FixedU128) -> BridgeState { - BridgeState { is_congested: false, delivery_fee_factor } - } - - #[test] - fn initial_fee_factor_is_one() { - run_test(|| { - assert_eq!( - Bridge::::get(), - uncongested_bridge(MINIMAL_DELIVERY_FEE_FACTOR), - ); - }) - } - - #[test] - fn fee_factor_is_not_decreased_from_on_initialize_when_xcm_channel_is_congested() { - run_test(|| { - Bridge::::put(uncongested_bridge(FixedU128::from_rational(125, 100))); - TestWithBridgeHubChannel::make_congested(); - - // it should not decrease, because xcm channel is congested - let old_bridge = XcmBridgeHubRouter::bridge(); - XcmBridgeHubRouter::on_initialize(One::one()); - assert_eq!(XcmBridgeHubRouter::bridge(), old_bridge); - }) - } - - #[test] - fn fee_factor_is_not_decreased_from_on_initialize_when_bridge_has_reported_congestion() { - run_test(|| { - Bridge::::put(congested_bridge(FixedU128::from_rational(125, 100))); - - // it should not decrease, because bridge congested - let old_bridge = XcmBridgeHubRouter::bridge(); - XcmBridgeHubRouter::on_initialize(One::one()); - assert_eq!(XcmBridgeHubRouter::bridge(), old_bridge); - }) - } - - #[test] - fn fee_factor_is_decreased_from_on_initialize_when_xcm_channel_is_uncongested() { - run_test(|| { - Bridge::::put(uncongested_bridge(FixedU128::from_rational(125, 100))); - - // it shold eventually decreased to one - while XcmBridgeHubRouter::bridge().delivery_fee_factor > MINIMAL_DELIVERY_FEE_FACTOR 
{ - XcmBridgeHubRouter::on_initialize(One::one()); - } - - // verify that it doesn't decreases anymore - XcmBridgeHubRouter::on_initialize(One::one()); - assert_eq!( - XcmBridgeHubRouter::bridge(), - uncongested_bridge(MINIMAL_DELIVERY_FEE_FACTOR) - ); - }) - } - - #[test] - fn not_applicable_if_destination_is_within_other_network() { - run_test(|| { - assert_eq!( - send_xcm::( - Location::new(2, [GlobalConsensus(Rococo), Parachain(1000)]), - vec![].into(), - ), - Err(SendError::NotApplicable), - ); - }); - } - - #[test] - fn exceeds_max_message_size_if_size_is_above_hard_limit() { - run_test(|| { - assert_eq!( - send_xcm::( - Location::new(2, [GlobalConsensus(Rococo), Parachain(1000)]), - vec![ClearOrigin; HARD_MESSAGE_SIZE_LIMIT as usize].into(), - ), - Err(SendError::ExceedsMaxMessageSize), - ); - }); - } - - #[test] - fn destination_unsupported_if_wrap_version_fails() { - run_test(|| { - assert_eq!( - send_xcm::( - UnknownXcmVersionLocation::get(), - vec![ClearOrigin].into(), - ), - Err(SendError::DestinationUnsupported), - ); - }); - } - - #[test] - fn returns_proper_delivery_price() { - run_test(|| { - let dest = Location::new(2, [GlobalConsensus(BridgedNetworkId::get())]); - let xcm: Xcm<()> = vec![ClearOrigin].into(); - let msg_size = xcm.encoded_size(); - - // initially the base fee is used: `BASE_FEE + BYTE_FEE * msg_size + HRMP_FEE` - let expected_fee = BASE_FEE + BYTE_FEE * (msg_size as u128) + HRMP_FEE; - assert_eq!( - XcmBridgeHubRouter::validate(&mut Some(dest.clone()), &mut Some(xcm.clone())) - .unwrap() - .1 - .get(0), - Some(&(BridgeFeeAsset::get(), expected_fee).into()), - ); - - // but when factor is larger than one, it increases the fee, so it becomes: - // `(BASE_FEE + BYTE_FEE * msg_size) * F + HRMP_FEE` - let factor = FixedU128::from_rational(125, 100); - Bridge::::put(uncongested_bridge(factor)); - let expected_fee = - (FixedU128::saturating_from_integer(BASE_FEE + BYTE_FEE * (msg_size as u128)) * - factor) - .into_inner() / FixedU128::DIV 
+ - HRMP_FEE; - assert_eq!( - XcmBridgeHubRouter::validate(&mut Some(dest), &mut Some(xcm)).unwrap().1.get(0), - Some(&(BridgeFeeAsset::get(), expected_fee).into()), - ); - }); - } - - #[test] - fn sent_message_doesnt_increase_factor_if_xcm_channel_is_uncongested() { - run_test(|| { - let old_bridge = XcmBridgeHubRouter::bridge(); - assert_ok!(send_xcm::( - Location::new(2, [GlobalConsensus(BridgedNetworkId::get()), Parachain(1000)]), - vec![ClearOrigin].into(), - ) - .map(drop)); - - assert!(TestToBridgeHubSender::is_message_sent()); - assert_eq!(old_bridge, XcmBridgeHubRouter::bridge()); - }); - } - - #[test] - fn sent_message_increases_factor_if_xcm_channel_is_congested() { - run_test(|| { - TestWithBridgeHubChannel::make_congested(); - - let old_bridge = XcmBridgeHubRouter::bridge(); - assert_ok!(send_xcm::( - Location::new(2, [GlobalConsensus(BridgedNetworkId::get()), Parachain(1000)]), - vec![ClearOrigin].into(), - ) - .map(drop)); - - assert!(TestToBridgeHubSender::is_message_sent()); - assert!( - old_bridge.delivery_fee_factor < XcmBridgeHubRouter::bridge().delivery_fee_factor - ); - }); - } - - #[test] - fn sent_message_increases_factor_if_bridge_has_reported_congestion() { - run_test(|| { - Bridge::::put(congested_bridge(MINIMAL_DELIVERY_FEE_FACTOR)); - - let old_bridge = XcmBridgeHubRouter::bridge(); - assert_ok!(send_xcm::( - Location::new(2, [GlobalConsensus(BridgedNetworkId::get()), Parachain(1000)]), - vec![ClearOrigin].into(), - ) - .map(drop)); - - assert!(TestToBridgeHubSender::is_message_sent()); - assert!( - old_bridge.delivery_fee_factor < XcmBridgeHubRouter::bridge().delivery_fee_factor - ); - }); - } -} diff --git a/modules/xcm-bridge-hub-router/src/mock.rs b/modules/xcm-bridge-hub-router/src/mock.rs deleted file mode 100644 index 6dbfba5f6fdc1f521fb2fdf000ffb778740435e6..0000000000000000000000000000000000000000 --- a/modules/xcm-bridge-hub-router/src/mock.rs +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. 
-// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -#![cfg(test)] - -use crate as pallet_xcm_bridge_hub_router; - -use bp_xcm_bridge_hub_router::XcmChannelStatusProvider; -use frame_support::{ - construct_runtime, derive_impl, parameter_types, - traits::{Contains, Equals}, -}; -use frame_system::EnsureRoot; -use sp_runtime::{traits::ConstU128, BuildStorage}; -use xcm::prelude::*; -use xcm_builder::{NetworkExportTable, NetworkExportTableItem}; - -pub type AccountId = u64; -type Block = frame_system::mocking::MockBlock; - -/// HRMP fee. -pub const HRMP_FEE: u128 = 500; -/// Base bridge fee. -pub const BASE_FEE: u128 = 1_000_000; -/// Byte bridge fee. -pub const BYTE_FEE: u128 = 1_000; - -construct_runtime! { - pub enum TestRuntime - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - XcmBridgeHubRouter: pallet_xcm_bridge_hub_router::{Pallet, Storage}, - } -} - -parameter_types! 
{ - pub ThisNetworkId: NetworkId = Polkadot; - pub BridgedNetworkId: NetworkId = Kusama; - pub UniversalLocation: InteriorLocation = [GlobalConsensus(ThisNetworkId::get()), Parachain(1000)].into(); - pub SiblingBridgeHubLocation: Location = ParentThen([Parachain(1002)].into()).into(); - pub BridgeFeeAsset: AssetId = Location::parent().into(); - pub BridgeTable: Vec - = vec![ - NetworkExportTableItem::new( - BridgedNetworkId::get(), - None, - SiblingBridgeHubLocation::get(), - Some((BridgeFeeAsset::get(), BASE_FEE).into()) - ) - ]; - pub UnknownXcmVersionLocation: Location = Location::new(2, [GlobalConsensus(BridgedNetworkId::get()), Parachain(9999)]); -} - -#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] -impl frame_system::Config for TestRuntime { - type Block = Block; -} - -impl pallet_xcm_bridge_hub_router::Config<()> for TestRuntime { - type WeightInfo = (); - - type UniversalLocation = UniversalLocation; - type BridgedNetworkId = BridgedNetworkId; - type Bridges = NetworkExportTable; - type DestinationVersion = - LatestOrNoneForLocationVersionChecker>; - - type BridgeHubOrigin = EnsureRoot; - type ToBridgeHubSender = TestToBridgeHubSender; - type WithBridgeHubChannel = TestWithBridgeHubChannel; - - type ByteFee = ConstU128; - type FeeAsset = BridgeFeeAsset; -} - -pub struct LatestOrNoneForLocationVersionChecker(sp_std::marker::PhantomData); -impl> GetVersion - for LatestOrNoneForLocationVersionChecker -{ - fn get_version_for(dest: &Location) -> Option { - if LocationValue::contains(dest) { - return None - } - Some(XCM_VERSION) - } -} - -pub struct TestToBridgeHubSender; - -impl TestToBridgeHubSender { - pub fn is_message_sent() -> bool { - frame_support::storage::unhashed::get_or_default(b"TestToBridgeHubSender.Sent") - } -} - -impl SendXcm for TestToBridgeHubSender { - type Ticket = (); - - fn validate( - _destination: &mut Option, - _message: &mut Option>, - ) -> SendResult { - Ok(((), (BridgeFeeAsset::get(), 
HRMP_FEE).into())) - } - - fn deliver(_ticket: Self::Ticket) -> Result { - frame_support::storage::unhashed::put(b"TestToBridgeHubSender.Sent", &true); - Ok([0u8; 32]) - } -} - -pub struct TestWithBridgeHubChannel; - -impl TestWithBridgeHubChannel { - pub fn make_congested() { - frame_support::storage::unhashed::put(b"TestWithBridgeHubChannel.Congested", &true); - } -} - -impl XcmChannelStatusProvider for TestWithBridgeHubChannel { - fn is_congested() -> bool { - frame_support::storage::unhashed::get_or_default(b"TestWithBridgeHubChannel.Congested") - } -} - -/// Return test externalities to use in tests. -pub fn new_test_ext() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - sp_io::TestExternalities::new(t) -} - -/// Run pallet test. -pub fn run_test(test: impl FnOnce() -> T) -> T { - new_test_ext().execute_with(test) -} diff --git a/modules/xcm-bridge-hub-router/src/weights.rs b/modules/xcm-bridge-hub-router/src/weights.rs deleted file mode 100644 index b0c8fc6252cd5e6eaa968cce06636a308e1c7e05..0000000000000000000000000000000000000000 --- a/modules/xcm-bridge-hub-router/src/weights.rs +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! 
Autogenerated weights for pallet_xcm_bridge_hub_router -//! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-08-03, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `covid`, CPU: `11th Gen Intel(R) Core(TM) i7-11800H @ 2.30GHz` -//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 - -// Executed Command: -// target/release/rip-bridge-node -// benchmark -// pallet -// --chain=dev -// --steps=50 -// --repeat=20 -// --pallet=pallet_xcm_bridge_hub_router -// --extrinsic=* -// --execution=wasm -// --wasm-execution=Compiled -// --heap-pages=4096 -// --output=./modules/xcm-bridge-hub-router/src/weights.rs -// --template=./.maintain/bridge-weight-template.hbs - -#![allow(clippy::all)] -#![allow(unused_parens)] -#![allow(unused_imports)] -#![allow(missing_docs)] - -use frame_support::{ - traits::Get, - weights::{constants::RocksDbWeight, Weight}, -}; -use sp_std::marker::PhantomData; - -/// Weight functions needed for pallet_xcm_bridge_hub_router. -pub trait WeightInfo { - fn on_initialize_when_non_congested() -> Weight; - fn on_initialize_when_congested() -> Weight; - fn report_bridge_status() -> Weight; - fn send_message() -> Weight; -} - -/// Weights for `pallet_xcm_bridge_hub_router` that are generated using one of the Bridge testnets. -/// -/// Those weights are test only and must never be used in production. 
-pub struct BridgeWeight(PhantomData); -impl WeightInfo for BridgeWeight { - /// Storage: `XcmBridgeHubRouter::Bridge` (r:1 w:1) - /// - /// Proof: `XcmBridgeHubRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: - /// 512, mode: `MaxEncodedLen`) - /// - /// Storage: UNKNOWN KEY `0x456d756c617465645369626c696e6758636d704368616e6e656c2e436f6e6765` - /// (r:1 w:0) - /// - /// Proof: UNKNOWN KEY `0x456d756c617465645369626c696e6758636d704368616e6e656c2e436f6e6765` (r:1 - /// w:0) - fn on_initialize_when_non_congested() -> Weight { - // Proof Size summary in bytes: - // Measured: `53` - // Estimated: `3518` - // Minimum execution time: 11_934 nanoseconds. - Weight::from_parts(12_201_000, 3518) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: `XcmBridgeHubRouter::Bridge` (r:1 w:1) - /// - /// Proof: `XcmBridgeHubRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: - /// 512, mode: `MaxEncodedLen`) - /// - /// Storage: UNKNOWN KEY `0x456d756c617465645369626c696e6758636d704368616e6e656c2e436f6e6765` - /// (r:1 w:0) - /// - /// Proof: UNKNOWN KEY `0x456d756c617465645369626c696e6758636d704368616e6e656c2e436f6e6765` (r:1 - /// w:0) - fn on_initialize_when_congested() -> Weight { - // Proof Size summary in bytes: - // Measured: `94` - // Estimated: `3559` - // Minimum execution time: 9_010 nanoseconds. - Weight::from_parts(9_594_000, 3559) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: `XcmBridgeHubRouter::Bridge` (r:1 w:1) - /// - /// Proof: `XcmBridgeHubRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: - /// 512, mode: `MaxEncodedLen`) - fn report_bridge_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `53` - // Estimated: `1502` - // Minimum execution time: 10_427 nanoseconds. 
- Weight::from_parts(10_682_000, 1502) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } - /// Storage: `XcmBridgeHubRouter::Bridge` (r:1 w:1) - /// - /// Proof: `XcmBridgeHubRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: - /// 512, mode: `MaxEncodedLen`) - /// - /// Storage: UNKNOWN KEY `0x456d756c617465645369626c696e6758636d704368616e6e656c2e436f6e6765` - /// (r:1 w:0) - /// - /// Proof: UNKNOWN KEY `0x456d756c617465645369626c696e6758636d704368616e6e656c2e436f6e6765` (r:1 - /// w:0) - fn send_message() -> Weight { - // Proof Size summary in bytes: - // Measured: `52` - // Estimated: `3517` - // Minimum execution time: 19_709 nanoseconds. - Weight::from_parts(20_110_000, 3517) - .saturating_add(T::DbWeight::get().reads(2_u64)) - .saturating_add(T::DbWeight::get().writes(1_u64)) - } -} - -// For backwards compatibility and tests -impl WeightInfo for () { - /// Storage: `XcmBridgeHubRouter::Bridge` (r:1 w:1) - /// - /// Proof: `XcmBridgeHubRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: - /// 512, mode: `MaxEncodedLen`) - /// - /// Storage: UNKNOWN KEY `0x456d756c617465645369626c696e6758636d704368616e6e656c2e436f6e6765` - /// (r:1 w:0) - /// - /// Proof: UNKNOWN KEY `0x456d756c617465645369626c696e6758636d704368616e6e656c2e436f6e6765` (r:1 - /// w:0) - fn on_initialize_when_non_congested() -> Weight { - // Proof Size summary in bytes: - // Measured: `53` - // Estimated: `3518` - // Minimum execution time: 11_934 nanoseconds. 
- Weight::from_parts(12_201_000, 3518) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: `XcmBridgeHubRouter::Bridge` (r:1 w:1) - /// - /// Proof: `XcmBridgeHubRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: - /// 512, mode: `MaxEncodedLen`) - /// - /// Storage: UNKNOWN KEY `0x456d756c617465645369626c696e6758636d704368616e6e656c2e436f6e6765` - /// (r:1 w:0) - /// - /// Proof: UNKNOWN KEY `0x456d756c617465645369626c696e6758636d704368616e6e656c2e436f6e6765` (r:1 - /// w:0) - fn on_initialize_when_congested() -> Weight { - // Proof Size summary in bytes: - // Measured: `94` - // Estimated: `3559` - // Minimum execution time: 9_010 nanoseconds. - Weight::from_parts(9_594_000, 3559) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: `XcmBridgeHubRouter::Bridge` (r:1 w:1) - /// - /// Proof: `XcmBridgeHubRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: - /// 512, mode: `MaxEncodedLen`) - fn report_bridge_status() -> Weight { - // Proof Size summary in bytes: - // Measured: `53` - // Estimated: `1502` - // Minimum execution time: 10_427 nanoseconds. - Weight::from_parts(10_682_000, 1502) - .saturating_add(RocksDbWeight::get().reads(1_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } - /// Storage: `XcmBridgeHubRouter::Bridge` (r:1 w:1) - /// - /// Proof: `XcmBridgeHubRouter::Bridge` (`max_values`: Some(1), `max_size`: Some(17), added: - /// 512, mode: `MaxEncodedLen`) - /// - /// Storage: UNKNOWN KEY `0x456d756c617465645369626c696e6758636d704368616e6e656c2e436f6e6765` - /// (r:1 w:0) - /// - /// Proof: UNKNOWN KEY `0x456d756c617465645369626c696e6758636d704368616e6e656c2e436f6e6765` (r:1 - /// w:0) - fn send_message() -> Weight { - // Proof Size summary in bytes: - // Measured: `52` - // Estimated: `3517` - // Minimum execution time: 19_709 nanoseconds. 
- Weight::from_parts(20_110_000, 3517) - .saturating_add(RocksDbWeight::get().reads(2_u64)) - .saturating_add(RocksDbWeight::get().writes(1_u64)) - } -} diff --git a/modules/xcm-bridge-hub/Cargo.toml b/modules/xcm-bridge-hub/Cargo.toml deleted file mode 100644 index aaa11494143369fded501578930da658879e31e2..0000000000000000000000000000000000000000 --- a/modules/xcm-bridge-hub/Cargo.toml +++ /dev/null @@ -1,77 +0,0 @@ -[package] -name = "pallet-xcm-bridge-hub" -description = "Module that adds dynamic bridges/lanes support to XCM infrastucture at the bridge hub." -version = "0.2.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } -log = { workspace = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } - -# Bridge Dependencies -bp-messages = { path = "../../primitives/messages", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } -bp-xcm-bridge-hub = { path = "../../primitives/xcm-bridge-hub", default-features = false } -pallet-bridge-messages = { path = "../messages", default-features = false } -bridge-runtime-common = { path = "../../bin/runtime-common", default-features = false } - -# Substrate Dependencies -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -frame-system = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } - -# Polkadot 
Dependencies -xcm = { package = "staging-xcm", git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -xcm-builder = { package = "staging-xcm-builder", git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -xcm-executor = { package = "staging-xcm-executor", git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } - -[dev-dependencies] -bp-header-chain = { path = "../../primitives/header-chain" } -pallet-balances = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-io = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } - -[features] -default = ["std"] -std = [ - "bp-messages/std", - "bp-runtime/std", - "bp-xcm-bridge-hub/std", - "bridge-runtime-common/std", - "codec/std", - "frame-support/std", - "frame-system/std", - "log/std", - "pallet-bridge-messages/std", - "scale-info/std", - "sp-core/std", - "sp-runtime/std", - "sp-std/std", - "xcm-builder/std", - "xcm-executor/std", - "xcm/std", -] -runtime-benchmarks = [ - "bridge-runtime-common/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "frame-system/runtime-benchmarks", - "pallet-balances/runtime-benchmarks", - "pallet-bridge-messages/runtime-benchmarks", - "sp-runtime/runtime-benchmarks", - "xcm-builder/runtime-benchmarks", - "xcm-executor/runtime-benchmarks", -] -try-runtime = [ - "frame-support/try-runtime", - "frame-system/try-runtime", - "pallet-balances/try-runtime", - "pallet-bridge-messages/try-runtime", - "sp-runtime/try-runtime", -] diff --git a/modules/xcm-bridge-hub/src/exporter.rs b/modules/xcm-bridge-hub/src/exporter.rs deleted file mode 100644 index 94ec8b5f106fdb9ce5e229a41579d26e789b5673..0000000000000000000000000000000000000000 --- a/modules/xcm-bridge-hub/src/exporter.rs +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! The code that allows to use the pallet (`pallet-xcm-bridge-hub`) as XCM message -//! exporter at the sending bridge hub. Internally, it just enqueues outbound blob -//! in the messages pallet queue. -//! -//! This code is executed at the source bridge hub. - -use crate::{Config, Pallet, LOG_TARGET}; - -use bp_messages::source_chain::MessagesBridge; -use bp_xcm_bridge_hub::XcmAsPlainPayload; -use bridge_runtime_common::messages_xcm_extension::{LocalXcmQueueManager, SenderAndLane}; -use pallet_bridge_messages::{Config as BridgeMessagesConfig, Pallet as BridgeMessagesPallet}; -use xcm::prelude::*; -use xcm_builder::{HaulBlob, HaulBlobError, HaulBlobExporter}; -use xcm_executor::traits::ExportXcm; - -/// An easy way to access `HaulBlobExporter`. -pub type PalletAsHaulBlobExporter = HaulBlobExporter< - DummyHaulBlob, - >::BridgedNetwork, - >::DestinationVersion, - >::MessageExportPrice, ->; -/// An easy way to access associated messages pallet. 
-type MessagesPallet = BridgeMessagesPallet>::BridgeMessagesPalletInstance>; - -impl, I: 'static> ExportXcm for Pallet -where - T: BridgeMessagesConfig, -{ - type Ticket = ( - SenderAndLane, - as MessagesBridge>::SendMessageArgs, - XcmHash, - ); - - fn validate( - network: NetworkId, - channel: u32, - universal_source: &mut Option, - destination: &mut Option, - message: &mut Option>, - ) -> Result<(Self::Ticket, Assets), SendError> { - // Find supported lane_id. - let sender_and_lane = Self::lane_for( - universal_source.as_ref().ok_or(SendError::MissingArgument)?, - (&network, destination.as_ref().ok_or(SendError::MissingArgument)?), - ) - .ok_or(SendError::NotApplicable)?; - - // check if we are able to route the message. We use existing `HaulBlobExporter` for that. - // It will make all required changes and will encode message properly, so that the - // `DispatchBlob` at the bridged bridge hub will be able to decode it - let ((blob, id), price) = PalletAsHaulBlobExporter::::validate( - network, - channel, - universal_source, - destination, - message, - )?; - - let bridge_message = MessagesPallet::::validate_message(sender_and_lane.lane, &blob) - .map_err(|e| { - log::debug!( - target: LOG_TARGET, - "XCM message {:?} cannot be exported because of bridge error {:?} on bridge {:?}", - id, - e, - sender_and_lane.lane, - ); - SendError::Transport("BridgeValidateError") - })?; - - Ok(((sender_and_lane, bridge_message, id), price)) - } - - fn deliver((sender_and_lane, bridge_message, id): Self::Ticket) -> Result { - let lane_id = sender_and_lane.lane; - let artifacts = MessagesPallet::::send_message(bridge_message); - - log::info!( - target: LOG_TARGET, - "XCM message {:?} has been enqueued at bridge {:?} with nonce {}", - id, - lane_id, - artifacts.nonce, - ); - - // notify XCM queue manager about updated lane state - LocalXcmQueueManager::::on_bridge_message_enqueued( - &sender_and_lane, - artifacts.enqueued_messages, - ); - - Ok(id) - } -} - -/// Dummy implementation 
of the `HaulBlob` trait that is never called. -/// -/// We are using `HaulBlobExporter`, which requires `HaulBlob` implementation. It assumes that -/// there's a single channel between two bridge hubs - `HaulBlob` only accepts the blob and nothing -/// else. But bridge messages pallet may have a dedicated channel (lane) for every pair of bridged -/// chains. So we are using our own `ExportXcm` implementation, but to utilize `HaulBlobExporter` we -/// still need this `DummyHaulBlob`. -pub struct DummyHaulBlob; - -impl HaulBlob for DummyHaulBlob { - fn haul_blob(_blob: XcmAsPlainPayload) -> Result<(), HaulBlobError> { - Err(HaulBlobError::Transport("DummyHaulBlob")) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::*; - use frame_support::assert_ok; - use xcm_executor::traits::export_xcm; - - fn universal_source() -> InteriorLocation { - [GlobalConsensus(RelayNetwork::get()), Parachain(SIBLING_ASSET_HUB_ID)].into() - } - - fn universal_destination() -> InteriorLocation { - BridgedDestination::get() - } - - #[test] - fn export_works() { - run_test(|| { - assert_ok!(export_xcm::( - BridgedRelayNetwork::get(), - 0, - universal_source(), - universal_destination(), - vec![Instruction::ClearOrigin].into(), - )); - }) - } - - #[test] - fn export_fails_if_argument_is_missing() { - run_test(|| { - assert_eq!( - XcmOverBridge::validate( - BridgedRelayNetwork::get(), - 0, - &mut None, - &mut Some(universal_destination()), - &mut Some(Vec::new().into()), - ), - Err(SendError::MissingArgument), - ); - - assert_eq!( - XcmOverBridge::validate( - BridgedRelayNetwork::get(), - 0, - &mut Some(universal_source()), - &mut None, - &mut Some(Vec::new().into()), - ), - Err(SendError::MissingArgument), - ); - }) - } - - #[test] - fn exporter_computes_correct_lane_id() { - run_test(|| { - let expected_lane_id = TEST_LANE_ID; - - assert_eq!( - XcmOverBridge::validate( - BridgedRelayNetwork::get(), - 0, - &mut Some(universal_source()), - &mut Some(universal_destination()), 
- &mut Some(Vec::new().into()), - ) - .unwrap() - .0 - .0 - .lane, - expected_lane_id, - ); - }) - } -} diff --git a/modules/xcm-bridge-hub/src/lib.rs b/modules/xcm-bridge-hub/src/lib.rs deleted file mode 100644 index 60b988497fc59e94cbfe1a6e30cd6f3039d8c331..0000000000000000000000000000000000000000 --- a/modules/xcm-bridge-hub/src/lib.rs +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Module that adds XCM support to bridge pallets. - -#![warn(missing_docs)] -#![cfg_attr(not(feature = "std"), no_std)] - -use bridge_runtime_common::messages_xcm_extension::XcmBlobHauler; -use pallet_bridge_messages::Config as BridgeMessagesConfig; -use xcm::prelude::*; - -pub use exporter::PalletAsHaulBlobExporter; -pub use pallet::*; - -mod exporter; -mod mock; - -/// The target that will be used when publishing logs related to this pallet. 
-pub const LOG_TARGET: &str = "runtime::bridge-xcm"; - -#[frame_support::pallet] -pub mod pallet { - use super::*; - use bridge_runtime_common::messages_xcm_extension::SenderAndLane; - use frame_support::pallet_prelude::*; - use frame_system::pallet_prelude::BlockNumberFor; - - #[pallet::config] - #[pallet::disable_frame_system_supertrait_check] - pub trait Config: - BridgeMessagesConfig - { - /// Runtime's universal location. - type UniversalLocation: Get; - // TODO: https://github.com/paritytech/parity-bridges-common/issues/1666 remove `ChainId` and - // replace it with the `NetworkId` - then we'll be able to use - // `T as pallet_bridge_messages::Config::BridgedChain::NetworkId` - /// Bridged network as relative location of bridged `GlobalConsensus`. - #[pallet::constant] - type BridgedNetwork: Get; - /// Associated messages pallet instance that bridges us with the - /// `BridgedNetworkId` consensus. - type BridgeMessagesPalletInstance: 'static; - - /// Price of single message export to the bridged consensus (`Self::BridgedNetworkId`). - type MessageExportPrice: Get; - /// Checks the XCM version for the destination. - type DestinationVersion: GetVersion; - - /// Get point-to-point links with bridged consensus (`Self::BridgedNetworkId`). - /// (this will be replaced with dynamic on-chain bridges - `Bridges V2`) - type Lanes: Get>; - /// Support for point-to-point links - /// (this will be replaced with dynamic on-chain bridges - `Bridges V2`) - type LanesSupport: XcmBlobHauler; - } - - #[pallet::pallet] - pub struct Pallet(PhantomData<(T, I)>); - - #[pallet::hooks] - impl, I: 'static> Hooks> for Pallet { - fn integrity_test() { - assert!( - Self::bridged_network_id().is_some(), - "Configured `T::BridgedNetwork`: {:?} does not contain `GlobalConsensus` junction with `NetworkId`", - T::BridgedNetwork::get() - ) - } - } - - impl, I: 'static> Pallet { - /// Returns dedicated/configured lane identifier. 
- pub(crate) fn lane_for( - source: &InteriorLocation, - dest: (&NetworkId, &InteriorLocation), - ) -> Option { - let source = source.clone().relative_to(&T::UniversalLocation::get()); - - // Check that we have configured a point-to-point lane for 'source' and `dest`. - T::Lanes::get() - .into_iter() - .find_map(|(lane_source, (lane_dest_network, lane_dest))| { - if lane_source.location == source && - &lane_dest_network == dest.0 && - Self::bridged_network_id().as_ref() == Some(dest.0) && - &lane_dest == dest.1 - { - Some(lane_source) - } else { - None - } - }) - } - - /// Returns some `NetworkId` if contains `GlobalConsensus` junction. - fn bridged_network_id() -> Option { - match T::BridgedNetwork::get().take_first_interior() { - Some(GlobalConsensus(network)) => Some(network), - _ => None, - } - } - } -} diff --git a/modules/xcm-bridge-hub/src/mock.rs b/modules/xcm-bridge-hub/src/mock.rs deleted file mode 100644 index e40e1f9fb65157feffebeaa53e16c7def2ad22e0..0000000000000000000000000000000000000000 --- a/modules/xcm-bridge-hub/src/mock.rs +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -#![cfg(test)] - -use crate as pallet_xcm_bridge_hub; - -use bp_messages::{ - target_chain::{DispatchMessage, MessageDispatch}, - LaneId, -}; -use bp_runtime::{messages::MessageDispatchResult, Chain, ChainId, UnderlyingChainProvider}; -use bridge_runtime_common::{ - messages::{ - source::TargetHeaderChainAdapter, target::SourceHeaderChainAdapter, - BridgedChainWithMessages, HashOf, MessageBridge, ThisChainWithMessages, - }, - messages_xcm_extension::{SenderAndLane, XcmBlobHauler}, -}; -use codec::Encode; -use frame_support::{derive_impl, parameter_types, traits::ConstU32, weights::RuntimeDbWeight}; -use sp_core::H256; -use sp_runtime::{ - testing::Header as SubstrateHeader, - traits::{BlakeTwo256, IdentityLookup}, - AccountId32, BuildStorage, -}; -use xcm::prelude::*; - -pub type AccountId = AccountId32; -pub type Balance = u64; - -type Block = frame_system::mocking::MockBlock; - -pub const SIBLING_ASSET_HUB_ID: u32 = 2001; -pub const THIS_BRIDGE_HUB_ID: u32 = 2002; -pub const BRIDGED_ASSET_HUB_ID: u32 = 1001; -pub const TEST_LANE_ID: LaneId = LaneId([0, 0, 0, 1]); - -frame_support::construct_runtime! { - pub enum TestRuntime { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Balances: pallet_balances::{Pallet, Event}, - Messages: pallet_bridge_messages::{Pallet, Call, Event}, - XcmOverBridge: pallet_xcm_bridge_hub::{Pallet}, - } -} - -parameter_types! 
{ - pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { read: 1, write: 2 }; - pub const ExistentialDeposit: Balance = 1; -} - -#[derive_impl(frame_system::config_preludes::TestDefaultConfig as frame_system::DefaultConfig)] -impl frame_system::Config for TestRuntime { - type AccountId = AccountId; - type AccountData = pallet_balances::AccountData; - type Block = Block; - type Lookup = IdentityLookup; -} - -#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig as pallet_balances::DefaultConfig)] -impl pallet_balances::Config for TestRuntime { - type AccountStore = System; -} - -parameter_types! { - pub const ActiveOutboundLanes: &'static [LaneId] = &[TEST_LANE_ID]; -} - -impl pallet_bridge_messages::Config for TestRuntime { - type RuntimeEvent = RuntimeEvent; - type WeightInfo = TestMessagesWeights; - - type BridgedChainId = (); - type ActiveOutboundLanes = ActiveOutboundLanes; - type MaxUnrewardedRelayerEntriesAtInboundLane = (); - type MaxUnconfirmedMessagesAtInboundLane = (); - type MaximalOutboundPayloadSize = ConstU32<2048>; - type OutboundPayload = Vec; - type InboundPayload = Vec; - type InboundRelayer = (); - type DeliveryPayments = (); - type TargetHeaderChain = TargetHeaderChainAdapter; - type DeliveryConfirmationPayments = (); - type OnMessagesDelivered = (); - type SourceHeaderChain = SourceHeaderChainAdapter; - type MessageDispatch = TestMessageDispatch; -} - -pub struct TestMessagesWeights; - -impl pallet_bridge_messages::WeightInfo for TestMessagesWeights { - fn receive_single_message_proof() -> Weight { - Weight::zero() - } - fn receive_single_message_proof_with_outbound_lane_state() -> Weight { - Weight::zero() - } - fn receive_delivery_proof_for_single_message() -> Weight { - Weight::zero() - } - fn receive_delivery_proof_for_two_messages_by_single_relayer() -> Weight { - Weight::zero() - } - fn receive_delivery_proof_for_two_messages_by_two_relayers() -> Weight { - Weight::zero() - } - - fn receive_two_messages_proof() -> Weight { 
- Weight::zero() - } - - fn receive_single_message_proof_1_kb() -> Weight { - Weight::zero() - } - - fn receive_single_message_proof_16_kb() -> Weight { - Weight::zero() - } - - fn receive_single_message_proof_with_dispatch(_: u32) -> Weight { - Weight::from_parts(1, 0) - } -} - -impl pallet_bridge_messages::WeightInfoExt for TestMessagesWeights { - fn expected_extra_storage_proof_size() -> u32 { - 0 - } - - fn receive_messages_proof_overhead_from_runtime() -> Weight { - Weight::zero() - } - - fn receive_messages_delivery_proof_overhead_from_runtime() -> Weight { - Weight::zero() - } -} - -parameter_types! { - pub const RelayNetwork: NetworkId = NetworkId::Kusama; - pub const BridgedRelayNetwork: NetworkId = NetworkId::Polkadot; - pub BridgedRelayNetworkLocation: Location = (Parent, GlobalConsensus(BridgedRelayNetwork::get())).into(); - pub const NonBridgedRelayNetwork: NetworkId = NetworkId::Rococo; - pub const BridgeReserve: Balance = 100_000; - pub UniversalLocation: InteriorLocation = [ - GlobalConsensus(RelayNetwork::get()), - Parachain(THIS_BRIDGE_HUB_ID), - ].into(); - pub const Penalty: Balance = 1_000; -} - -impl pallet_xcm_bridge_hub::Config for TestRuntime { - type UniversalLocation = UniversalLocation; - type BridgedNetwork = BridgedRelayNetworkLocation; - type BridgeMessagesPalletInstance = (); - - type MessageExportPrice = (); - type DestinationVersion = AlwaysLatest; - - type Lanes = TestLanes; - type LanesSupport = TestXcmBlobHauler; -} - -parameter_types! 
{ - pub TestSenderAndLane: SenderAndLane = SenderAndLane { - location: Location::new(1, [Parachain(SIBLING_ASSET_HUB_ID)]), - lane: TEST_LANE_ID, - }; - pub BridgedDestination: InteriorLocation = [ - Parachain(BRIDGED_ASSET_HUB_ID) - ].into(); - pub TestLanes: sp_std::vec::Vec<(SenderAndLane, (NetworkId, InteriorLocation))> = sp_std::vec![ - (TestSenderAndLane::get(), (BridgedRelayNetwork::get(), BridgedDestination::get())) - ]; -} - -pub struct TestXcmBlobHauler; -impl XcmBlobHauler for TestXcmBlobHauler { - type Runtime = TestRuntime; - type MessagesInstance = (); - type ToSourceChainSender = (); - type CongestedMessage = (); - type UncongestedMessage = (); -} - -pub struct ThisChain; - -impl Chain for ThisChain { - const ID: ChainId = *b"tuch"; - type BlockNumber = u64; - type Hash = H256; - type Hasher = BlakeTwo256; - type Header = SubstrateHeader; - type AccountId = AccountId; - type Balance = Balance; - type Nonce = u64; - type Signature = sp_runtime::MultiSignature; - - fn max_extrinsic_size() -> u32 { - u32::MAX - } - - fn max_extrinsic_weight() -> Weight { - Weight::MAX - } -} - -pub struct BridgedChain; -pub type BridgedHeaderHash = H256; -pub type BridgedChainHeader = SubstrateHeader; - -impl Chain for BridgedChain { - const ID: ChainId = *b"tuch"; - type BlockNumber = u64; - type Hash = BridgedHeaderHash; - type Hasher = BlakeTwo256; - type Header = BridgedChainHeader; - type AccountId = AccountId; - type Balance = Balance; - type Nonce = u64; - type Signature = sp_runtime::MultiSignature; - - fn max_extrinsic_size() -> u32 { - 4096 - } - - fn max_extrinsic_weight() -> Weight { - Weight::MAX - } -} - -/// Test message dispatcher. 
-pub struct TestMessageDispatch; - -impl TestMessageDispatch { - pub fn deactivate(lane: LaneId) { - frame_support::storage::unhashed::put(&(b"inactive", lane).encode()[..], &false); - } -} - -impl MessageDispatch for TestMessageDispatch { - type DispatchPayload = Vec; - type DispatchLevelResult = (); - - fn is_active() -> bool { - frame_support::storage::unhashed::take::(&(b"inactive").encode()[..]) != Some(false) - } - - fn dispatch_weight(_message: &mut DispatchMessage) -> Weight { - Weight::zero() - } - - fn dispatch( - _: DispatchMessage, - ) -> MessageDispatchResult { - MessageDispatchResult { unspent_weight: Weight::zero(), dispatch_level_result: () } - } -} - -pub struct WrappedThisChain; -impl UnderlyingChainProvider for WrappedThisChain { - type Chain = ThisChain; -} -impl ThisChainWithMessages for WrappedThisChain { - type RuntimeOrigin = RuntimeOrigin; -} - -pub struct WrappedBridgedChain; -impl UnderlyingChainProvider for WrappedBridgedChain { - type Chain = BridgedChain; -} -impl BridgedChainWithMessages for WrappedBridgedChain {} - -pub struct BridgedHeaderChain; -impl bp_header_chain::HeaderChain for BridgedHeaderChain { - fn finalized_header_state_root( - _hash: HashOf, - ) -> Option> { - unreachable!() - } -} - -/// Bridge that is deployed on `ThisChain` and allows sending/receiving messages to/from -/// `BridgedChain`. -#[derive(Debug, PartialEq, Eq)] -pub struct OnThisChainBridge; - -impl MessageBridge for OnThisChainBridge { - const BRIDGED_MESSAGES_PALLET_NAME: &'static str = ""; - - type ThisChain = WrappedThisChain; - type BridgedChain = WrappedBridgedChain; - type BridgedHeaderChain = BridgedHeaderChain; -} - -/// Run pallet test. 
-pub fn run_test(test: impl FnOnce() -> T) -> T { - sp_io::TestExternalities::new( - frame_system::GenesisConfig::::default().build_storage().unwrap(), - ) - .execute_with(test) -} diff --git a/primitives/beefy/Cargo.toml b/primitives/beefy/Cargo.toml deleted file mode 100644 index 4785f8297ba06b63a59f8c5d0b883990c27755f4..0000000000000000000000000000000000000000 --- a/primitives/beefy/Cargo.toml +++ /dev/null @@ -1,44 +0,0 @@ -[package] -name = "bp-beefy" -description = "Primitives of pallet-bridge-beefy module." -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive", "bit-vec"] } -scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive"] } -serde = { default-features = false, features = ["alloc", "derive"], workspace = true } - -# Bridge Dependencies - -bp-runtime = { path = "../runtime", default-features = false } - -# Substrate Dependencies - -binary-merkle-tree = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-consensus-beefy = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -pallet-beefy-mmr = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -pallet-mmr = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } - -[features] -default = [ "std" ] -std = [ - 
"bp-runtime/std", - "codec/std", - "frame-support/std", - "pallet-beefy-mmr/std", - "pallet-mmr/std", - "scale-info/std", - "serde/std", - "sp-consensus-beefy/std", - "sp-runtime/std", - "sp-std/std", -] diff --git a/primitives/beefy/src/lib.rs b/primitives/beefy/src/lib.rs deleted file mode 100644 index 0441781e79a66f785b985047ad56da70c0f13d49..0000000000000000000000000000000000000000 --- a/primitives/beefy/src/lib.rs +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Primitives that are used to interact with BEEFY bridge pallet. 
- -#![cfg_attr(not(feature = "std"), no_std)] -#![warn(missing_docs)] - -pub use binary_merkle_tree::merkle_root; -pub use pallet_beefy_mmr::BeefyEcdsaToEthereum; -pub use pallet_mmr::{ - primitives::{DataOrHash as MmrDataOrHash, Proof as MmrProof}, - verify_leaves_proof as verify_mmr_leaves_proof, -}; -pub use sp_consensus_beefy::{ - ecdsa_crypto::{ - AuthorityId as EcdsaValidatorId, AuthoritySignature as EcdsaValidatorSignature, - }, - known_payloads::MMR_ROOT_ID as MMR_ROOT_PAYLOAD_ID, - mmr::{BeefyAuthoritySet, MmrLeafVersion}, - BeefyAuthorityId, Commitment, Payload as BeefyPayload, SignedCommitment, ValidatorSet, - ValidatorSetId, BEEFY_ENGINE_ID, -}; - -use bp_runtime::{BasicOperatingMode, BlockNumberOf, Chain, HashOf}; -use codec::{Decode, Encode}; -use frame_support::Parameter; -use scale_info::TypeInfo; -use serde::{Deserialize, Serialize}; -use sp_runtime::{ - traits::{Convert, MaybeSerializeDeserialize}, - RuntimeAppPublic, RuntimeDebug, -}; -use sp_std::prelude::*; - -/// Substrate-based chain with BEEFY && MMR pallets deployed. -/// -/// Both BEEFY and MMR pallets and their clients may be configured to use different -/// primitives. Some of types can be configured in low-level pallets, but are constrained -/// when BEEFY+MMR bundle is used. -pub trait ChainWithBeefy: Chain { - /// The hashing algorithm used to compute the digest of the BEEFY commitment. - /// - /// Corresponds to the hashing algorithm, used by `sc_consensus_beefy::BeefyKeystore`. - type CommitmentHasher: sp_runtime::traits::Hash; - - /// The hashing algorithm used to build the MMR. - /// - /// The same algorithm is also used to compute merkle roots in BEEFY - /// (e.g. validator addresses root in leaf data). - /// - /// Corresponds to the `Hashing` field of the `pallet-mmr` configuration. - type MmrHashing: sp_runtime::traits::Hash; - - /// The output type of the hashing algorithm used to build the MMR. - /// - /// This type is actually stored in the MMR. 
- - /// Corresponds to the `Hash` field of the `pallet-mmr` configuration. - type MmrHash: sp_std::hash::Hash - + Parameter - + Copy - + AsRef<[u8]> - + Default - + MaybeSerializeDeserialize - + PartialOrd; - - /// The type expected for the MMR leaf extra data. - type BeefyMmrLeafExtra: Parameter; - - /// A way to identify a BEEFY validator. - /// - /// Corresponds to the `BeefyId` field of the `pallet-beefy` configuration. - type AuthorityId: BeefyAuthorityId + Parameter; - - /// A way to convert validator id to its raw representation in the BEEFY merkle tree. - /// - /// Corresponds to the `BeefyAuthorityToMerkleLeaf` field of the `pallet-beefy-mmr` - /// configuration. - type AuthorityIdToMerkleLeaf: Convert>; -} - -/// BEEFY validator id used by given Substrate chain. -pub type BeefyAuthorityIdOf = ::AuthorityId; -/// BEEFY validator set, containing both validator identifiers and the numeric set id. -pub type BeefyAuthoritySetOf = ValidatorSet>; -/// BEEFY authority set, containing both validator identifiers and the numeric set id. -pub type BeefyAuthoritySetInfoOf = sp_consensus_beefy::mmr::BeefyAuthoritySet>; -/// BEEFY validator signature used by given Substrate chain. -pub type BeefyValidatorSignatureOf = - <::AuthorityId as RuntimeAppPublic>::Signature; -/// Signed BEEFY commitment used by given Substrate chain. -pub type BeefySignedCommitmentOf = - SignedCommitment, BeefyValidatorSignatureOf>; -/// Hash algorithm, used to compute the digest of the BEEFY commitment before signing it. -pub type BeefyCommitmentHasher = ::CommitmentHasher; -/// Hash algorithm used in Beefy MMR construction by given Substrate chain. -pub type MmrHashingOf = ::MmrHashing; -/// Hash type, used in MMR construction by given Substrate chain. -pub type MmrHashOf = ::MmrHash; -/// BEEFY MMR proof type used by the given Substrate chain. -pub type MmrProofOf = MmrProof>; -/// The type of the MMR leaf extra data used by the given Substrate chain. 
-pub type BeefyMmrLeafExtraOf = ::BeefyMmrLeafExtra; -/// A way to convert a validator id to its raw representation in the BEEFY merkle tree, used by -/// the given Substrate chain. -pub type BeefyAuthorityIdToMerkleLeafOf = ::AuthorityIdToMerkleLeaf; -/// Actual type of leafs in the BEEFY MMR. -pub type BeefyMmrLeafOf = sp_consensus_beefy::mmr::MmrLeaf< - BlockNumberOf, - HashOf, - MmrHashOf, - BeefyMmrLeafExtraOf, ->; - -/// Data required for initializing the BEEFY pallet. -/// -/// Provides the initial context that the bridge needs in order to know -/// where to start the sync process from. -#[derive(Encode, Decode, RuntimeDebug, PartialEq, Clone, TypeInfo, Serialize, Deserialize)] -pub struct InitializationData { - /// Pallet operating mode. - pub operating_mode: BasicOperatingMode, - /// Number of the best block, finalized by BEEFY. - pub best_block_number: BlockNumber, - /// BEEFY authority set that will be finalizing descendants of the `best_beefy_block_number` - /// block. - pub authority_set: BeefyAuthoritySet, -} - -/// Basic data, stored by the pallet for every imported commitment. -#[derive(Encode, Decode, RuntimeDebug, PartialEq, TypeInfo)] -pub struct ImportedCommitment { - /// Block number and hash of the finalized block parent. - pub parent_number_and_hash: (BlockNumber, BlockHash), - /// MMR root at the imported block. - pub mmr_root: MmrHash, -} diff --git a/primitives/chain-asset-hub-rococo/Cargo.toml b/primitives/chain-asset-hub-rococo/Cargo.toml deleted file mode 100644 index ad0eb39f9e3ae6eb8619b34c5c5489668b4f2329..0000000000000000000000000000000000000000 --- a/primitives/chain-asset-hub-rococo/Cargo.toml +++ /dev/null @@ -1,29 +0,0 @@ -[package] -name = "bp-asset-hub-rococo" -description = "Primitives of AssetHubRococo parachain runtime." 
-version = "0.4.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } - -# Substrate Dependencies -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } - -# Bridge Dependencies -bp-xcm-bridge-hub-router = { path = "../xcm-bridge-hub-router", default-features = false } - -[features] -default = ["std"] -std = [ - "bp-xcm-bridge-hub-router/std", - "codec/std", - "frame-support/std", - "scale-info/std", -] diff --git a/primitives/chain-asset-hub-rococo/src/lib.rs b/primitives/chain-asset-hub-rococo/src/lib.rs deleted file mode 100644 index de2e9ae856d1f8756f0a2a6b9cae3da3e265e76e..0000000000000000000000000000000000000000 --- a/primitives/chain-asset-hub-rococo/src/lib.rs +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Module with configuration which reflects AssetHubRococo runtime setup. 
- -#![cfg_attr(not(feature = "std"), no_std)] - -use codec::{Decode, Encode}; -use scale_info::TypeInfo; - -pub use bp_xcm_bridge_hub_router::XcmBridgeHubRouterCall; - -/// `AssetHubRococo` Runtime `Call` enum. -/// -/// The enum represents a subset of possible `Call`s we can send to `AssetHubRococo` chain. -/// Ideally this code would be auto-generated from metadata, because we want to -/// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s. -/// -/// All entries here (like pretty much in the entire file) must be kept in sync with -/// `AssetHubRococo` `construct_runtime`, so that we maintain SCALE-compatibility. -#[allow(clippy::large_enum_variant)] -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -pub enum Call { - /// `ToWestendXcmRouter` bridge pallet. - #[codec(index = 45)] - ToWestendXcmRouter(XcmBridgeHubRouterCall), -} - -frame_support::parameter_types! { - /// Some sane weight to execute `xcm::Transact(pallet-xcm-bridge-hub-router::Call::report_bridge_status)`. - pub const XcmBridgeHubRouterTransactCallMaxWeight: frame_support::weights::Weight = frame_support::weights::Weight::from_parts(200_000_000, 6144); -} - -/// Identifier of AssetHubRococo in the Rococo relay chain. -pub const ASSET_HUB_ROCOCO_PARACHAIN_ID: u32 = 1000; diff --git a/primitives/chain-asset-hub-westend/Cargo.toml b/primitives/chain-asset-hub-westend/Cargo.toml deleted file mode 100644 index 15b17f4d02d4d7905c9baba568efd7a79131030f..0000000000000000000000000000000000000000 --- a/primitives/chain-asset-hub-westend/Cargo.toml +++ /dev/null @@ -1,29 +0,0 @@ -[package] -name = "bp-asset-hub-westend" -description = "Primitives of AssetHubWestend parachain runtime." 
-version = "0.3.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } - -# Substrate Dependencies -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } - -# Bridge Dependencies -bp-xcm-bridge-hub-router = { path = "../xcm-bridge-hub-router", default-features = false } - -[features] -default = ["std"] -std = [ - "bp-xcm-bridge-hub-router/std", - "codec/std", - "frame-support/std", - "scale-info/std", -] diff --git a/primitives/chain-asset-hub-westend/src/lib.rs b/primitives/chain-asset-hub-westend/src/lib.rs deleted file mode 100644 index 9de1c88098942cdf7bd0684462a95ac3de412490..0000000000000000000000000000000000000000 --- a/primitives/chain-asset-hub-westend/src/lib.rs +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Module with configuration which reflects AssetHubWestend runtime setup. 
- -#![cfg_attr(not(feature = "std"), no_std)] - -use codec::{Decode, Encode}; -use scale_info::TypeInfo; - -pub use bp_xcm_bridge_hub_router::XcmBridgeHubRouterCall; - -/// `AssetHubWestend` Runtime `Call` enum. -/// -/// The enum represents a subset of possible `Call`s we can send to `AssetHubWestend` chain. -/// Ideally this code would be auto-generated from metadata, because we want to -/// avoid depending directly on the ENTIRE runtime just to get the encoding of `Dispatchable`s. -/// -/// All entries here (like pretty much in the entire file) must be kept in sync with -/// `AssetHubWestend` `construct_runtime`, so that we maintain SCALE-compatibility. -#[allow(clippy::large_enum_variant)] -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -pub enum Call { - /// `ToRococoXcmRouter` bridge pallet. - #[codec(index = 34)] - ToRococoXcmRouter(XcmBridgeHubRouterCall), -} - -frame_support::parameter_types! { - /// Some sane weight to execute `xcm::Transact(pallet-xcm-bridge-hub-router::Call::report_bridge_status)`. - pub const XcmBridgeHubRouterTransactCallMaxWeight: frame_support::weights::Weight = frame_support::weights::Weight::from_parts(200_000_000, 6144); -} - -/// Identifier of AssetHubWestend in the Westend relay chain. -pub const ASSET_HUB_WESTEND_PARACHAIN_ID: u32 = 1000; diff --git a/primitives/chain-bridge-hub-cumulus/Cargo.toml b/primitives/chain-bridge-hub-cumulus/Cargo.toml deleted file mode 100644 index 82c87d8bc13e365f9decf37f981e3cdc1bdd95b6..0000000000000000000000000000000000000000 --- a/primitives/chain-bridge-hub-cumulus/Cargo.toml +++ /dev/null @@ -1,40 +0,0 @@ -[package] -name = "bp-bridge-hub-cumulus" -description = "Primitives for BridgeHub parachain runtimes." 
-version = "0.7.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] -# Bridge Dependencies - -bp-polkadot-core = { path = "../../primitives/polkadot-core", default-features = false } -bp-messages = { path = "../../primitives/messages", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } - -# Substrate Based Dependencies - -frame-system = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } - -# Polkadot Dependencies -polkadot-primitives = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false , branch = "master" } - -[features] -default = ["std"] -std = [ - "bp-messages/std", - "bp-polkadot-core/std", - "bp-runtime/std", - "frame-support/std", - "frame-system/std", - "polkadot-primitives/std", - "sp-api/std", - "sp-std/std", -] diff --git a/primitives/chain-bridge-hub-cumulus/src/lib.rs b/primitives/chain-bridge-hub-cumulus/src/lib.rs deleted file mode 100644 index f626fa6df010b96290ca2980d1fc8b4c44623bd5..0000000000000000000000000000000000000000 --- a/primitives/chain-bridge-hub-cumulus/src/lib.rs +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Primitives of all Cumulus-based bridge hubs. - -#![warn(missing_docs)] -#![cfg_attr(not(feature = "std"), no_std)] - -pub use bp_polkadot_core::{ - AccountId, AccountInfoStorageMapKeyProvider, AccountPublic, Balance, BlockNumber, Hash, Hasher, - Hashing, Header, Nonce, Perbill, Signature, SignedBlock, UncheckedExtrinsic, - EXTRA_STORAGE_PROOF_SIZE, TX_EXTRA_BYTES, -}; - -use bp_messages::*; -use bp_polkadot_core::SuffixedCommonTransactionExtension; -use bp_runtime::extensions::{ - BridgeRejectObsoleteHeadersAndMessages, RefundBridgedParachainMessagesSchema, -}; -use frame_support::{ - dispatch::DispatchClass, - parameter_types, - sp_runtime::{MultiAddress, MultiSigner}, - weights::constants, -}; -use frame_system::limits; -use sp_std::time::Duration; - -/// Maximal bridge hub header size. -pub const MAX_BRIDGE_HUB_HEADER_SIZE: u32 = 4_096; - -/// Average block interval in Cumulus-based parachains. -/// -/// Corresponds to the `MILLISECS_PER_BLOCK` from `parachains_common` crate. -pub const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_secs(12); - -/// All cumulus bridge hubs allow normal extrinsics to fill block up to 75 percent. -/// -/// This is a copy-paste from the cumulus repo's `parachains-common` crate. -pub const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); - -/// All cumulus bridge hubs chains allow for 0.5 seconds of compute with a 6-second average block -/// time. -/// -/// This is a copy-paste from the cumulus repo's `parachains-common` crate. 
-const MAXIMUM_BLOCK_WEIGHT: Weight = Weight::from_parts(constants::WEIGHT_REF_TIME_PER_SECOND, 0) - .saturating_div(2) - .set_proof_size(polkadot_primitives::MAX_POV_SIZE as u64); - -/// We allow for 2 seconds of compute with a 6 second average block. -const MAXIMUM_BLOCK_WEIGHT_FOR_ASYNC_BACKING: Weight = Weight::from_parts( - constants::WEIGHT_REF_TIME_PER_SECOND.saturating_mul(2), - polkadot_primitives::MAX_POV_SIZE as u64, -); - -/// All cumulus bridge hubs assume that about 5 percent of the block weight is consumed by -/// `on_initialize` handlers. This is used to limit the maximal weight of a single extrinsic. -/// -/// This is a copy-paste from the cumulus repo's `parachains-common` crate. -pub const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(5); - -parameter_types! { - /// Size limit of the Cumulus-based bridge hub blocks. - pub BlockLength: limits::BlockLength = limits::BlockLength::max_with_normal_ratio( - 5 * 1024 * 1024, - NORMAL_DISPATCH_RATIO, - ); - - /// Importing a block with 0 Extrinsics. - pub const BlockExecutionWeight: Weight = Weight::from_parts(constants::WEIGHT_REF_TIME_PER_NANOS, 0) - .saturating_mul(5_000_000); - /// Executing a NO-OP `System::remarks` Extrinsic. - pub const ExtrinsicBaseWeight: Weight = Weight::from_parts(constants::WEIGHT_REF_TIME_PER_NANOS, 0) - .saturating_mul(125_000); - - /// Weight limit of the Cumulus-based bridge hub blocks. 
- pub BlockWeights: limits::BlockWeights = limits::BlockWeights::builder() - .base_block(BlockExecutionWeight::get()) - .for_class(DispatchClass::all(), |weights| { - weights.base_extrinsic = ExtrinsicBaseWeight::get(); - }) - .for_class(DispatchClass::Normal, |weights| { - weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); - }) - .for_class(DispatchClass::Operational, |weights| { - weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); - // Operational transactions have an extra reserved space, so that they - // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. - weights.reserved = Some( - MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT, - ); - }) - .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) - .build_or_panic(); - - /// Weight limit of the Cumulus-based bridge hub blocks when async backing is enabled. - pub BlockWeightsForAsyncBacking: limits::BlockWeights = limits::BlockWeights::builder() - .base_block(BlockExecutionWeight::get()) - .for_class(DispatchClass::all(), |weights| { - weights.base_extrinsic = ExtrinsicBaseWeight::get(); - }) - .for_class(DispatchClass::Normal, |weights| { - weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT_FOR_ASYNC_BACKING); - }) - .for_class(DispatchClass::Operational, |weights| { - weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT_FOR_ASYNC_BACKING); - // Operational transactions have an extra reserved space, so that they - // are included even if block reached `MAXIMUM_BLOCK_WEIGHT_FOR_ASYNC_BACKING`. - weights.reserved = Some( - MAXIMUM_BLOCK_WEIGHT_FOR_ASYNC_BACKING - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT_FOR_ASYNC_BACKING, - ); - }) - .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) - .build_or_panic(); -} - -/// Public key of the chain account that may be used to verify signatures. -pub type AccountSigner = MultiSigner; - -/// The address format for describing accounts. 
-pub type Address = MultiAddress; - -// Note about selecting values of two following constants: -// -// Normal transactions have limit of 75% of 1/2 second weight for Cumulus parachains. Let's keep -// some reserve for the rest of stuff there => let's select values that fit in 50% of maximal limit. -// -// Using current constants, the limit would be: -// -// `75% * WEIGHT_REF_TIME_PER_SECOND * 1 / 2 * 50% = 0.75 * 1_000_000_000_000 / 2 * 0.5 = -// 187_500_000_000` -// -// According to (preliminary) weights of messages pallet, cost of additional message is zero and the -// cost of additional relayer is `8_000_000 + db read + db write`. Let's say we want no more than -// 4096 unconfirmed messages (no any scientific justification for that - it just looks large -// enough). And then we can't have more than 4096 relayers. E.g. for 1024 relayers is (using -// `RocksDbWeight`): -// -// `1024 * (8_000_000 + db read + db write) = 1024 * (8_000_000 + 25_000_000 + 100_000_000) = -// 136_192_000_000` -// -// So 1024 looks like good approximation for the number of relayers. If something is wrong in those -// assumptions, or something will change, it shall be caught by the -// `ensure_able_to_receive_confirmation` test. - -/// Maximal number of unrewarded relayer entries at inbound lane for Cumulus-based parachains. -/// Note: this value is security-relevant, decreasing it should not be done without careful -/// analysis (like the one above). -pub const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = 1024; - -/// Maximal number of unconfirmed messages at inbound lane for Cumulus-based parachains. -/// Note: this value is security-relevant, decreasing it should not be done without careful -/// analysis (like the one above). -pub const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = 4096; - -/// Signed extension that is used by all bridge hubs. 
-pub type TransactionExtension = SuffixedCommonTransactionExtension<( - BridgeRejectObsoleteHeadersAndMessages, - RefundBridgedParachainMessagesSchema, -)>; diff --git a/primitives/chain-bridge-hub-kusama/Cargo.toml b/primitives/chain-bridge-hub-kusama/Cargo.toml deleted file mode 100644 index 2075fe2543c6783326e6cb72756fdb2973f72340..0000000000000000000000000000000000000000 --- a/primitives/chain-bridge-hub-kusama/Cargo.toml +++ /dev/null @@ -1,36 +0,0 @@ -[package] -name = "bp-bridge-hub-kusama" -description = "Primitives of BridgeHubKusama parachain runtime." -version = "0.6.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] -# Bridge Dependencies - -bp-bridge-hub-cumulus = { path = "../chain-bridge-hub-cumulus", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } -bp-messages = { path = "../../primitives/messages", default-features = false } - -# Substrate Based Dependencies - -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } - -[features] -default = ["std"] -std = [ - "bp-bridge-hub-cumulus/std", - "bp-messages/std", - "bp-runtime/std", - "frame-support/std", - "sp-api/std", - "sp-runtime/std", - "sp-std/std", -] diff --git a/primitives/chain-bridge-hub-kusama/src/lib.rs b/primitives/chain-bridge-hub-kusama/src/lib.rs deleted file mode 100644 index ef3ef4ab7b7a9bc111218e3c53091ac232f34721..0000000000000000000000000000000000000000 --- a/primitives/chain-bridge-hub-kusama/src/lib.rs +++ /dev/null @@ -1,94 
+0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Module with configuration which reflects BridgeHubKusama runtime setup (AccountId, Headers, -//! Hashes...) - -#![warn(missing_docs)] -#![cfg_attr(not(feature = "std"), no_std)] - -pub use bp_bridge_hub_cumulus::*; -use bp_messages::*; -use bp_runtime::{ - decl_bridge_finality_runtime_apis, decl_bridge_messages_runtime_apis, Chain, ChainId, Parachain, -}; -use frame_support::{ - dispatch::DispatchClass, - sp_runtime::{MultiAddress, MultiSigner}, -}; -use sp_runtime::RuntimeDebug; - -/// BridgeHubKusama parachain. 
-#[derive(RuntimeDebug)] -pub struct BridgeHubKusama; - -impl Chain for BridgeHubKusama { - const ID: ChainId = *b"bhks"; - - type BlockNumber = BlockNumber; - type Hash = Hash; - type Hasher = Hasher; - type Header = Header; - - type AccountId = AccountId; - type Balance = Balance; - type Nonce = Nonce; - type Signature = Signature; - - fn max_extrinsic_size() -> u32 { - *BlockLength::get().max.get(DispatchClass::Normal) - } - - fn max_extrinsic_weight() -> Weight { - BlockWeights::get() - .get(DispatchClass::Normal) - .max_extrinsic - .unwrap_or(Weight::MAX) - } -} - -impl Parachain for BridgeHubKusama { - const PARACHAIN_ID: u32 = BRIDGE_HUB_KUSAMA_PARACHAIN_ID; - const MAX_HEADER_SIZE: u32 = MAX_BRIDGE_HUB_HEADER_SIZE; -} - -impl ChainWithMessages for BridgeHubKusama { - const WITH_CHAIN_MESSAGES_PALLET_NAME: &'static str = - WITH_BRIDGE_HUB_KUSAMA_MESSAGES_PALLET_NAME; - const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = - MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; - const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = - MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; -} - -/// Public key of the chain account that may be used to verify signatures. -pub type AccountSigner = MultiSigner; - -/// The address format for describing accounts. -pub type Address = MultiAddress; - -/// Identifier of BridgeHubKusama in the Kusama relay chain. -pub const BRIDGE_HUB_KUSAMA_PARACHAIN_ID: u32 = 1002; - -/// Name of the With-BridgeHubKusama messages pallet instance that is deployed at bridged chains. -pub const WITH_BRIDGE_HUB_KUSAMA_MESSAGES_PALLET_NAME: &str = "BridgeKusamaMessages"; - -/// Name of the With-BridgeHubKusama bridge-relayers pallet instance that is deployed at bridged -/// chains. 
-pub const WITH_BRIDGE_HUB_KUSAMA_RELAYERS_PALLET_NAME: &str = "BridgeRelayers"; - -decl_bridge_finality_runtime_apis!(bridge_hub_kusama); -decl_bridge_messages_runtime_apis!(bridge_hub_kusama); diff --git a/primitives/chain-bridge-hub-polkadot/Cargo.toml b/primitives/chain-bridge-hub-polkadot/Cargo.toml deleted file mode 100644 index edef6e612372feb8cafef1c822482dee26ba4270..0000000000000000000000000000000000000000 --- a/primitives/chain-bridge-hub-polkadot/Cargo.toml +++ /dev/null @@ -1,37 +0,0 @@ -[package] -name = "bp-bridge-hub-polkadot" -description = "Primitives of BridgeHubPolkadot parachain runtime." -version = "0.6.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] - -# Bridge Dependencies - -bp-bridge-hub-cumulus = { path = "../chain-bridge-hub-cumulus", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } -bp-messages = { path = "../../primitives/messages", default-features = false } - -# Substrate Based Dependencies - -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } - -[features] -default = ["std"] -std = [ - "bp-bridge-hub-cumulus/std", - "bp-messages/std", - "bp-runtime/std", - "frame-support/std", - "sp-api/std", - "sp-runtime/std", - "sp-std/std", -] diff --git a/primitives/chain-bridge-hub-polkadot/src/lib.rs b/primitives/chain-bridge-hub-polkadot/src/lib.rs deleted file mode 100644 index 9db71af928e5df01170cf4ab8bf5f20cd72f7610..0000000000000000000000000000000000000000 --- 
a/primitives/chain-bridge-hub-polkadot/src/lib.rs +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Module with configuration which reflects BridgeHubPolkadot runtime setup -//! (AccountId, Headers, Hashes...) - -#![warn(missing_docs)] -#![cfg_attr(not(feature = "std"), no_std)] - -pub use bp_bridge_hub_cumulus::*; -use bp_messages::*; -use bp_runtime::{ - decl_bridge_finality_runtime_apis, decl_bridge_messages_runtime_apis, Chain, ChainId, Parachain, -}; -use frame_support::dispatch::DispatchClass; -use sp_runtime::RuntimeDebug; - -/// BridgeHubPolkadot parachain. 
-#[derive(RuntimeDebug)] -pub struct BridgeHubPolkadot; - -impl Chain for BridgeHubPolkadot { - const ID: ChainId = *b"bhpd"; - - type BlockNumber = BlockNumber; - type Hash = Hash; - type Hasher = Hasher; - type Header = Header; - - type AccountId = AccountId; - type Balance = Balance; - type Nonce = Nonce; - type Signature = Signature; - - fn max_extrinsic_size() -> u32 { - *BlockLength::get().max.get(DispatchClass::Normal) - } - - fn max_extrinsic_weight() -> Weight { - BlockWeights::get() - .get(DispatchClass::Normal) - .max_extrinsic - .unwrap_or(Weight::MAX) - } -} - -impl Parachain for BridgeHubPolkadot { - const PARACHAIN_ID: u32 = BRIDGE_HUB_POLKADOT_PARACHAIN_ID; - const MAX_HEADER_SIZE: u32 = MAX_BRIDGE_HUB_HEADER_SIZE; -} - -impl ChainWithMessages for BridgeHubPolkadot { - const WITH_CHAIN_MESSAGES_PALLET_NAME: &'static str = - WITH_BRIDGE_HUB_POLKADOT_MESSAGES_PALLET_NAME; - - const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = - MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; - const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = - MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; -} - -/// Identifier of BridgeHubPolkadot in the Polkadot relay chain. -pub const BRIDGE_HUB_POLKADOT_PARACHAIN_ID: u32 = 1002; - -/// Name of the With-BridgeHubPolkadot messages pallet instance that is deployed at bridged chains. -pub const WITH_BRIDGE_HUB_POLKADOT_MESSAGES_PALLET_NAME: &str = "BridgePolkadotMessages"; - -/// Name of the With-BridgeHubPolkadot bridge-relayers pallet instance that is deployed at bridged -/// chains. 
-pub const WITH_BRIDGE_HUB_POLKADOT_RELAYERS_PALLET_NAME: &str = "BridgeRelayers"; - -decl_bridge_finality_runtime_apis!(bridge_hub_polkadot); -decl_bridge_messages_runtime_apis!(bridge_hub_polkadot); diff --git a/primitives/chain-bridge-hub-rococo/Cargo.toml b/primitives/chain-bridge-hub-rococo/Cargo.toml deleted file mode 100644 index 0370fa1c6bb2c4c9fc6067ee2bc0cbb7a1461078..0000000000000000000000000000000000000000 --- a/primitives/chain-bridge-hub-rococo/Cargo.toml +++ /dev/null @@ -1,36 +0,0 @@ -[package] -name = "bp-bridge-hub-rococo" -description = "Primitives of BridgeHubRococo parachain runtime." -version = "0.7.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] -# Bridge Dependencies - -bp-bridge-hub-cumulus = { path = "../chain-bridge-hub-cumulus", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } -bp-messages = { path = "../../primitives/messages", default-features = false } - -# Substrate Based Dependencies - -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } - -[features] -default = ["std"] -std = [ - "bp-bridge-hub-cumulus/std", - "bp-messages/std", - "bp-runtime/std", - "frame-support/std", - "sp-api/std", - "sp-runtime/std", - "sp-std/std", -] diff --git a/primitives/chain-bridge-hub-rococo/src/lib.rs b/primitives/chain-bridge-hub-rococo/src/lib.rs deleted file mode 100644 index c730a6fac7f99fd1bf406f3f9e7501ab17733a3b..0000000000000000000000000000000000000000 --- 
a/primitives/chain-bridge-hub-rococo/src/lib.rs +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Module with configuration which reflects BridgeHubRococo runtime setup (AccountId, Headers, -//! Hashes...) - -#![warn(missing_docs)] -#![cfg_attr(not(feature = "std"), no_std)] - -pub use bp_bridge_hub_cumulus::*; -use bp_messages::*; -use bp_runtime::{ - decl_bridge_finality_runtime_apis, decl_bridge_messages_runtime_apis, Chain, ChainId, Parachain, -}; -use frame_support::dispatch::DispatchClass; -use sp_runtime::{MultiAddress, MultiSigner, RuntimeDebug}; - -/// BridgeHubRococo parachain. 
-#[derive(RuntimeDebug)] -pub struct BridgeHubRococo; - -impl Chain for BridgeHubRococo { - const ID: ChainId = *b"bhro"; - - type BlockNumber = BlockNumber; - type Hash = Hash; - type Hasher = Hasher; - type Header = Header; - - type AccountId = AccountId; - type Balance = Balance; - type Nonce = Nonce; - type Signature = Signature; - - fn max_extrinsic_size() -> u32 { - *BlockLength::get().max.get(DispatchClass::Normal) - } - - fn max_extrinsic_weight() -> Weight { - BlockWeightsForAsyncBacking::get() - .get(DispatchClass::Normal) - .max_extrinsic - .unwrap_or(Weight::MAX) - } -} - -impl Parachain for BridgeHubRococo { - const PARACHAIN_ID: u32 = BRIDGE_HUB_ROCOCO_PARACHAIN_ID; - const MAX_HEADER_SIZE: u32 = MAX_BRIDGE_HUB_HEADER_SIZE; -} - -impl ChainWithMessages for BridgeHubRococo { - const WITH_CHAIN_MESSAGES_PALLET_NAME: &'static str = - WITH_BRIDGE_HUB_ROCOCO_MESSAGES_PALLET_NAME; - - const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = - MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; - const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = - MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; -} - -/// Public key of the chain account that may be used to verify signatures. -pub type AccountSigner = MultiSigner; - -/// The address format for describing accounts. -pub type Address = MultiAddress; - -/// Identifier of BridgeHubRococo in the Rococo relay chain. -pub const BRIDGE_HUB_ROCOCO_PARACHAIN_ID: u32 = 1013; - -/// Name of the With-BridgeHubRococo messages pallet instance that is deployed at bridged chains. -pub const WITH_BRIDGE_HUB_ROCOCO_MESSAGES_PALLET_NAME: &str = "BridgeRococoMessages"; - -/// Name of the With-BridgeHubRococo bridge-relayers pallet instance that is deployed at bridged -/// chains. -pub const WITH_BRIDGE_HUB_ROCOCO_RELAYERS_PALLET_NAME: &str = "BridgeRelayers"; - -/// Pallet index of `BridgeWestendMessages: pallet_bridge_messages::`. 
-pub const WITH_BRIDGE_ROCOCO_TO_WESTEND_MESSAGES_PALLET_INDEX: u8 = 51; -/// Pallet index of `BridgePolkadotBulletinMessages: pallet_bridge_messages::`. -pub const WITH_BRIDGE_ROCOCO_TO_BULLETIN_MESSAGES_PALLET_INDEX: u8 = 61; - -decl_bridge_finality_runtime_apis!(bridge_hub_rococo); -decl_bridge_messages_runtime_apis!(bridge_hub_rococo); - -frame_support::parameter_types! { - /// The XCM fee that is paid for executing XCM program (with `ExportMessage` instruction) at the Rococo - /// BridgeHub. - /// (initially was calculated by test `BridgeHubRococo::can_calculate_weight_for_paid_export_message_with_reserve_transfer` + `33%`) - pub const BridgeHubRococoBaseXcmFeeInRocs: u128 = 59_034_266; - - /// Transaction fee that is paid at the Rococo BridgeHub for delivering single inbound message. - /// (initially was calculated by test `BridgeHubRococo::can_calculate_fee_for_complex_message_delivery_transaction` + `33%`) - pub const BridgeHubRococoBaseDeliveryFeeInRocs: u128 = 5_651_581_649; - - /// Transaction fee that is paid at the Rococo BridgeHub for delivering single outbound message confirmation. - /// (initially was calculated by test `BridgeHubRococo::can_calculate_fee_for_complex_message_confirmation_transaction` + `33%`) - pub const BridgeHubRococoBaseConfirmationFeeInRocs: u128 = 5_380_904_835; -} diff --git a/primitives/chain-bridge-hub-westend/Cargo.toml b/primitives/chain-bridge-hub-westend/Cargo.toml deleted file mode 100644 index ea452d89dba54b53f5597e249c04ed6b90779358..0000000000000000000000000000000000000000 --- a/primitives/chain-bridge-hub-westend/Cargo.toml +++ /dev/null @@ -1,37 +0,0 @@ -[package] -name = "bp-bridge-hub-westend" -description = "Primitives of BridgeHubWestend parachain runtime." 
-version = "0.3.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] - -# Bridge Dependencies - -bp-bridge-hub-cumulus = { path = "../chain-bridge-hub-cumulus", default-features = false } -bp-runtime = { path = "../../primitives/runtime", default-features = false } -bp-messages = { path = "../../primitives/messages", default-features = false } - -# Substrate Based Dependencies - -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } - -[features] -default = ["std"] -std = [ - "bp-bridge-hub-cumulus/std", - "bp-messages/std", - "bp-runtime/std", - "frame-support/std", - "sp-api/std", - "sp-runtime/std", - "sp-std/std", -] diff --git a/primitives/chain-bridge-hub-westend/src/lib.rs b/primitives/chain-bridge-hub-westend/src/lib.rs deleted file mode 100644 index 8c4e03db025f054a464ccf6078bdc2cb19f70944..0000000000000000000000000000000000000000 --- a/primitives/chain-bridge-hub-westend/src/lib.rs +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Module with configuration which reflects BridgeHubWestend runtime setup -//! (AccountId, Headers, Hashes...) - -#![cfg_attr(not(feature = "std"), no_std)] - -pub use bp_bridge_hub_cumulus::*; -use bp_messages::*; -use bp_runtime::{ - decl_bridge_finality_runtime_apis, decl_bridge_messages_runtime_apis, Chain, ChainId, Parachain, -}; -use frame_support::dispatch::DispatchClass; -use sp_runtime::RuntimeDebug; - -/// BridgeHubWestend parachain. -#[derive(RuntimeDebug)] -pub struct BridgeHubWestend; - -impl Chain for BridgeHubWestend { - const ID: ChainId = *b"bhwd"; - - type BlockNumber = BlockNumber; - type Hash = Hash; - type Hasher = Hasher; - type Header = Header; - - type AccountId = AccountId; - type Balance = Balance; - type Nonce = Nonce; - type Signature = Signature; - - fn max_extrinsic_size() -> u32 { - *BlockLength::get().max.get(DispatchClass::Normal) - } - - fn max_extrinsic_weight() -> Weight { - BlockWeightsForAsyncBacking::get() - .get(DispatchClass::Normal) - .max_extrinsic - .unwrap_or(Weight::MAX) - } -} - -impl Parachain for BridgeHubWestend { - const PARACHAIN_ID: u32 = BRIDGE_HUB_WESTEND_PARACHAIN_ID; - const MAX_HEADER_SIZE: u32 = MAX_BRIDGE_HUB_HEADER_SIZE; -} - -impl ChainWithMessages for BridgeHubWestend { - const WITH_CHAIN_MESSAGES_PALLET_NAME: &'static str = - WITH_BRIDGE_HUB_WESTEND_MESSAGES_PALLET_NAME; - - const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = - MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; - const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = - MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; -} - -/// Identifier of 
BridgeHubWestend in the Westend relay chain. -pub const BRIDGE_HUB_WESTEND_PARACHAIN_ID: u32 = 1002; - -/// Name of the With-BridgeHubWestend messages pallet instance that is deployed at bridged chains. -pub const WITH_BRIDGE_HUB_WESTEND_MESSAGES_PALLET_NAME: &str = "BridgeWestendMessages"; - -/// Name of the With-BridgeHubWestend bridge-relayers pallet instance that is deployed at bridged -/// chains. -pub const WITH_BRIDGE_HUB_WESTEND_RELAYERS_PALLET_NAME: &str = "BridgeRelayers"; - -/// Pallet index of `BridgeRococoMessages: pallet_bridge_messages::`. -pub const WITH_BRIDGE_WESTEND_TO_ROCOCO_MESSAGES_PALLET_INDEX: u8 = 44; - -decl_bridge_finality_runtime_apis!(bridge_hub_westend); -decl_bridge_messages_runtime_apis!(bridge_hub_westend); - -frame_support::parameter_types! { - /// The XCM fee that is paid for executing XCM program (with `ExportMessage` instruction) at the Westend - /// BridgeHub. - /// (initially was calculated by test `BridgeHubWestend::can_calculate_weight_for_paid_export_message_with_reserve_transfer` + `33%`) - pub const BridgeHubWestendBaseXcmFeeInWnds: u128 = 17_756_830_000; - - /// Transaction fee that is paid at the Westend BridgeHub for delivering single inbound message. - /// (initially was calculated by test `BridgeHubWestend::can_calculate_fee_for_complex_message_delivery_transaction` + `33%`) - pub const BridgeHubWestendBaseDeliveryFeeInWnds: u128 = 1_695_489_961_344; - - /// Transaction fee that is paid at the Westend BridgeHub for delivering single outbound message confirmation. 
- /// (initially was calculated by test `BridgeHubWestend::can_calculate_fee_for_complex_message_confirmation_transaction` + `33%`) - pub const BridgeHubWestendBaseConfirmationFeeInWnds: u128 = 1_618_309_961_344; -} diff --git a/primitives/chain-kusama/Cargo.toml b/primitives/chain-kusama/Cargo.toml deleted file mode 100644 index 465acf121d408ac60d13dad1e6e12ffd73f01327..0000000000000000000000000000000000000000 --- a/primitives/chain-kusama/Cargo.toml +++ /dev/null @@ -1,35 +0,0 @@ -[package] -name = "bp-kusama" -description = "Primitives of Kusama runtime." -version = "0.5.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] - -# Bridge Dependencies - -bp-header-chain = { path = "../header-chain", default-features = false } -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } - -# Substrate Based Dependencies - -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } - -[features] -default = ["std"] -std = [ - "bp-header-chain/std", - "bp-polkadot-core/std", - "bp-runtime/std", - "frame-support/std", - "sp-api/std", - "sp-std/std", -] diff --git a/primitives/chain-kusama/src/lib.rs b/primitives/chain-kusama/src/lib.rs deleted file mode 100644 index 50660fd846735b5d26cd34914226e4d4a010bb27..0000000000000000000000000000000000000000 --- a/primitives/chain-kusama/src/lib.rs +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Primitives of the Kusama chain. - -#![warn(missing_docs)] -#![cfg_attr(not(feature = "std"), no_std)] - -pub use bp_polkadot_core::*; - -use bp_header_chain::ChainWithGrandpa; -use bp_runtime::{decl_bridge_finality_runtime_apis, Chain, ChainId}; -use frame_support::weights::Weight; - -/// Kusama Chain -pub struct Kusama; - -impl Chain for Kusama { - const ID: ChainId = *b"ksma"; - - type BlockNumber = BlockNumber; - type Hash = Hash; - type Hasher = Hasher; - type Header = Header; - - type AccountId = AccountId; - type Balance = Balance; - type Nonce = Nonce; - type Signature = Signature; - - fn max_extrinsic_size() -> u32 { - max_extrinsic_size() - } - - fn max_extrinsic_weight() -> Weight { - max_extrinsic_weight() - } -} - -impl ChainWithGrandpa for Kusama { - const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = WITH_KUSAMA_GRANDPA_PALLET_NAME; - const MAX_AUTHORITIES_COUNT: u32 = MAX_AUTHORITIES_COUNT; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = - REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY; - const MAX_MANDATORY_HEADER_SIZE: u32 = MAX_MANDATORY_HEADER_SIZE; - const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; -} - -// The TransactionExtension used by Kusama. -pub use bp_polkadot_core::CommonTransactionExtension as TransactionExtension; - -/// Name of the parachains pallet in the Kusama runtime. 
-pub const PARAS_PALLET_NAME: &str = "Paras"; - -/// Name of the With-Kusama GRANDPA pallet instance that is deployed at bridged chains. -pub const WITH_KUSAMA_GRANDPA_PALLET_NAME: &str = "BridgeKusamaGrandpa"; -/// Name of the With-Kusama parachains pallet instance that is deployed at bridged chains. -pub const WITH_KUSAMA_BRIDGE_PARACHAINS_PALLET_NAME: &str = "BridgeKusamaParachains"; - -/// Maximal size of encoded `bp_parachains::ParaStoredHeaderData` structure among all Polkadot -/// parachains. -/// -/// It includes the block number and state root, so it shall be near 40 bytes, but let's have some -/// reserve. -pub const MAX_NESTED_PARACHAIN_HEAD_DATA_SIZE: u32 = 128; - -decl_bridge_finality_runtime_apis!(kusama, grandpa); diff --git a/primitives/chain-polkadot-bulletin/Cargo.toml b/primitives/chain-polkadot-bulletin/Cargo.toml deleted file mode 100644 index 86f05624d016d7f73f31e356d6c0a7c891851af7..0000000000000000000000000000000000000000 --- a/primitives/chain-polkadot-bulletin/Cargo.toml +++ /dev/null @@ -1,45 +0,0 @@ -[package] -name = "bp-polkadot-bulletin" -description = "Primitives of Polkadot Bulletin chain runtime." 
-version = "0.4.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } - -# Bridge Dependencies - -bp-header-chain = { path = "../header-chain", default-features = false } -bp-messages = { path = "../messages", default-features = false } -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } - -# Substrate Based Dependencies - -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -frame-system = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } - -[features] -default = ["std"] -std = [ - "bp-header-chain/std", - "bp-messages/std", - "bp-polkadot-core/std", - "bp-runtime/std", - "codec/std", - "frame-support/std", - "frame-system/std", - "scale-info/std", - "sp-api/std", - "sp-runtime/std", - "sp-std/std", -] diff --git a/primitives/chain-polkadot-bulletin/src/lib.rs b/primitives/chain-polkadot-bulletin/src/lib.rs deleted file mode 100644 index 73dd122bd153869b937ed65f8e7ea7f4dde79c7c..0000000000000000000000000000000000000000 --- a/primitives/chain-polkadot-bulletin/src/lib.rs +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Polkadot Bulletin Chain primitives. - -#![warn(missing_docs)] -#![cfg_attr(not(feature = "std"), no_std)] - -use bp_header_chain::ChainWithGrandpa; -use bp_messages::{ChainWithMessages, MessageNonce}; -use bp_runtime::{ - decl_bridge_finality_runtime_apis, decl_bridge_messages_runtime_apis, - extensions::{ - CheckEra, CheckGenesis, CheckNonZeroSender, CheckNonce, CheckSpecVersion, CheckTxVersion, - CheckWeight, GenericTransactionExtension, GenericTransactionExtensionSchema, - }, - Chain, ChainId, TransactionEra, -}; -use codec::{Decode, Encode}; -use frame_support::{ - dispatch::DispatchClass, - parameter_types, - weights::{constants::WEIGHT_REF_TIME_PER_SECOND, Weight}, -}; -use frame_system::limits; -use scale_info::TypeInfo; -use sp_runtime::{ - impl_tx_ext_default, - traits::{Dispatchable, TransactionExtensionBase}, - transaction_validity::TransactionValidityError, - Perbill, -}; - -// This chain reuses most of Polkadot primitives. -pub use bp_polkadot_core::{ - AccountAddress, AccountId, Balance, Block, BlockNumber, Hash, Hasher, Header, Nonce, Signature, - SignedBlock, UncheckedExtrinsic, AVERAGE_HEADER_SIZE, EXTRA_STORAGE_PROOF_SIZE, - MAX_MANDATORY_HEADER_SIZE, REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY, -}; - -/// Maximal number of GRANDPA authorities at Polkadot Bulletin chain. 
-pub const MAX_AUTHORITIES_COUNT: u32 = 100; - -/// Name of the With-Polkadot Bulletin chain GRANDPA pallet instance that is deployed at bridged -/// chains. -pub const WITH_POLKADOT_BULLETIN_GRANDPA_PALLET_NAME: &str = "BridgePolkadotBulletinGrandpa"; -/// Name of the With-Polkadot Bulletin chain messages pallet instance that is deployed at bridged -/// chains. -pub const WITH_POLKADOT_BULLETIN_MESSAGES_PALLET_NAME: &str = "BridgePolkadotBulletinMessages"; - -// There are fewer system operations on this chain (e.g. staking, governance, etc.). Use a higher -// percentage of the block for data storage. -const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(90); - -// Re following constants - we are using the same values at Cumulus parachains. They are limited -// by the maximal transaction weight/size. Since block limits at Bulletin Chain are larger than -// at the Cumulus Bridgeg Hubs, we could reuse the same values. - -/// Maximal number of unrewarded relayer entries at inbound lane for Cumulus-based parachains. -pub const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = 1024; - -/// Maximal number of unconfirmed messages at inbound lane for Cumulus-based parachains. -pub const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = 4096; - -/// This signed extension is used to ensure that the chain transactions are signed by proper -pub type ValidateSigned = GenericTransactionExtensionSchema<(), ()>; - -/// Signed extension schema, used by Polkadot Bulletin. -pub type TransactionExtensionSchema = GenericTransactionExtension<( - ( - CheckNonZeroSender, - CheckSpecVersion, - CheckTxVersion, - CheckGenesis, - CheckEra, - CheckNonce, - CheckWeight, - ), - ValidateSigned, -)>; - -/// Transaction extension, used by Polkadot Bulletin. 
-#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -pub struct TransactionExtension(TransactionExtensionSchema); - -impl TransactionExtensionBase for TransactionExtension { - const IDENTIFIER: &'static str = "Not needed."; - type Implicit = ::Implicit; - - fn implicit(&self) -> Result { - ::implicit(&self.0) - } -} - -impl sp_runtime::traits::TransactionExtension for TransactionExtension -where - C: Dispatchable, -{ - type Pre = (); - type Val = (); - - impl_tx_ext_default!(C; Context; validate prepare); -} - -impl TransactionExtension { - /// Create signed extension from its components. - pub fn from_params( - spec_version: u32, - transaction_version: u32, - era: TransactionEra, - genesis_hash: Hash, - nonce: Nonce, - ) -> Self { - Self(GenericTransactionExtension::new( - ( - ( - (), // non-zero sender - (), // spec version - (), // tx version - (), // genesis - era.frame_era(), // era - nonce.into(), // nonce (compact encoding) - (), // Check weight - ), - (), - ), - Some(( - ( - (), - spec_version, - transaction_version, - genesis_hash, - era.signed_payload(genesis_hash), - (), - (), - ), - (), - )), - )) - } - - /// Return transaction nonce. - pub fn nonce(&self) -> Nonce { - let common_payload = self.0.payload.0; - common_payload.5 .0 - } -} - -parameter_types! { - /// We allow for 2 seconds of compute with a 6 second average block time. - pub BlockWeights: limits::BlockWeights = limits::BlockWeights::with_sensible_defaults( - Weight::from_parts(2u64 * WEIGHT_REF_TIME_PER_SECOND, u64::MAX), - NORMAL_DISPATCH_RATIO, - ); - // Note: Max transaction size is 8 MB. Set max block size to 10 MB to facilitate data storage. - // This is double the "normal" Relay Chain block length limit. - /// Maximal block length at Polkadot Bulletin chain. - pub BlockLength: limits::BlockLength = limits::BlockLength::max_with_normal_ratio( - 10 * 1024 * 1024, - NORMAL_DISPATCH_RATIO, - ); -} - -/// Polkadot Bulletin Chain declaration. 
-pub struct PolkadotBulletin; - -impl Chain for PolkadotBulletin { - const ID: ChainId = *b"pdbc"; - - type BlockNumber = BlockNumber; - type Hash = Hash; - type Hasher = Hasher; - type Header = Header; - - type AccountId = AccountId; - // The Bulletin Chain is a permissioned blockchain without any balances. Our `Chain` trait - // requires balance type, which is then used by various bridge infrastructure code. However - // this code is optional and we are not planning to use it in our bridge. - type Balance = Balance; - type Nonce = Nonce; - type Signature = Signature; - - fn max_extrinsic_size() -> u32 { - *BlockLength::get().max.get(DispatchClass::Normal) - } - - fn max_extrinsic_weight() -> Weight { - BlockWeights::get() - .get(DispatchClass::Normal) - .max_extrinsic - .unwrap_or(Weight::MAX) - } -} - -impl ChainWithGrandpa for PolkadotBulletin { - const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = WITH_POLKADOT_BULLETIN_GRANDPA_PALLET_NAME; - const MAX_AUTHORITIES_COUNT: u32 = MAX_AUTHORITIES_COUNT; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = - REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY; - const MAX_MANDATORY_HEADER_SIZE: u32 = MAX_MANDATORY_HEADER_SIZE; - const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; -} - -impl ChainWithMessages for PolkadotBulletin { - const WITH_CHAIN_MESSAGES_PALLET_NAME: &'static str = - WITH_POLKADOT_BULLETIN_MESSAGES_PALLET_NAME; - - const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = - MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; - const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = - MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; -} - -decl_bridge_finality_runtime_apis!(polkadot_bulletin, grandpa); -decl_bridge_messages_runtime_apis!(polkadot_bulletin); diff --git a/primitives/chain-polkadot/Cargo.toml b/primitives/chain-polkadot/Cargo.toml deleted file mode 100644 index 20080a7901b6996dd516ad38d73771b975a827d3..0000000000000000000000000000000000000000 --- 
a/primitives/chain-polkadot/Cargo.toml +++ /dev/null @@ -1,35 +0,0 @@ -[package] -name = "bp-polkadot" -description = "Primitives of Polkadot runtime." -version = "0.5.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] - -# Bridge Dependencies - -bp-header-chain = { path = "../header-chain", default-features = false } -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } - -# Substrate Based Dependencies - -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } - -[features] -default = ["std"] -std = [ - "bp-header-chain/std", - "bp-polkadot-core/std", - "bp-runtime/std", - "frame-support/std", - "sp-api/std", - "sp-std/std", -] diff --git a/primitives/chain-polkadot/src/lib.rs b/primitives/chain-polkadot/src/lib.rs deleted file mode 100644 index 458c6545149f4b884a37d694d408c96c9c19c0dc..0000000000000000000000000000000000000000 --- a/primitives/chain-polkadot/src/lib.rs +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Primitives of the Polkadot chain. - -#![warn(missing_docs)] -#![cfg_attr(not(feature = "std"), no_std)] - -pub use bp_polkadot_core::*; - -use bp_header_chain::ChainWithGrandpa; -use bp_runtime::{ - decl_bridge_finality_runtime_apis, extensions::PrevalidateAttests, Chain, ChainId, -}; -use frame_support::weights::Weight; - -/// Polkadot Chain -pub struct Polkadot; - -impl Chain for Polkadot { - const ID: ChainId = *b"pdot"; - - type BlockNumber = BlockNumber; - type Hash = Hash; - type Hasher = Hasher; - type Header = Header; - - type AccountId = AccountId; - type Balance = Balance; - type Nonce = Nonce; - type Signature = Signature; - - fn max_extrinsic_size() -> u32 { - max_extrinsic_size() - } - - fn max_extrinsic_weight() -> Weight { - max_extrinsic_weight() - } -} - -impl ChainWithGrandpa for Polkadot { - const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = WITH_POLKADOT_GRANDPA_PALLET_NAME; - const MAX_AUTHORITIES_COUNT: u32 = MAX_AUTHORITIES_COUNT; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = - REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY; - const MAX_MANDATORY_HEADER_SIZE: u32 = MAX_MANDATORY_HEADER_SIZE; - const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; -} - -/// The TransactionExtension used by Polkadot. -pub type TransactionExtension = SuffixedCommonTransactionExtension; - -/// Name of the parachains pallet in the Polkadot runtime. -pub const PARAS_PALLET_NAME: &str = "Paras"; - -/// Name of the With-Polkadot GRANDPA pallet instance that is deployed at bridged chains. -pub const WITH_POLKADOT_GRANDPA_PALLET_NAME: &str = "BridgePolkadotGrandpa"; -/// Name of the With-Polkadot parachains pallet instance that is deployed at bridged chains. 
-pub const WITH_POLKADOT_BRIDGE_PARACHAINS_PALLET_NAME: &str = "BridgePolkadotParachains"; - -/// Maximal size of encoded `bp_parachains::ParaStoredHeaderData` structure among all Polkadot -/// parachains. -/// -/// It includes the block number and state root, so it shall be near 40 bytes, but let's have some -/// reserve. -pub const MAX_NESTED_PARACHAIN_HEAD_DATA_SIZE: u32 = 128; - -decl_bridge_finality_runtime_apis!(polkadot, grandpa); diff --git a/primitives/chain-rococo/Cargo.toml b/primitives/chain-rococo/Cargo.toml deleted file mode 100644 index 459ee6f9f0517d27487a9ae9edd6fa34b10ba6da..0000000000000000000000000000000000000000 --- a/primitives/chain-rococo/Cargo.toml +++ /dev/null @@ -1,35 +0,0 @@ -[package] -name = "bp-rococo" -description = "Primitives of Rococo runtime." -version = "0.6.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] - -# Bridge Dependencies - -bp-header-chain = { path = "../header-chain", default-features = false } -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } - -# Substrate Based Dependencies - -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } - -[features] -default = ["std"] -std = [ - "bp-header-chain/std", - "bp-polkadot-core/std", - "bp-runtime/std", - "frame-support/std", - "sp-api/std", - "sp-std/std", -] diff --git a/primitives/chain-rococo/src/lib.rs b/primitives/chain-rococo/src/lib.rs deleted file mode 100644 index 46f416963dbdad3e0502f4019f678d45ea0fcf9a..0000000000000000000000000000000000000000 --- a/primitives/chain-rococo/src/lib.rs +++ 
/dev/null @@ -1,80 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Primitives of the Rococo chain. - -#![warn(missing_docs)] -#![cfg_attr(not(feature = "std"), no_std)] - -pub use bp_polkadot_core::*; - -use bp_header_chain::ChainWithGrandpa; -use bp_runtime::{decl_bridge_finality_runtime_apis, Chain, ChainId}; -use frame_support::weights::Weight; - -/// Rococo Chain -pub struct Rococo; - -impl Chain for Rococo { - const ID: ChainId = *b"roco"; - - type BlockNumber = BlockNumber; - type Hash = Hash; - type Hasher = Hasher; - type Header = Header; - - type AccountId = AccountId; - type Balance = Balance; - type Nonce = Nonce; - type Signature = Signature; - - fn max_extrinsic_size() -> u32 { - max_extrinsic_size() - } - - fn max_extrinsic_weight() -> Weight { - max_extrinsic_weight() - } -} - -impl ChainWithGrandpa for Rococo { - const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = WITH_ROCOCO_GRANDPA_PALLET_NAME; - const MAX_AUTHORITIES_COUNT: u32 = MAX_AUTHORITIES_COUNT; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = - REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY; - const MAX_MANDATORY_HEADER_SIZE: u32 = MAX_MANDATORY_HEADER_SIZE; - const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; -} - -// The TransactionExtension used by Rococo. 
-pub use bp_polkadot_core::CommonTransactionExtension as TransactionExtension; - -/// Name of the parachains pallet in the Rococo runtime. -pub const PARAS_PALLET_NAME: &str = "Paras"; - -/// Name of the With-Rococo GRANDPA pallet instance that is deployed at bridged chains. -pub const WITH_ROCOCO_GRANDPA_PALLET_NAME: &str = "BridgeRococoGrandpa"; -/// Name of the With-Rococo parachains pallet instance that is deployed at bridged chains. -pub const WITH_ROCOCO_BRIDGE_PARACHAINS_PALLET_NAME: &str = "BridgeRococoParachains"; - -/// Maximal size of encoded `bp_parachains::ParaStoredHeaderData` structure among all Rococo -/// parachains. -/// -/// It includes the block number and state root, so it shall be near 40 bytes, but let's have some -/// reserve. -pub const MAX_NESTED_PARACHAIN_HEAD_DATA_SIZE: u32 = 128; - -decl_bridge_finality_runtime_apis!(rococo, grandpa); diff --git a/primitives/chain-westend/Cargo.toml b/primitives/chain-westend/Cargo.toml deleted file mode 100644 index 4710da7bb335d2c883e396af993bcda34b4424ec..0000000000000000000000000000000000000000 --- a/primitives/chain-westend/Cargo.toml +++ /dev/null @@ -1,35 +0,0 @@ -[package] -name = "bp-westend" -description = "Primitives of Westend runtime." 
-version = "0.3.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] - -# Bridge Dependencies - -bp-header-chain = { path = "../header-chain", default-features = false } -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } - -# Substrate Based Dependencies - -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } - -[features] -default = ["std"] -std = [ - "bp-header-chain/std", - "bp-polkadot-core/std", - "bp-runtime/std", - "frame-support/std", - "sp-api/std", - "sp-std/std", -] diff --git a/primitives/chain-westend/src/lib.rs b/primitives/chain-westend/src/lib.rs deleted file mode 100644 index 2aa4377bad271eab30c308892691e043f30fabd5..0000000000000000000000000000000000000000 --- a/primitives/chain-westend/src/lib.rs +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! 
Primitives of the Westend chain. - -#![warn(missing_docs)] -#![cfg_attr(not(feature = "std"), no_std)] - -pub use bp_polkadot_core::*; - -use bp_header_chain::ChainWithGrandpa; -use bp_runtime::{decl_bridge_finality_runtime_apis, Chain, ChainId}; -use frame_support::weights::Weight; - -/// Westend Chain -pub struct Westend; - -impl Chain for Westend { - const ID: ChainId = *b"wend"; - - type BlockNumber = BlockNumber; - type Hash = Hash; - type Hasher = Hasher; - type Header = Header; - - type AccountId = AccountId; - type Balance = Balance; - type Nonce = Nonce; - type Signature = Signature; - - fn max_extrinsic_size() -> u32 { - max_extrinsic_size() - } - - fn max_extrinsic_weight() -> Weight { - max_extrinsic_weight() - } -} - -impl ChainWithGrandpa for Westend { - const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = WITH_WESTEND_GRANDPA_PALLET_NAME; - const MAX_AUTHORITIES_COUNT: u32 = MAX_AUTHORITIES_COUNT; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = - REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY; - const MAX_MANDATORY_HEADER_SIZE: u32 = MAX_MANDATORY_HEADER_SIZE; - const AVERAGE_HEADER_SIZE: u32 = AVERAGE_HEADER_SIZE; -} - -// The TransactionExtension used by Westend. -pub use bp_polkadot_core::CommonTransactionExtension as TransactionExtension; - -/// Name of the parachains pallet in the Rococo runtime. -pub const PARAS_PALLET_NAME: &str = "Paras"; - -/// Name of the With-Westend GRANDPA pallet instance that is deployed at bridged chains. -pub const WITH_WESTEND_GRANDPA_PALLET_NAME: &str = "BridgeWestendGrandpa"; -/// Name of the With-Westend parachains pallet instance that is deployed at bridged chains. -pub const WITH_WESTEND_BRIDGE_PARACHAINS_PALLET_NAME: &str = "BridgeWestendParachains"; - -/// Maximal size of encoded `bp_parachains::ParaStoredHeaderData` structure among all Westend -/// parachains. -/// -/// It includes the block number and state root, so it shall be near 40 bytes, but let's have some -/// reserve. 
-pub const MAX_NESTED_PARACHAIN_HEAD_DATA_SIZE: u32 = 128; - -decl_bridge_finality_runtime_apis!(westend, grandpa); diff --git a/primitives/header-chain/Cargo.toml b/primitives/header-chain/Cargo.toml deleted file mode 100644 index 7167c41046025bb7fdcae0d63cac0251d419b139..0000000000000000000000000000000000000000 --- a/primitives/header-chain/Cargo.toml +++ /dev/null @@ -1,48 +0,0 @@ -[package] -name = "bp-header-chain" -description = "A common interface for describing what a bridge pallet should be able to do." -version = "0.7.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } -finality-grandpa = { version = "0.16.2", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { features = ["alloc", "derive"], workspace = true } - -# Bridge dependencies - -bp-runtime = { path = "../runtime", default-features = false } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false, features = ["serde"] } -sp-consensus-grandpa = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false, features = ["serde"] } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false, features = ["serde"] } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } - -[dev-dependencies] -bp-test-utils = { path = "../test-utils" } -hex = "0.4" -hex-literal = "0.4" - -[features] -default = ["std"] -std = [ - "bp-runtime/std", - "codec/std", - "finality-grandpa/std", - "frame-support/std", - 
"scale-info/std", - "serde/std", - "sp-consensus-grandpa/std", - "sp-core/std", - "sp-runtime/std", - "sp-std/std", -] diff --git a/primitives/header-chain/src/justification/mod.rs b/primitives/header-chain/src/justification/mod.rs deleted file mode 100644 index b32d8bdb5f1d8ce05722c938a083d7f582139835..0000000000000000000000000000000000000000 --- a/primitives/header-chain/src/justification/mod.rs +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Logic for checking GRANDPA Finality Proofs. -//! -//! Adapted copy of substrate/client/finality-grandpa/src/justification.rs. If origin -//! will ever be moved to the sp_consensus_grandpa, we should reuse that implementation. 
- -mod verification; - -use crate::ChainWithGrandpa; -pub use verification::{ - equivocation::{EquivocationsCollector, GrandpaEquivocationsFinder}, - optimizer::verify_and_optimize_justification, - strict::verify_justification, - AncestryChain, Error as JustificationVerificationError, JustificationVerificationContext, - PrecommitError, -}; - -use bp_runtime::{BlockNumberOf, Chain, HashOf, HeaderId}; -use codec::{Decode, Encode, MaxEncodedLen}; -use frame_support::RuntimeDebugNoBound; -use scale_info::TypeInfo; -use sp_consensus_grandpa::{AuthorityId, AuthoritySignature}; -use sp_runtime::{traits::Header as HeaderT, RuntimeDebug, SaturatedConversion}; -use sp_std::prelude::*; - -/// A GRANDPA Justification is a proof that a given header was finalized -/// at a certain height and with a certain set of authorities. -/// -/// This particular proof is used to prove that headers on a bridged chain -/// (so not our chain) have been finalized correctly. -#[derive(Encode, Decode, Clone, PartialEq, Eq, TypeInfo, RuntimeDebugNoBound)] -pub struct GrandpaJustification { - /// The round (voting period) this justification is valid for. - pub round: u64, - /// The set of votes for the chain which is to be finalized. - pub commit: - finality_grandpa::Commit, - /// A proof that the chain of blocks in the commit are related to each other. - pub votes_ancestries: Vec
, -} - -impl GrandpaJustification { - /// Returns reasonable size of justification using constants from the provided chain. - /// - /// An imprecise analogue of `MaxEncodedLen` implementation. We don't use it for - /// any precise calculations - that's just an estimation. - pub fn max_reasonable_size(required_precommits: u32) -> u32 - where - C: Chain + ChainWithGrandpa, - { - // we don't need precise results here - just estimations, so some details - // are removed from computations (e.g. bytes required to encode vector length) - - // structures in `finality_grandpa` crate are not implementing `MaxEncodedLength`, so - // here's our estimation for the `finality_grandpa::Commit` struct size - // - // precommit is: hash + number - // signed precommit is: precommit + signature (64b) + authority id - // commit is: hash + number + vec of signed precommits - let signed_precommit_size: u32 = BlockNumberOf::::max_encoded_len() - .saturating_add(HashOf::::max_encoded_len().saturated_into()) - .saturating_add(64) - .saturating_add(AuthorityId::max_encoded_len().saturated_into()) - .saturated_into(); - let max_expected_signed_commit_size = signed_precommit_size - .saturating_mul(required_precommits) - .saturating_add(BlockNumberOf::::max_encoded_len().saturated_into()) - .saturating_add(HashOf::::max_encoded_len().saturated_into()); - - let max_expected_votes_ancestries_size = - C::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY.saturating_mul(C::AVERAGE_HEADER_SIZE); - - // justification is round number (u64=8b), a signed GRANDPA commit and the - // `votes_ancestries` vector - 8u32.saturating_add(max_expected_signed_commit_size) - .saturating_add(max_expected_votes_ancestries_size) - } - - /// Return identifier of header that this justification claims to finalize. 
- pub fn commit_target_id(&self) -> HeaderId { - HeaderId(self.commit.target_number, self.commit.target_hash) - } -} - -impl crate::FinalityProof for GrandpaJustification { - fn target_header_hash(&self) -> H::Hash { - self.commit.target_hash - } - - fn target_header_number(&self) -> H::Number { - self.commit.target_number - } -} - -/// Justification verification error. -#[derive(Eq, RuntimeDebug, PartialEq)] -pub enum Error { - /// Failed to decode justification. - JustificationDecode, -} - -/// Given GRANDPA authorities set size, return number of valid authorities votes that the -/// justification must have to be valid. -/// -/// This function assumes that all authorities have the same vote weight. -pub fn required_justification_precommits(authorities_set_length: u32) -> u32 { - authorities_set_length - authorities_set_length.saturating_sub(1) / 3 -} - -/// Decode justification target. -pub fn decode_justification_target( - raw_justification: &[u8], -) -> Result<(Header::Hash, Header::Number), Error> { - GrandpaJustification::
::decode(&mut &*raw_justification) - .map(|justification| (justification.commit.target_hash, justification.commit.target_number)) - .map_err(|_| Error::JustificationDecode) -} diff --git a/primitives/header-chain/src/justification/verification/equivocation.rs b/primitives/header-chain/src/justification/verification/equivocation.rs deleted file mode 100644 index fbad301281994ae9e0ed32444c3373e032e4355e..0000000000000000000000000000000000000000 --- a/primitives/header-chain/src/justification/verification/equivocation.rs +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Logic for extracting equivocations from multiple GRANDPA Finality Proofs. 
- -use crate::{ - justification::{ - verification::{ - Error as JustificationVerificationError, IterationFlow, - JustificationVerificationContext, JustificationVerifier, PrecommitError, - SignedPrecommit, - }, - GrandpaJustification, - }, - ChainWithGrandpa, FindEquivocations, -}; - -use bp_runtime::{BlockNumberOf, HashOf, HeaderOf}; -use sp_consensus_grandpa::{AuthorityId, AuthoritySignature, EquivocationProof, Precommit}; -use sp_runtime::traits::Header as HeaderT; -use sp_std::{ - collections::{btree_map::BTreeMap, btree_set::BTreeSet}, - prelude::*, -}; - -enum AuthorityVotes { - SingleVote(SignedPrecommit
), - Equivocation( - finality_grandpa::Equivocation, AuthoritySignature>, - ), -} - -/// Structure that can extract equivocations from multiple GRANDPA justifications. -pub struct EquivocationsCollector<'a, Header: HeaderT> { - round: u64, - context: &'a JustificationVerificationContext, - - votes: BTreeMap>, -} - -impl<'a, Header: HeaderT> EquivocationsCollector<'a, Header> { - /// Create a new instance of `EquivocationsCollector`. - pub fn new( - context: &'a JustificationVerificationContext, - base_justification: &GrandpaJustification
, - ) -> Result { - let mut checker = Self { round: base_justification.round, context, votes: BTreeMap::new() }; - - checker.verify_justification( - (base_justification.commit.target_hash, base_justification.commit.target_number), - checker.context, - base_justification, - )?; - - Ok(checker) - } - - /// Parse additional justifications for equivocations. - pub fn parse_justifications(&mut self, justifications: &[GrandpaJustification
]) { - let round = self.round; - for justification in - justifications.iter().filter(|justification| round == justification.round) - { - // We ignore the Errors received here since we don't care if the proofs are valid. - // We only care about collecting equivocations. - let _ = self.verify_justification( - (justification.commit.target_hash, justification.commit.target_number), - self.context, - justification, - ); - } - } - - /// Extract the equivocation proofs that have been collected. - pub fn into_equivocation_proofs(self) -> Vec> { - let mut equivocations = vec![]; - for (_authority, vote) in self.votes { - if let AuthorityVotes::Equivocation(equivocation) = vote { - equivocations.push(EquivocationProof::new( - self.context.authority_set_id, - sp_consensus_grandpa::Equivocation::Precommit(equivocation), - )); - } - } - - equivocations - } -} - -impl<'a, Header: HeaderT> JustificationVerifier
for EquivocationsCollector<'a, Header> { - fn process_duplicate_votes_ancestries( - &mut self, - _duplicate_votes_ancestries: Vec, - ) -> Result<(), JustificationVerificationError> { - Ok(()) - } - - fn process_redundant_vote( - &mut self, - _precommit_idx: usize, - ) -> Result { - Ok(IterationFlow::Run) - } - - fn process_known_authority_vote( - &mut self, - _precommit_idx: usize, - _signed: &SignedPrecommit
, - ) -> Result { - Ok(IterationFlow::Run) - } - - fn process_unknown_authority_vote( - &mut self, - _precommit_idx: usize, - ) -> Result<(), PrecommitError> { - Ok(()) - } - - fn process_unrelated_ancestry_vote( - &mut self, - _precommit_idx: usize, - ) -> Result { - Ok(IterationFlow::Run) - } - - fn process_invalid_signature_vote( - &mut self, - _precommit_idx: usize, - ) -> Result<(), PrecommitError> { - Ok(()) - } - - fn process_valid_vote(&mut self, signed: &SignedPrecommit
) { - match self.votes.get_mut(&signed.id) { - Some(vote) => match vote { - AuthorityVotes::SingleVote(first_vote) => { - if first_vote.precommit != signed.precommit { - *vote = AuthorityVotes::Equivocation(finality_grandpa::Equivocation { - round_number: self.round, - identity: signed.id.clone(), - first: (first_vote.precommit.clone(), first_vote.signature.clone()), - second: (signed.precommit.clone(), signed.signature.clone()), - }); - } - }, - AuthorityVotes::Equivocation(_) => {}, - }, - None => { - self.votes.insert(signed.id.clone(), AuthorityVotes::SingleVote(signed.clone())); - }, - } - } - - fn process_redundant_votes_ancestries( - &mut self, - _redundant_votes_ancestries: BTreeSet, - ) -> Result<(), JustificationVerificationError> { - Ok(()) - } -} - -/// Helper struct for finding equivocations in GRANDPA proofs. -pub struct GrandpaEquivocationsFinder(sp_std::marker::PhantomData); - -impl - FindEquivocations< - GrandpaJustification>, - JustificationVerificationContext, - EquivocationProof, BlockNumberOf>, - > for GrandpaEquivocationsFinder -{ - type Error = JustificationVerificationError; - - fn find_equivocations( - verification_context: &JustificationVerificationContext, - synced_proof: &GrandpaJustification>, - source_proofs: &[GrandpaJustification>], - ) -> Result, BlockNumberOf>>, Self::Error> { - let mut equivocations_collector = - EquivocationsCollector::new(verification_context, synced_proof)?; - - equivocations_collector.parse_justifications(source_proofs); - - Ok(equivocations_collector.into_equivocation_proofs()) - } -} diff --git a/primitives/header-chain/src/justification/verification/mod.rs b/primitives/header-chain/src/justification/verification/mod.rs deleted file mode 100644 index c71149bf9c28e350fb43429623ca47cd367b9091..0000000000000000000000000000000000000000 --- a/primitives/header-chain/src/justification/verification/mod.rs +++ /dev/null @@ -1,333 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. 
-// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Logic for checking GRANDPA Finality Proofs. - -pub mod equivocation; -pub mod optimizer; -pub mod strict; - -use crate::{justification::GrandpaJustification, AuthoritySet}; - -use bp_runtime::HeaderId; -use finality_grandpa::voter_set::VoterSet; -use sp_consensus_grandpa::{AuthorityId, AuthoritySignature, SetId}; -use sp_runtime::{traits::Header as HeaderT, RuntimeDebug}; -use sp_std::{ - collections::{ - btree_map::{ - BTreeMap, - Entry::{Occupied, Vacant}, - }, - btree_set::BTreeSet, - }, - prelude::*, -}; - -type SignedPrecommit
= finality_grandpa::SignedPrecommit< -
::Hash, -
::Number, - AuthoritySignature, - AuthorityId, ->; - -/// Votes ancestries with useful methods. -#[derive(RuntimeDebug)] -pub struct AncestryChain { - /// We expect all forks in the ancestry chain to be descendants of base. - base: HeaderId, - /// Header hash => parent header hash mapping. - parents: BTreeMap, - /// Hashes of headers that were not visited by `ancestry()`. - unvisited: BTreeSet, -} - -impl AncestryChain
{ - /// Creates a new instance of `AncestryChain` starting from a `GrandpaJustification`. - /// - /// Returns the `AncestryChain` and a `Vec` containing the `votes_ancestries` entries - /// that were ignored when creating it, because they are duplicates. - pub fn new( - justification: &GrandpaJustification
, - ) -> (AncestryChain
, Vec) { - let mut parents = BTreeMap::new(); - let mut unvisited = BTreeSet::new(); - let mut ignored_idxs = Vec::new(); - for (idx, ancestor) in justification.votes_ancestries.iter().enumerate() { - let hash = ancestor.hash(); - match parents.entry(hash) { - Occupied(_) => { - ignored_idxs.push(idx); - }, - Vacant(entry) => { - entry.insert(*ancestor.parent_hash()); - unvisited.insert(hash); - }, - } - } - (AncestryChain { base: justification.commit_target_id(), parents, unvisited }, ignored_idxs) - } - - /// Returns the hash of a block's parent if the block is present in the ancestry. - pub fn parent_hash_of(&self, hash: &Header::Hash) -> Option<&Header::Hash> { - self.parents.get(hash) - } - - /// Returns a route if the precommit target block is a descendant of the `base` block. - pub fn ancestry( - &self, - precommit_target_hash: &Header::Hash, - precommit_target_number: &Header::Number, - ) -> Option> { - if precommit_target_number < &self.base.number() { - return None - } - - let mut route = vec![]; - let mut current_hash = *precommit_target_hash; - loop { - if current_hash == self.base.hash() { - break - } - - current_hash = match self.parent_hash_of(¤t_hash) { - Some(parent_hash) => { - let is_visited_before = self.unvisited.get(¤t_hash).is_none(); - if is_visited_before { - // If the current header has been visited in a previous call, it is a - // descendent of `base` (we assume that the previous call was successful). - return Some(route) - } - route.push(current_hash); - - *parent_hash - }, - None => return None, - }; - } - - Some(route) - } - - fn mark_route_as_visited(&mut self, route: Vec) { - for hash in route { - self.unvisited.remove(&hash); - } - } - - fn is_fully_visited(&self) -> bool { - self.unvisited.is_empty() - } -} - -/// Justification verification error. -#[derive(Eq, RuntimeDebug, PartialEq)] -pub enum Error { - /// Could not convert `AuthorityList` to `VoterSet`. 
- InvalidAuthorityList, - /// Justification is finalizing unexpected header. - InvalidJustificationTarget, - /// The justification contains duplicate headers in its `votes_ancestries` field. - DuplicateVotesAncestries, - /// Error validating a precommit - Precommit(PrecommitError), - /// The cumulative weight of all votes in the justification is not enough to justify commit - /// header finalization. - TooLowCumulativeWeight, - /// The justification contains extra (unused) headers in its `votes_ancestries` field. - RedundantVotesAncestries, -} - -/// Justification verification error. -#[derive(Eq, RuntimeDebug, PartialEq)] -pub enum PrecommitError { - /// Justification contains redundant votes. - RedundantAuthorityVote, - /// Justification contains unknown authority precommit. - UnknownAuthorityVote, - /// Justification contains duplicate authority precommit. - DuplicateAuthorityVote, - /// The authority has provided an invalid signature. - InvalidAuthoritySignature, - /// The justification contains precommit for header that is not a descendant of the commit - /// header. - UnrelatedAncestryVote, -} - -/// The context needed for validating GRANDPA finality proofs. -#[derive(RuntimeDebug)] -pub struct JustificationVerificationContext { - /// The authority set used to verify the justification. - pub voter_set: VoterSet, - /// The ID of the authority set used to verify the justification. - pub authority_set_id: SetId, -} - -impl TryFrom for JustificationVerificationContext { - type Error = Error; - - fn try_from(authority_set: AuthoritySet) -> Result { - let voter_set = - VoterSet::new(authority_set.authorities).ok_or(Error::InvalidAuthorityList)?; - Ok(JustificationVerificationContext { voter_set, authority_set_id: authority_set.set_id }) - } -} - -enum IterationFlow { - Run, - Skip, -} - -/// Verification callbacks. -trait JustificationVerifier { - /// Called when there are duplicate headers in the votes ancestries. 
- fn process_duplicate_votes_ancestries( - &mut self, - duplicate_votes_ancestries: Vec, - ) -> Result<(), Error>; - - fn process_redundant_vote( - &mut self, - precommit_idx: usize, - ) -> Result; - - fn process_known_authority_vote( - &mut self, - precommit_idx: usize, - signed: &SignedPrecommit
, - ) -> Result; - - fn process_unknown_authority_vote( - &mut self, - precommit_idx: usize, - ) -> Result<(), PrecommitError>; - - fn process_unrelated_ancestry_vote( - &mut self, - precommit_idx: usize, - ) -> Result; - - fn process_invalid_signature_vote( - &mut self, - precommit_idx: usize, - ) -> Result<(), PrecommitError>; - - fn process_valid_vote(&mut self, signed: &SignedPrecommit
); - - /// Called when there are redundant headers in the votes ancestries. - fn process_redundant_votes_ancestries( - &mut self, - redundant_votes_ancestries: BTreeSet, - ) -> Result<(), Error>; - - fn verify_justification( - &mut self, - finalized_target: (Header::Hash, Header::Number), - context: &JustificationVerificationContext, - justification: &GrandpaJustification
, - ) -> Result<(), Error> { - // ensure that it is justification for the expected header - if (justification.commit.target_hash, justification.commit.target_number) != - finalized_target - { - return Err(Error::InvalidJustificationTarget) - } - - let threshold = context.voter_set.threshold().get(); - let (mut chain, ignored_idxs) = AncestryChain::new(justification); - let mut signature_buffer = Vec::new(); - let mut cumulative_weight = 0u64; - - if !ignored_idxs.is_empty() { - self.process_duplicate_votes_ancestries(ignored_idxs)?; - } - - for (precommit_idx, signed) in justification.commit.precommits.iter().enumerate() { - if cumulative_weight >= threshold { - let action = - self.process_redundant_vote(precommit_idx).map_err(Error::Precommit)?; - if matches!(action, IterationFlow::Skip) { - continue - } - } - - // authority must be in the set - let authority_info = match context.voter_set.get(&signed.id) { - Some(authority_info) => { - // The implementer may want to do extra checks here. - // For example to see if the authority has already voted in the same round. 
- let action = self - .process_known_authority_vote(precommit_idx, signed) - .map_err(Error::Precommit)?; - if matches!(action, IterationFlow::Skip) { - continue - } - - authority_info - }, - None => { - self.process_unknown_authority_vote(precommit_idx).map_err(Error::Precommit)?; - continue - }, - }; - - // all precommits must be descendants of the target block - let maybe_route = - chain.ancestry(&signed.precommit.target_hash, &signed.precommit.target_number); - if maybe_route.is_none() { - let action = self - .process_unrelated_ancestry_vote(precommit_idx) - .map_err(Error::Precommit)?; - if matches!(action, IterationFlow::Skip) { - continue - } - } - - // verify authority signature - if !sp_consensus_grandpa::check_message_signature_with_buffer( - &finality_grandpa::Message::Precommit(signed.precommit.clone()), - &signed.id, - &signed.signature, - justification.round, - context.authority_set_id, - &mut signature_buffer, - ) { - self.process_invalid_signature_vote(precommit_idx).map_err(Error::Precommit)?; - continue - } - - // now we can count the vote since we know that it is valid - self.process_valid_vote(signed); - if let Some(route) = maybe_route { - chain.mark_route_as_visited(route); - cumulative_weight = cumulative_weight.saturating_add(authority_info.weight().get()); - } - } - - // check that the cumulative weight of validators that voted for the justification target - // (or one of its descendents) is larger than the required threshold. 
- if cumulative_weight < threshold { - return Err(Error::TooLowCumulativeWeight) - } - - // check that there are no extra headers in the justification - if !chain.is_fully_visited() { - self.process_redundant_votes_ancestries(chain.unvisited)?; - } - - Ok(()) - } -} diff --git a/primitives/header-chain/src/justification/verification/optimizer.rs b/primitives/header-chain/src/justification/verification/optimizer.rs deleted file mode 100644 index 3f1e6ab670ca65283b1efcecfb1cb163c3a742d2..0000000000000000000000000000000000000000 --- a/primitives/header-chain/src/justification/verification/optimizer.rs +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Logic for optimizing GRANDPA Finality Proofs. - -use crate::justification::{ - verification::{Error, JustificationVerifier, PrecommitError}, - GrandpaJustification, -}; - -use crate::justification::verification::{ - IterationFlow, JustificationVerificationContext, SignedPrecommit, -}; -use sp_consensus_grandpa::AuthorityId; -use sp_runtime::traits::Header as HeaderT; -use sp_std::{collections::btree_set::BTreeSet, prelude::*}; - -// Verification callbacks for justification optimization. 
-struct JustificationOptimizer { - votes: BTreeSet, - - extra_precommits: Vec, - duplicate_votes_ancestries_idxs: Vec, - redundant_votes_ancestries: BTreeSet, -} - -impl JustificationOptimizer
{ - fn optimize(self, justification: &mut GrandpaJustification
) { - for invalid_precommit_idx in self.extra_precommits.into_iter().rev() { - justification.commit.precommits.remove(invalid_precommit_idx); - } - if !self.duplicate_votes_ancestries_idxs.is_empty() { - for idx in self.duplicate_votes_ancestries_idxs.iter().rev() { - justification.votes_ancestries.swap_remove(*idx); - } - } - if !self.redundant_votes_ancestries.is_empty() { - justification - .votes_ancestries - .retain(|header| !self.redundant_votes_ancestries.contains(&header.hash())) - } - } -} - -impl JustificationVerifier
for JustificationOptimizer
{ - fn process_duplicate_votes_ancestries( - &mut self, - duplicate_votes_ancestries: Vec, - ) -> Result<(), Error> { - self.duplicate_votes_ancestries_idxs = duplicate_votes_ancestries.to_vec(); - Ok(()) - } - - fn process_redundant_vote( - &mut self, - precommit_idx: usize, - ) -> Result { - self.extra_precommits.push(precommit_idx); - Ok(IterationFlow::Skip) - } - - fn process_known_authority_vote( - &mut self, - precommit_idx: usize, - signed: &SignedPrecommit
, - ) -> Result { - // Skip duplicate votes - if self.votes.contains(&signed.id) { - self.extra_precommits.push(precommit_idx); - return Ok(IterationFlow::Skip) - } - - Ok(IterationFlow::Run) - } - - fn process_unknown_authority_vote( - &mut self, - precommit_idx: usize, - ) -> Result<(), PrecommitError> { - self.extra_precommits.push(precommit_idx); - Ok(()) - } - - fn process_unrelated_ancestry_vote( - &mut self, - precommit_idx: usize, - ) -> Result { - self.extra_precommits.push(precommit_idx); - Ok(IterationFlow::Skip) - } - - fn process_invalid_signature_vote( - &mut self, - precommit_idx: usize, - ) -> Result<(), PrecommitError> { - self.extra_precommits.push(precommit_idx); - Ok(()) - } - - fn process_valid_vote(&mut self, signed: &SignedPrecommit
) { - self.votes.insert(signed.id.clone()); - } - - fn process_redundant_votes_ancestries( - &mut self, - redundant_votes_ancestries: BTreeSet, - ) -> Result<(), Error> { - self.redundant_votes_ancestries = redundant_votes_ancestries; - Ok(()) - } -} - -/// Verify and optimize given justification by removing unknown and duplicate votes. -pub fn verify_and_optimize_justification( - finalized_target: (Header::Hash, Header::Number), - context: &JustificationVerificationContext, - justification: &mut GrandpaJustification
, -) -> Result<(), Error> { - let mut optimizer = JustificationOptimizer { - votes: BTreeSet::new(), - extra_precommits: vec![], - duplicate_votes_ancestries_idxs: vec![], - redundant_votes_ancestries: Default::default(), - }; - optimizer.verify_justification(finalized_target, context, justification)?; - optimizer.optimize(justification); - - Ok(()) -} diff --git a/primitives/header-chain/src/justification/verification/strict.rs b/primitives/header-chain/src/justification/verification/strict.rs deleted file mode 100644 index 858cf517a431e0ce958e06465afeba59c2cc0867..0000000000000000000000000000000000000000 --- a/primitives/header-chain/src/justification/verification/strict.rs +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Logic for checking if GRANDPA Finality Proofs are valid and optimal. 
- -use crate::justification::{ - verification::{Error, JustificationVerifier, PrecommitError}, - GrandpaJustification, -}; - -use crate::justification::verification::{ - IterationFlow, JustificationVerificationContext, SignedPrecommit, -}; -use sp_consensus_grandpa::AuthorityId; -use sp_runtime::traits::Header as HeaderT; -use sp_std::{collections::btree_set::BTreeSet, vec::Vec}; - -/// Verification callbacks that reject all unknown, duplicate or redundant votes. -struct StrictJustificationVerifier { - votes: BTreeSet, -} - -impl JustificationVerifier
for StrictJustificationVerifier { - fn process_duplicate_votes_ancestries( - &mut self, - _duplicate_votes_ancestries: Vec, - ) -> Result<(), Error> { - Err(Error::DuplicateVotesAncestries) - } - - fn process_redundant_vote( - &mut self, - _precommit_idx: usize, - ) -> Result { - Err(PrecommitError::RedundantAuthorityVote) - } - - fn process_known_authority_vote( - &mut self, - _precommit_idx: usize, - signed: &SignedPrecommit
, - ) -> Result { - if self.votes.contains(&signed.id) { - // There's a lot of code in `validate_commit` and `import_precommit` functions - // inside `finality-grandpa` crate (mostly related to reporting equivocations). - // But the only thing that we care about is that only first vote from the - // authority is accepted - return Err(PrecommitError::DuplicateAuthorityVote) - } - - Ok(IterationFlow::Run) - } - - fn process_unknown_authority_vote( - &mut self, - _precommit_idx: usize, - ) -> Result<(), PrecommitError> { - Err(PrecommitError::UnknownAuthorityVote) - } - - fn process_unrelated_ancestry_vote( - &mut self, - _precommit_idx: usize, - ) -> Result { - Err(PrecommitError::UnrelatedAncestryVote) - } - - fn process_invalid_signature_vote( - &mut self, - _precommit_idx: usize, - ) -> Result<(), PrecommitError> { - Err(PrecommitError::InvalidAuthoritySignature) - } - - fn process_valid_vote(&mut self, signed: &SignedPrecommit
) { - self.votes.insert(signed.id.clone()); - } - - fn process_redundant_votes_ancestries( - &mut self, - _redundant_votes_ancestries: BTreeSet, - ) -> Result<(), Error> { - Err(Error::RedundantVotesAncestries) - } -} - -/// Verify that justification, that is generated by given authority set, finalizes given header. -pub fn verify_justification( - finalized_target: (Header::Hash, Header::Number), - context: &JustificationVerificationContext, - justification: &GrandpaJustification
, -) -> Result<(), Error> { - let mut verifier = StrictJustificationVerifier { votes: BTreeSet::new() }; - verifier.verify_justification(finalized_target, context, justification) -} diff --git a/primitives/header-chain/src/lib.rs b/primitives/header-chain/src/lib.rs deleted file mode 100644 index 84a6a881a835b8afc3b5cde8992df1733859d29a..0000000000000000000000000000000000000000 --- a/primitives/header-chain/src/lib.rs +++ /dev/null @@ -1,388 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Defines traits which represent a common interface for Substrate pallets which want to -//! incorporate bridge functionality. 
- -#![warn(missing_docs)] -#![cfg_attr(not(feature = "std"), no_std)] - -use crate::justification::{ - GrandpaJustification, JustificationVerificationContext, JustificationVerificationError, -}; -use bp_runtime::{ - BasicOperatingMode, Chain, HashOf, HasherOf, HeaderOf, RawStorageProof, StorageProofChecker, - StorageProofError, UnderlyingChainProvider, -}; -use codec::{Codec, Decode, Encode, EncodeLike, MaxEncodedLen}; -use core::{clone::Clone, cmp::Eq, default::Default, fmt::Debug}; -use frame_support::PalletError; -use scale_info::TypeInfo; -use serde::{Deserialize, Serialize}; -use sp_consensus_grandpa::{AuthorityList, ConsensusLog, SetId, GRANDPA_ENGINE_ID}; -use sp_runtime::{traits::Header as HeaderT, Digest, RuntimeDebug}; -use sp_std::{boxed::Box, vec::Vec}; - -pub mod justification; -pub mod storage_keys; - -/// Header chain error. -#[derive(Clone, Decode, Encode, Eq, PartialEq, PalletError, Debug, TypeInfo)] -pub enum HeaderChainError { - /// Header with given hash is missing from the chain. - UnknownHeader, - /// Storage proof related error. - StorageProof(StorageProofError), -} - -/// Header data that we're storing on-chain. -/// -/// Even though we may store full header, our applications (XCM) only use couple of header -/// fields. Extracting those values makes on-chain storage and PoV smaller, which is good. -#[derive(Clone, Decode, Encode, Eq, MaxEncodedLen, PartialEq, RuntimeDebug, TypeInfo)] -pub struct StoredHeaderData { - /// Header number. - pub number: Number, - /// Header state root. - pub state_root: Hash, -} - -/// Stored header data builder. -pub trait StoredHeaderDataBuilder { - /// Build header data from self. - fn build(&self) -> StoredHeaderData; -} - -impl StoredHeaderDataBuilder for H { - fn build(&self) -> StoredHeaderData { - StoredHeaderData { number: *self.number(), state_root: *self.state_root() } - } -} - -/// Substrate header chain, abstracted from the way it is stored. 
-pub trait HeaderChain { - /// Returns state (storage) root of given finalized header. - fn finalized_header_state_root(header_hash: HashOf) -> Option>; - /// Get storage proof checker using finalized header. - fn storage_proof_checker( - header_hash: HashOf, - storage_proof: RawStorageProof, - ) -> Result>, HeaderChainError> { - let state_root = Self::finalized_header_state_root(header_hash) - .ok_or(HeaderChainError::UnknownHeader)?; - StorageProofChecker::new(state_root, storage_proof).map_err(HeaderChainError::StorageProof) - } -} - -/// A type that can be used as a parameter in a dispatchable function. -/// -/// When using `decl_module` all arguments for call functions must implement this trait. -pub trait Parameter: Codec + EncodeLike + Clone + Eq + Debug + TypeInfo {} -impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + Debug + TypeInfo {} - -/// A GRANDPA Authority List and ID. -#[derive(Default, Encode, Eq, Decode, RuntimeDebug, PartialEq, Clone, TypeInfo)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] -pub struct AuthoritySet { - /// List of GRANDPA authorities for the current round. - pub authorities: AuthorityList, - /// Monotonic identifier of the current GRANDPA authority set. - pub set_id: SetId, -} - -impl AuthoritySet { - /// Create a new GRANDPA Authority Set. - pub fn new(authorities: AuthorityList, set_id: SetId) -> Self { - Self { authorities, set_id } - } -} - -/// Data required for initializing the GRANDPA bridge pallet. -/// -/// The bridge needs to know where to start its sync from, and this provides that initial context. -#[derive( - Default, Encode, Decode, RuntimeDebug, PartialEq, Eq, Clone, TypeInfo, Serialize, Deserialize, -)] -pub struct InitializationData { - /// The header from which we should start syncing. - pub header: Box, - /// The initial authorities of the pallet. - pub authority_list: AuthorityList, - /// The ID of the initial authority set. - pub set_id: SetId, - /// Pallet operating mode. 
- pub operating_mode: BasicOperatingMode, -} - -/// Abstract finality proof that is justifying block finality. -pub trait FinalityProof: Clone + Send + Sync + Debug { - /// Return hash of header that this proof is generated for. - fn target_header_hash(&self) -> Hash; - - /// Return number of header that this proof is generated for. - fn target_header_number(&self) -> Number; -} - -/// A trait that provides helper methods for querying the consensus log. -pub trait ConsensusLogReader { - /// Returns true if digest contains item that schedules authorities set change. - fn schedules_authorities_change(digest: &Digest) -> bool; -} - -/// A struct that provides helper methods for querying the GRANDPA consensus log. -pub struct GrandpaConsensusLogReader(sp_std::marker::PhantomData); - -impl GrandpaConsensusLogReader { - /// Find and return scheduled (regular) change digest item. - pub fn find_scheduled_change( - digest: &Digest, - ) -> Option> { - // find the first consensus digest with the right ID which converts to - // the right kind of consensus log. - digest - .convert_first(|log| log.consensus_try_to(&GRANDPA_ENGINE_ID)) - .and_then(|log| match log { - ConsensusLog::ScheduledChange(change) => Some(change), - _ => None, - }) - } - - /// Find and return forced change digest item. Or light client can't do anything - /// with forced changes, so we can't accept header with the forced change digest. - pub fn find_forced_change( - digest: &Digest, - ) -> Option<(Number, sp_consensus_grandpa::ScheduledChange)> { - // find the first consensus digest with the right ID which converts to - // the right kind of consensus log. 
- digest - .convert_first(|log| log.consensus_try_to(&GRANDPA_ENGINE_ID)) - .and_then(|log| match log { - ConsensusLog::ForcedChange(delay, change) => Some((delay, change)), - _ => None, - }) - } -} - -impl ConsensusLogReader for GrandpaConsensusLogReader { - fn schedules_authorities_change(digest: &Digest) -> bool { - GrandpaConsensusLogReader::::find_scheduled_change(digest).is_some() - } -} - -/// The finality-related info associated to a header. -#[derive(Encode, Decode, Debug, PartialEq, Clone, TypeInfo)] -pub struct HeaderFinalityInfo { - /// The header finality proof. - pub finality_proof: FinalityProof, - /// The new verification context introduced by the header. - pub new_verification_context: Option, -} - -/// Grandpa-related info associated to a header. This info can be saved to events. -pub type StoredHeaderGrandpaInfo
= - HeaderFinalityInfo, AuthoritySet>; - -/// Processed Grandpa-related info associated to a header. -pub type HeaderGrandpaInfo
= - HeaderFinalityInfo, JustificationVerificationContext>; - -impl TryFrom> for HeaderGrandpaInfo
{ - type Error = JustificationVerificationError; - - fn try_from(grandpa_info: StoredHeaderGrandpaInfo
) -> Result { - Ok(Self { - finality_proof: grandpa_info.finality_proof, - new_verification_context: match grandpa_info.new_verification_context { - Some(authority_set) => Some(authority_set.try_into()?), - None => None, - }, - }) - } -} - -/// Helper trait for finding equivocations in finality proofs. -pub trait FindEquivocations { - /// The type returned when encountering an error while looking for equivocations. - type Error: Debug; - - /// Find equivocations. - fn find_equivocations( - verification_context: &FinalityVerificationContext, - synced_proof: &FinalityProof, - source_proofs: &[FinalityProof], - ) -> Result, Self::Error>; -} - -/// A minimized version of `pallet-bridge-grandpa::Call` that can be used without a runtime. -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -#[allow(non_camel_case_types)] -pub enum BridgeGrandpaCall { - /// `pallet-bridge-grandpa::Call::submit_finality_proof` - #[codec(index = 0)] - submit_finality_proof { - /// The header that we are going to finalize. - finality_target: Box
, - /// Finality justification for the `finality_target`. - justification: justification::GrandpaJustification
, - }, - /// `pallet-bridge-grandpa::Call::initialize` - #[codec(index = 1)] - initialize { - /// All data, required to initialize the pallet. - init_data: InitializationData
, - }, - /// `pallet-bridge-grandpa::Call::submit_finality_proof_ex` - #[codec(index = 4)] - submit_finality_proof_ex { - /// The header that we are going to finalize. - finality_target: Box
, - /// Finality justification for the `finality_target`. - justification: justification::GrandpaJustification
, - /// An identifier of the validators set, that have signed the justification. - current_set_id: SetId, - }, -} - -/// The `BridgeGrandpaCall` used by a chain. -pub type BridgeGrandpaCallOf = BridgeGrandpaCall>; - -/// Substrate-based chain that is using direct GRANDPA finality. -/// -/// Keep in mind that parachains are relying on relay chain GRANDPA, so they should not implement -/// this trait. -pub trait ChainWithGrandpa: Chain { - /// Name of the bridge GRANDPA pallet (used in `construct_runtime` macro call) that is deployed - /// at some other chain to bridge with this `ChainWithGrandpa`. - /// - /// We assume that all chains that are bridging with this `ChainWithGrandpa` are using - /// the same name. - const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str; - - /// Max number of GRANDPA authorities at the chain. - /// - /// This is a strict constant. If bridged chain will have more authorities than that, - /// the GRANDPA bridge pallet may halt. - const MAX_AUTHORITIES_COUNT: u32; - - /// Max reasonable number of headers in `votes_ancestries` vector of the GRANDPA justification. - /// - /// This isn't a strict limit. The relay may submit justifications with more headers in its - /// ancestry and the pallet will accept such justification. The limit is only used to compute - /// maximal refund amount and submitting justifications which exceed the limit, may be costly - /// to submitter. - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32; - - /// Maximal size of the mandatory chain header. Mandatory header is the header that enacts new - /// GRANDPA authorities set (so it has large digest inside). - /// - /// This isn't a strict limit. The relay may submit larger headers and the pallet will accept - /// the call. The limit is only used to compute maximal refund amount and doing calls which - /// exceed the limit, may be costly to submitter. - const MAX_MANDATORY_HEADER_SIZE: u32; - - /// Average size of the chain header. 
We don't expect to see there headers that change GRANDPA - /// authorities set (GRANDPA will probably be able to finalize at least one additional header - /// per session on non test chains), so this is average size of headers that aren't changing the - /// set. - /// - /// This isn't a strict limit. The relay may submit justifications with larger headers and the - /// pallet will accept the call. However, if the total size of all `submit_finality_proof` - /// arguments exceeds the maximal size, computed using this average size, relayer will only get - /// partial refund. - /// - /// We expect some headers on production chains that are above this size. But they are rare and - /// if rellayer cares about its profitability, we expect it'll select other headers for - /// submission. - const AVERAGE_HEADER_SIZE: u32; -} - -impl ChainWithGrandpa for T -where - T: Chain + UnderlyingChainProvider, - T::Chain: ChainWithGrandpa, -{ - const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = - ::WITH_CHAIN_GRANDPA_PALLET_NAME; - const MAX_AUTHORITIES_COUNT: u32 = ::MAX_AUTHORITIES_COUNT; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = - ::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY; - const MAX_MANDATORY_HEADER_SIZE: u32 = - ::MAX_MANDATORY_HEADER_SIZE; - const AVERAGE_HEADER_SIZE: u32 = ::AVERAGE_HEADER_SIZE; -} - -/// Returns maximal expected size of `submit_finality_proof` call arguments. 
-pub fn max_expected_submit_finality_proof_arguments_size( - is_mandatory_finality_target: bool, - precommits: u32, -) -> u32 { - let max_expected_justification_size = - GrandpaJustification::>::max_reasonable_size::(precommits); - - // call arguments are header and justification - let max_expected_finality_target_size = if is_mandatory_finality_target { - C::MAX_MANDATORY_HEADER_SIZE - } else { - C::AVERAGE_HEADER_SIZE - }; - max_expected_finality_target_size.saturating_add(max_expected_justification_size) -} - -#[cfg(test)] -mod tests { - use super::*; - use bp_runtime::ChainId; - use frame_support::weights::Weight; - use sp_runtime::{testing::H256, traits::BlakeTwo256, MultiSignature}; - - struct TestChain; - - impl Chain for TestChain { - const ID: ChainId = *b"test"; - - type BlockNumber = u32; - type Hash = H256; - type Hasher = BlakeTwo256; - type Header = sp_runtime::generic::Header; - type AccountId = u64; - type Balance = u64; - type Nonce = u64; - type Signature = MultiSignature; - - fn max_extrinsic_size() -> u32 { - 0 - } - fn max_extrinsic_weight() -> Weight { - Weight::zero() - } - } - - impl ChainWithGrandpa for TestChain { - const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = "Test"; - const MAX_AUTHORITIES_COUNT: u32 = 128; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 2; - const MAX_MANDATORY_HEADER_SIZE: u32 = 100_000; - const AVERAGE_HEADER_SIZE: u32 = 1_024; - } - - #[test] - fn max_expected_submit_finality_proof_arguments_size_respects_mandatory_argument() { - assert!( - max_expected_submit_finality_proof_arguments_size::(true, 100) > - max_expected_submit_finality_proof_arguments_size::(false, 100), - ); - } -} diff --git a/primitives/header-chain/src/storage_keys.rs b/primitives/header-chain/src/storage_keys.rs deleted file mode 100644 index 55d095afbf2ae5ab00848676e455bf6da3c91166..0000000000000000000000000000000000000000 --- a/primitives/header-chain/src/storage_keys.rs +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright (C) 
Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Storage keys of bridge GRANDPA pallet. - -/// Name of the `IsHalted` storage value. -pub const PALLET_OPERATING_MODE_VALUE_NAME: &str = "PalletOperatingMode"; -/// Name of the `BestFinalized` storage value. -pub const BEST_FINALIZED_VALUE_NAME: &str = "BestFinalized"; -/// Name of the `CurrentAuthoritySet` storage value. -pub const CURRENT_AUTHORITY_SET_VALUE_NAME: &str = "CurrentAuthoritySet"; - -use sp_core::storage::StorageKey; - -/// Storage key of the `PalletOperatingMode` variable in the runtime storage. -pub fn pallet_operating_mode_key(pallet_prefix: &str) -> StorageKey { - StorageKey( - bp_runtime::storage_value_final_key( - pallet_prefix.as_bytes(), - PALLET_OPERATING_MODE_VALUE_NAME.as_bytes(), - ) - .to_vec(), - ) -} - -/// Storage key of the `CurrentAuthoritySet` variable in the runtime storage. -pub fn current_authority_set_key(pallet_prefix: &str) -> StorageKey { - StorageKey( - bp_runtime::storage_value_final_key( - pallet_prefix.as_bytes(), - CURRENT_AUTHORITY_SET_VALUE_NAME.as_bytes(), - ) - .to_vec(), - ) -} - -/// Storage key of the best finalized header number and hash value in the runtime storage. 
-pub fn best_finalized_key(pallet_prefix: &str) -> StorageKey { - StorageKey( - bp_runtime::storage_value_final_key( - pallet_prefix.as_bytes(), - BEST_FINALIZED_VALUE_NAME.as_bytes(), - ) - .to_vec(), - ) -} - -#[cfg(test)] -mod tests { - use super::*; - use hex_literal::hex; - - #[test] - fn pallet_operating_mode_key_computed_properly() { - // If this test fails, then something has been changed in module storage that is breaking - // compatibility with previous pallet. - let storage_key = pallet_operating_mode_key("BridgeGrandpa").0; - assert_eq!( - storage_key, - hex!("0b06f475eddb98cf933a12262e0388de0f4cf0917788d791142ff6c1f216e7b3").to_vec(), - "Unexpected storage key: {}", - hex::encode(&storage_key), - ); - } - - #[test] - fn current_authority_set_key_computed_properly() { - // If this test fails, then something has been changed in module storage that is breaking - // compatibility with previous pallet. - let storage_key = current_authority_set_key("BridgeGrandpa").0; - assert_eq!( - storage_key, - hex!("0b06f475eddb98cf933a12262e0388de24a7b8b5717ea33346fa595a66ccbcb0").to_vec(), - "Unexpected storage key: {}", - hex::encode(&storage_key), - ); - } - - #[test] - fn best_finalized_key_computed_properly() { - // If this test fails, then something has been changed in module storage that is breaking - // compatibility with previous pallet. - let storage_key = best_finalized_key("BridgeGrandpa").0; - assert_eq!( - storage_key, - hex!("0b06f475eddb98cf933a12262e0388dea4ebafdd473c549fdb24c5c991c5591c").to_vec(), - "Unexpected storage key: {}", - hex::encode(&storage_key), - ); - } -} diff --git a/primitives/header-chain/tests/implementation_match.rs b/primitives/header-chain/tests/implementation_match.rs deleted file mode 100644 index 1f61f91ff4bbfc26fdfc53210408174c7eb934eb..0000000000000000000000000000000000000000 --- a/primitives/header-chain/tests/implementation_match.rs +++ /dev/null @@ -1,411 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. 
-// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Tests inside this module are made to ensure that our custom justification verification -//! implementation works similar to the [`finality_grandpa::validate_commit`] and explicitly -//! show where we behave different. -//! -//! Some of tests in this module may partially duplicate tests from `justification.rs`, -//! but their purpose is different. - -use bp_header_chain::justification::{ - verify_justification, GrandpaJustification, JustificationVerificationContext, - JustificationVerificationError, PrecommitError, -}; -use bp_test_utils::{ - header_id, make_justification_for_header, signed_precommit, test_header, Account, - JustificationGeneratorParams, ALICE, BOB, CHARLIE, DAVE, EVE, FERDIE, TEST_GRANDPA_SET_ID, -}; -use finality_grandpa::voter_set::VoterSet; -use sp_consensus_grandpa::{AuthorityId, AuthorityWeight, SetId}; -use sp_runtime::traits::Header as HeaderT; - -type TestHeader = sp_runtime::testing::Header; -type TestHash = ::Hash; -type TestNumber = ::Number; - -/// Implementation of `finality_grandpa::Chain` that is used in tests. 
-struct AncestryChain(bp_header_chain::justification::AncestryChain); - -impl AncestryChain { - fn new(justification: &GrandpaJustification) -> Self { - Self(bp_header_chain::justification::AncestryChain::new(justification).0) - } -} - -impl finality_grandpa::Chain for AncestryChain { - fn ancestry( - &self, - base: TestHash, - block: TestHash, - ) -> Result, finality_grandpa::Error> { - let mut route = Vec::new(); - let mut current_hash = block; - loop { - if current_hash == base { - break - } - match self.0.parent_hash_of(¤t_hash) { - Some(parent_hash) => { - current_hash = *parent_hash; - route.push(current_hash); - }, - _ => return Err(finality_grandpa::Error::NotDescendent), - } - } - route.pop(); // remove the base - - Ok(route) - } -} - -/// Get a full set of accounts. -fn full_accounts_set() -> Vec<(Account, AuthorityWeight)> { - vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1), (DAVE, 1), (EVE, 1)] -} - -/// Get a full set of GRANDPA authorities. -fn full_voter_set() -> VoterSet { - VoterSet::new(full_accounts_set().iter().map(|(id, w)| (AuthorityId::from(*id), *w))).unwrap() -} - -pub fn full_verification_context(set_id: SetId) -> JustificationVerificationContext { - let voter_set = full_voter_set(); - JustificationVerificationContext { voter_set, authority_set_id: set_id } -} - -/// Get a minimal set of accounts. -fn minimal_accounts_set() -> Vec<(Account, AuthorityWeight)> { - // there are 5 accounts in the full set => we need 2/3 + 1 accounts, which results in 4 accounts - vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1), (DAVE, 1)] -} - -/// Make a valid GRANDPA justification with sensible defaults. 
-pub fn make_default_justification(header: &TestHeader) -> GrandpaJustification { - make_justification_for_header(JustificationGeneratorParams { - header: header.clone(), - authorities: minimal_accounts_set(), - ..Default::default() - }) -} - -// the `finality_grandpa::validate_commit` function has two ways to report an unsuccessful -// commit validation: -// -// 1) to return `Err()` (which only may happen if `finality_grandpa::Chain` implementation returns -// an error); -// 2) to return `Ok(validation_result)` if `validation_result.is_valid()` is false. -// -// Our implementation would just return error in both cases. - -#[test] -fn same_result_when_precommit_target_has_lower_number_than_commit_target() { - let mut justification = make_default_justification(&test_header(1)); - // the number of header in precommit (0) is lower than number of header in commit (1) - justification.commit.precommits[0].precommit.target_number = 0; - - // our implementation returns an error - assert_eq!( - verify_justification::( - header_id::(1), - &full_verification_context(TEST_GRANDPA_SET_ID), - &justification, - ), - Err(JustificationVerificationError::Precommit(PrecommitError::UnrelatedAncestryVote)), - ); - - // original implementation returns `Ok(validation_result)` - // with `validation_result.is_valid() == false`. 
- let result = finality_grandpa::validate_commit( - &justification.commit, - &full_voter_set(), - &AncestryChain::new(&justification), - ) - .unwrap(); - - assert!(!result.is_valid()); -} - -#[test] -fn same_result_when_precommit_target_is_not_descendant_of_commit_target() { - let not_descendant = test_header::(10); - let mut justification = make_default_justification(&test_header(1)); - // the route from header of commit (1) to header of precommit (10) is missing from - // the votes ancestries - justification.commit.precommits[0].precommit.target_number = *not_descendant.number(); - justification.commit.precommits[0].precommit.target_hash = not_descendant.hash(); - justification.votes_ancestries.push(not_descendant); - - // our implementation returns an error - assert_eq!( - verify_justification::( - header_id::(1), - &full_verification_context(TEST_GRANDPA_SET_ID), - &justification, - ), - Err(JustificationVerificationError::Precommit(PrecommitError::UnrelatedAncestryVote)), - ); - - // original implementation returns `Ok(validation_result)` - // with `validation_result.is_valid() == false`. 
- let result = finality_grandpa::validate_commit( - &justification.commit, - &full_voter_set(), - &AncestryChain::new(&justification), - ) - .unwrap(); - - assert!(!result.is_valid()); -} - -#[test] -fn same_result_when_there_are_not_enough_cumulative_weight_to_finalize_commit_target() { - // just remove one authority from the minimal set and we shall not reach the threshold - let mut authorities_set = minimal_accounts_set(); - authorities_set.pop(); - let justification = make_justification_for_header(JustificationGeneratorParams { - header: test_header(1), - authorities: authorities_set, - ..Default::default() - }); - - // our implementation returns an error - assert_eq!( - verify_justification::( - header_id::(1), - &full_verification_context(TEST_GRANDPA_SET_ID), - &justification, - ), - Err(JustificationVerificationError::TooLowCumulativeWeight), - ); - // original implementation returns `Ok(validation_result)` - // with `validation_result.is_valid() == false`. - let result = finality_grandpa::validate_commit( - &justification.commit, - &full_voter_set(), - &AncestryChain::new(&justification), - ) - .unwrap(); - - assert!(!result.is_valid()); -} - -// tests below are our differences with the original implementation - -#[test] -fn different_result_when_justification_contains_duplicate_vote() { - let mut justification = make_justification_for_header(JustificationGeneratorParams { - header: test_header(1), - authorities: minimal_accounts_set(), - ancestors: 0, - ..Default::default() - }); - // the justification may contain exactly the same vote (i.e. 
same precommit and same signature) - // multiple times && it isn't treated as an error by original implementation - let last_precommit = justification.commit.precommits.pop().unwrap(); - justification.commit.precommits.push(justification.commit.precommits[0].clone()); - justification.commit.precommits.push(last_precommit); - - // our implementation fails - assert_eq!( - verify_justification::( - header_id::(1), - &full_verification_context(TEST_GRANDPA_SET_ID), - &justification, - ), - Err(JustificationVerificationError::Precommit(PrecommitError::DuplicateAuthorityVote)), - ); - // original implementation returns `Ok(validation_result)` - // with `validation_result.is_valid() == true`. - let result = finality_grandpa::validate_commit( - &justification.commit, - &full_voter_set(), - &AncestryChain::new(&justification), - ) - .unwrap(); - - assert!(result.is_valid()); -} - -#[test] -fn different_results_when_authority_equivocates_once_in_a_round() { - let mut justification = make_justification_for_header(JustificationGeneratorParams { - header: test_header(1), - authorities: minimal_accounts_set(), - ancestors: 0, - ..Default::default() - }); - // the justification original implementation allows authority to submit two different - // votes in a single round, of which only first is 'accepted' - let last_precommit = justification.commit.precommits.pop().unwrap(); - justification.commit.precommits.push(signed_precommit::( - &ALICE, - header_id::(1), - justification.round, - TEST_GRANDPA_SET_ID, - )); - justification.commit.precommits.push(last_precommit); - - // our implementation fails - assert_eq!( - verify_justification::( - header_id::(1), - &full_verification_context(TEST_GRANDPA_SET_ID), - &justification, - ), - Err(JustificationVerificationError::Precommit(PrecommitError::DuplicateAuthorityVote)), - ); - // original implementation returns `Ok(validation_result)` - // with `validation_result.is_valid() == true`. 
- let result = finality_grandpa::validate_commit( - &justification.commit, - &full_voter_set(), - &AncestryChain::new(&justification), - ) - .unwrap(); - - assert!(result.is_valid()); -} - -#[test] -fn different_results_when_authority_equivocates_twice_in_a_round() { - let mut justification = make_justification_for_header(JustificationGeneratorParams { - header: test_header(1), - authorities: minimal_accounts_set(), - ancestors: 0, - ..Default::default() - }); - // there's some code in the original implementation that should return an error when - // same authority submits more than two different votes in a single round: - // https://github.com/paritytech/finality-grandpa/blob/6aeea2d1159d0f418f0b86e70739f2130629ca09/src/lib.rs#L473 - // but there's also a code that prevents this from happening: - // https://github.com/paritytech/finality-grandpa/blob/6aeea2d1159d0f418f0b86e70739f2130629ca09/src/round.rs#L287 - // => so now we are also just ignoring all votes from the same authority, except the first one - let last_precommit = justification.commit.precommits.pop().unwrap(); - let prev_last_precommit = justification.commit.precommits.pop().unwrap(); - justification.commit.precommits.push(signed_precommit::( - &ALICE, - header_id::(1), - justification.round, - TEST_GRANDPA_SET_ID, - )); - justification.commit.precommits.push(signed_precommit::( - &ALICE, - header_id::(1), - justification.round, - TEST_GRANDPA_SET_ID, - )); - justification.commit.precommits.push(last_precommit); - justification.commit.precommits.push(prev_last_precommit); - - // our implementation fails - assert_eq!( - verify_justification::( - header_id::(1), - &full_verification_context(TEST_GRANDPA_SET_ID), - &justification, - ), - Err(JustificationVerificationError::Precommit(PrecommitError::DuplicateAuthorityVote)), - ); - // original implementation returns `Ok(validation_result)` - // with `validation_result.is_valid() == true`. 
- let result = finality_grandpa::validate_commit( - &justification.commit, - &full_voter_set(), - &AncestryChain::new(&justification), - ) - .unwrap(); - - assert!(result.is_valid()); -} - -#[test] -fn different_results_when_there_are_more_than_enough_votes() { - let mut justification = make_justification_for_header(JustificationGeneratorParams { - header: test_header(1), - authorities: minimal_accounts_set(), - ancestors: 0, - ..Default::default() - }); - // the reference implementation just keep verifying signatures even if we have - // collected enough votes. We are not - justification.commit.precommits.push(signed_precommit::( - &EVE, - header_id::(1), - justification.round, - TEST_GRANDPA_SET_ID, - )); - - // our implementation fails - assert_eq!( - verify_justification::( - header_id::(1), - &full_verification_context(TEST_GRANDPA_SET_ID), - &justification, - ), - Err(JustificationVerificationError::Precommit(PrecommitError::RedundantAuthorityVote)), - ); - // original implementation returns `Ok(validation_result)` - // with `validation_result.is_valid() == true`. - let result = finality_grandpa::validate_commit( - &justification.commit, - &full_voter_set(), - &AncestryChain::new(&justification), - ) - .unwrap(); - - assert!(result.is_valid()); -} - -#[test] -fn different_results_when_there_is_a_vote_of_unknown_authority() { - let mut justification = make_justification_for_header(JustificationGeneratorParams { - header: test_header(1), - authorities: minimal_accounts_set(), - ancestors: 0, - ..Default::default() - }); - // the reference implementation just keep verifying signatures even if we have - // collected enough votes. 
We are not - let last_precommit = justification.commit.precommits.pop().unwrap(); - justification.commit.precommits.push(signed_precommit::( - &FERDIE, - header_id::(1), - justification.round, - TEST_GRANDPA_SET_ID, - )); - justification.commit.precommits.push(last_precommit); - - // our implementation fails - assert_eq!( - verify_justification::( - header_id::(1), - &full_verification_context(TEST_GRANDPA_SET_ID), - &justification, - ), - Err(JustificationVerificationError::Precommit(PrecommitError::UnknownAuthorityVote)), - ); - // original implementation returns `Ok(validation_result)` - // with `validation_result.is_valid() == true`. - let result = finality_grandpa::validate_commit( - &justification.commit, - &full_voter_set(), - &AncestryChain::new(&justification), - ) - .unwrap(); - - assert!(result.is_valid()); -} diff --git a/primitives/header-chain/tests/justification/equivocation.rs b/primitives/header-chain/tests/justification/equivocation.rs deleted file mode 100644 index 0bc084cc1a97069dd4dff35f41ad53e6fde69319..0000000000000000000000000000000000000000 --- a/primitives/header-chain/tests/justification/equivocation.rs +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Tests for Grandpa equivocations collector code. 
- -use bp_header_chain::justification::EquivocationsCollector; -use bp_test_utils::*; -use finality_grandpa::Precommit; -use sp_consensus_grandpa::EquivocationProof; - -type TestHeader = sp_runtime::testing::Header; - -#[test] -fn duplicate_votes_are_not_considered_equivocations() { - let verification_context = verification_context(TEST_GRANDPA_SET_ID); - let base_justification = make_default_justification::(&test_header(1)); - - let mut collector = - EquivocationsCollector::new(&verification_context, &base_justification).unwrap(); - collector.parse_justifications(&[base_justification.clone()]); - - assert_eq!(collector.into_equivocation_proofs().len(), 0); -} - -#[test] -fn equivocations_are_detected_in_base_justification_redundant_votes() { - let mut base_justification = make_default_justification::(&test_header(1)); - - let first_vote = base_justification.commit.precommits[0].clone(); - let equivocation = signed_precommit::( - &ALICE, - header_id::(1), - base_justification.round, - TEST_GRANDPA_SET_ID, - ); - base_justification.commit.precommits.push(equivocation.clone()); - - let verification_context = verification_context(TEST_GRANDPA_SET_ID); - let collector = - EquivocationsCollector::new(&verification_context, &base_justification).unwrap(); - - assert_eq!( - collector.into_equivocation_proofs(), - vec![EquivocationProof::new( - 1, - sp_consensus_grandpa::Equivocation::Precommit(finality_grandpa::Equivocation { - round_number: 1, - identity: ALICE.into(), - first: ( - Precommit { - target_hash: first_vote.precommit.target_hash, - target_number: first_vote.precommit.target_number - }, - first_vote.signature - ), - second: ( - Precommit { - target_hash: equivocation.precommit.target_hash, - target_number: equivocation.precommit.target_number - }, - equivocation.signature - ) - }) - )] - ); -} - -#[test] -fn equivocations_are_detected_in_extra_justification_redundant_votes() { - let base_justification = make_default_justification::(&test_header(1)); - let 
first_vote = base_justification.commit.precommits[0].clone(); - - let mut extra_justification = base_justification.clone(); - let equivocation = signed_precommit::( - &ALICE, - header_id::(1), - base_justification.round, - TEST_GRANDPA_SET_ID, - ); - extra_justification.commit.precommits.push(equivocation.clone()); - - let verification_context = verification_context(TEST_GRANDPA_SET_ID); - let mut collector = - EquivocationsCollector::new(&verification_context, &base_justification).unwrap(); - collector.parse_justifications(&[extra_justification]); - - assert_eq!( - collector.into_equivocation_proofs(), - vec![EquivocationProof::new( - 1, - sp_consensus_grandpa::Equivocation::Precommit(finality_grandpa::Equivocation { - round_number: 1, - identity: ALICE.into(), - first: ( - Precommit { - target_hash: first_vote.precommit.target_hash, - target_number: first_vote.precommit.target_number - }, - first_vote.signature - ), - second: ( - Precommit { - target_hash: equivocation.precommit.target_hash, - target_number: equivocation.precommit.target_number - }, - equivocation.signature - ) - }) - )] - ); -} diff --git a/primitives/header-chain/tests/justification/optimizer.rs b/primitives/header-chain/tests/justification/optimizer.rs deleted file mode 100644 index 8d7e2d6502568c75e592e9a87003a930272aa2ab..0000000000000000000000000000000000000000 --- a/primitives/header-chain/tests/justification/optimizer.rs +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Tests for Grandpa Justification optimizer code. - -use bp_header_chain::justification::verify_and_optimize_justification; -use bp_test_utils::*; -use finality_grandpa::SignedPrecommit; -use sp_consensus_grandpa::AuthoritySignature; - -type TestHeader = sp_runtime::testing::Header; - -#[test] -fn optimizer_does_noting_with_minimal_justification() { - let mut justification = make_default_justification::(&test_header(1)); - - let num_precommits_before = justification.commit.precommits.len(); - verify_and_optimize_justification::( - header_id::(1), - &verification_context(TEST_GRANDPA_SET_ID), - &mut justification, - ) - .unwrap(); - let num_precommits_after = justification.commit.precommits.len(); - - assert_eq!(num_precommits_before, num_precommits_after); -} - -#[test] -fn unknown_authority_votes_are_removed_by_optimizer() { - let mut justification = make_default_justification::(&test_header(1)); - justification.commit.precommits.push(signed_precommit::( - &bp_test_utils::Account(42), - header_id::(1), - justification.round, - TEST_GRANDPA_SET_ID, - )); - - let num_precommits_before = justification.commit.precommits.len(); - verify_and_optimize_justification::( - header_id::(1), - &verification_context(TEST_GRANDPA_SET_ID), - &mut justification, - ) - .unwrap(); - let num_precommits_after = justification.commit.precommits.len(); - - assert_eq!(num_precommits_before - 1, num_precommits_after); -} - -#[test] -fn duplicate_authority_votes_are_removed_by_optimizer() { - let mut justification = make_default_justification::(&test_header(1)); - justification - .commit - .precommits - 
.push(justification.commit.precommits.first().cloned().unwrap()); - - let num_precommits_before = justification.commit.precommits.len(); - verify_and_optimize_justification::( - header_id::(1), - &verification_context(TEST_GRANDPA_SET_ID), - &mut justification, - ) - .unwrap(); - let num_precommits_after = justification.commit.precommits.len(); - - assert_eq!(num_precommits_before - 1, num_precommits_after); -} - -#[test] -fn invalid_authority_signatures_are_removed_by_optimizer() { - let mut justification = make_default_justification::(&test_header(1)); - - let target = header_id::(1); - let invalid_raw_signature: Vec = ALICE.sign(b"").to_bytes().into(); - justification.commit.precommits.insert( - 0, - SignedPrecommit { - precommit: finality_grandpa::Precommit { - target_hash: target.0, - target_number: target.1, - }, - signature: AuthoritySignature::try_from(invalid_raw_signature).unwrap(), - id: ALICE.into(), - }, - ); - - let num_precommits_before = justification.commit.precommits.len(); - verify_and_optimize_justification::( - header_id::(1), - &verification_context(TEST_GRANDPA_SET_ID), - &mut justification, - ) - .unwrap(); - let num_precommits_after = justification.commit.precommits.len(); - - assert_eq!(num_precommits_before - 1, num_precommits_after); -} - -#[test] -fn redundant_authority_votes_are_removed_by_optimizer() { - let mut justification = make_default_justification::(&test_header(1)); - justification.commit.precommits.push(signed_precommit::( - &EVE, - header_id::(1), - justification.round, - TEST_GRANDPA_SET_ID, - )); - - let num_precommits_before = justification.commit.precommits.len(); - verify_and_optimize_justification::( - header_id::(1), - &verification_context(TEST_GRANDPA_SET_ID), - &mut justification, - ) - .unwrap(); - let num_precommits_after = justification.commit.precommits.len(); - - assert_eq!(num_precommits_before - 1, num_precommits_after); -} - -#[test] -fn unrelated_ancestry_votes_are_removed_by_optimizer() { - let mut 
justification = make_default_justification::(&test_header(2)); - justification.commit.precommits.insert( - 0, - signed_precommit::( - &ALICE, - header_id::(1), - justification.round, - TEST_GRANDPA_SET_ID, - ), - ); - - let num_precommits_before = justification.commit.precommits.len(); - verify_and_optimize_justification::( - header_id::(2), - &verification_context(TEST_GRANDPA_SET_ID), - &mut justification, - ) - .unwrap(); - let num_precommits_after = justification.commit.precommits.len(); - - assert_eq!(num_precommits_before - 1, num_precommits_after); -} - -#[test] -fn duplicate_votes_ancestries_are_removed_by_optimizer() { - let mut justification = make_default_justification::(&test_header(1)); - let optimized_votes_ancestries = justification.votes_ancestries.clone(); - justification.votes_ancestries = justification - .votes_ancestries - .into_iter() - .flat_map(|item| std::iter::repeat(item).take(3)) - .collect(); - - verify_and_optimize_justification::( - header_id::(1), - &verification_context(TEST_GRANDPA_SET_ID), - &mut justification, - ) - .unwrap(); - - assert_eq!(justification.votes_ancestries, optimized_votes_ancestries); -} - -#[test] -fn redundant_votes_ancestries_are_removed_by_optimizer() { - let mut justification = make_default_justification::(&test_header(1)); - justification.votes_ancestries.push(test_header(100)); - - let num_votes_ancestries_before = justification.votes_ancestries.len(); - verify_and_optimize_justification::( - header_id::(1), - &verification_context(TEST_GRANDPA_SET_ID), - &mut justification, - ) - .unwrap(); - let num_votes_ancestries_after = justification.votes_ancestries.len(); - - assert_eq!(num_votes_ancestries_before - 1, num_votes_ancestries_after); -} diff --git a/primitives/header-chain/tests/justification/strict.rs b/primitives/header-chain/tests/justification/strict.rs deleted file mode 100644 index 639a669572b217526e0a35929026136e93d24285..0000000000000000000000000000000000000000 --- 
a/primitives/header-chain/tests/justification/strict.rs +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Tests for Grandpa strict justification verifier code. - -use bp_header_chain::justification::{ - required_justification_precommits, verify_justification, JustificationVerificationContext, - JustificationVerificationError, PrecommitError, -}; -use bp_test_utils::*; - -type TestHeader = sp_runtime::testing::Header; - -#[test] -fn valid_justification_accepted() { - let authorities = vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1)]; - let params = JustificationGeneratorParams { - header: test_header(1), - round: TEST_GRANDPA_ROUND, - set_id: TEST_GRANDPA_SET_ID, - authorities: authorities.clone(), - ancestors: 7, - forks: 3, - }; - - let justification = make_justification_for_header::(params.clone()); - assert_eq!( - verify_justification::( - header_id::(1), - &verification_context(TEST_GRANDPA_SET_ID), - &justification, - ), - Ok(()), - ); - - assert_eq!(justification.commit.precommits.len(), authorities.len()); - assert_eq!(justification.votes_ancestries.len(), params.ancestors as usize); -} - -#[test] -fn valid_justification_accepted_with_single_fork() { - let params = JustificationGeneratorParams { - header: test_header(1), - round: 
TEST_GRANDPA_ROUND, - set_id: TEST_GRANDPA_SET_ID, - authorities: vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1)], - ancestors: 5, - forks: 1, - }; - - assert_eq!( - verify_justification::( - header_id::(1), - &verification_context(TEST_GRANDPA_SET_ID), - &make_justification_for_header::(params) - ), - Ok(()), - ); -} - -#[test] -fn valid_justification_accepted_with_arbitrary_number_of_authorities() { - use finality_grandpa::voter_set::VoterSet; - use sp_consensus_grandpa::AuthorityId; - - let n = 15; - let required_signatures = required_justification_precommits(n as _); - let authorities = accounts(n).iter().map(|k| (*k, 1)).collect::>(); - - let params = JustificationGeneratorParams { - header: test_header(1), - round: TEST_GRANDPA_ROUND, - set_id: TEST_GRANDPA_SET_ID, - authorities: authorities.clone().into_iter().take(required_signatures as _).collect(), - ancestors: n.into(), - forks: required_signatures, - }; - - let authorities = authorities - .iter() - .map(|(id, w)| (AuthorityId::from(*id), *w)) - .collect::>(); - let voter_set = VoterSet::new(authorities).unwrap(); - - assert_eq!( - verify_justification::( - header_id::(1), - &JustificationVerificationContext { voter_set, authority_set_id: TEST_GRANDPA_SET_ID }, - &make_justification_for_header::(params) - ), - Ok(()), - ); -} - -#[test] -fn justification_with_invalid_target_rejected() { - assert_eq!( - verify_justification::( - header_id::(2), - &verification_context(TEST_GRANDPA_SET_ID), - &make_default_justification::(&test_header(1)), - ), - Err(JustificationVerificationError::InvalidJustificationTarget), - ); -} - -#[test] -fn justification_with_invalid_commit_rejected() { - let mut justification = make_default_justification::(&test_header(1)); - justification.commit.precommits.clear(); - - assert_eq!( - verify_justification::( - header_id::(1), - &verification_context(TEST_GRANDPA_SET_ID), - &justification, - ), - Err(JustificationVerificationError::TooLowCumulativeWeight), - ); -} - -#[test] -fn 
justification_with_invalid_authority_signature_rejected() { - let mut justification = make_default_justification::(&test_header(1)); - justification.commit.precommits[0].signature = - sp_core::crypto::UncheckedFrom::unchecked_from([1u8; 64]); - - assert_eq!( - verify_justification::( - header_id::(1), - &verification_context(TEST_GRANDPA_SET_ID), - &justification, - ), - Err(JustificationVerificationError::Precommit(PrecommitError::InvalidAuthoritySignature)), - ); -} - -#[test] -fn justification_with_duplicate_votes_ancestry() { - let mut justification = make_default_justification::(&test_header(1)); - justification.votes_ancestries.push(justification.votes_ancestries[0].clone()); - - assert_eq!( - verify_justification::( - header_id::(1), - &verification_context(TEST_GRANDPA_SET_ID), - &justification, - ), - Err(JustificationVerificationError::DuplicateVotesAncestries), - ); -} -#[test] -fn justification_with_redundant_votes_ancestry() { - let mut justification = make_default_justification::(&test_header(1)); - justification.votes_ancestries.push(test_header(10)); - - assert_eq!( - verify_justification::( - header_id::(1), - &verification_context(TEST_GRANDPA_SET_ID), - &justification, - ), - Err(JustificationVerificationError::RedundantVotesAncestries), - ); -} - -#[test] -fn justification_is_invalid_if_we_dont_meet_threshold() { - // Need at least three authorities to sign off or else the voter set threshold can't be reached - let authorities = vec![(ALICE, 1), (BOB, 1)]; - - let params = JustificationGeneratorParams { - header: test_header(1), - round: TEST_GRANDPA_ROUND, - set_id: TEST_GRANDPA_SET_ID, - authorities: authorities.clone(), - ancestors: 2 * authorities.len() as u32, - forks: 2, - }; - - assert_eq!( - verify_justification::( - header_id::(1), - &verification_context(TEST_GRANDPA_SET_ID), - &make_justification_for_header::(params) - ), - Err(JustificationVerificationError::TooLowCumulativeWeight), - ); -} diff --git 
a/primitives/header-chain/tests/tests.rs b/primitives/header-chain/tests/tests.rs deleted file mode 100644 index 269fde09bb71ce3df3d955811f9da3e10c100ef3..0000000000000000000000000000000000000000 --- a/primitives/header-chain/tests/tests.rs +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -mod justification { - mod equivocation; - mod optimizer; - mod strict; -} - -mod implementation_match; diff --git a/primitives/messages/Cargo.toml b/primitives/messages/Cargo.toml deleted file mode 100644 index d121b693146484003e556f6af7c4e961211b9a32..0000000000000000000000000000000000000000 --- a/primitives/messages/Cargo.toml +++ /dev/null @@ -1,43 +0,0 @@ -[package] -name = "bp-messages" -description = "Primitives of messages module." 
-version = "0.7.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["bit-vec", "derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive"] } -serde = { features = ["alloc", "derive"], workspace = true } - -# Bridge dependencies - -bp-runtime = { path = "../runtime", default-features = false } -bp-header-chain = { path = "../header-chain", default-features = false } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } - -[dev-dependencies] -hex = "0.4" -hex-literal = "0.4" - -[features] -default = ["std"] -std = [ - "bp-header-chain/std", - "bp-runtime/std", - "codec/std", - "frame-support/std", - "scale-info/std", - "serde/std", - "sp-core/std", - "sp-std/std", -] diff --git a/primitives/messages/src/lib.rs b/primitives/messages/src/lib.rs deleted file mode 100644 index 51b3f25f7151867b52e8e5f49bc70b0a3632c05e..0000000000000000000000000000000000000000 --- a/primitives/messages/src/lib.rs +++ /dev/null @@ -1,567 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Primitives of messages module. - -#![warn(missing_docs)] -#![cfg_attr(not(feature = "std"), no_std)] - -use bp_header_chain::HeaderChainError; -use bp_runtime::{ - messages::MessageDispatchResult, BasicOperatingMode, Chain, OperatingMode, RangeInclusiveExt, - StorageProofError, UnderlyingChainOf, UnderlyingChainProvider, -}; -use codec::{Decode, Encode, MaxEncodedLen}; -use frame_support::PalletError; -// Weight is reexported to avoid additional frame-support dependencies in related crates. -pub use frame_support::weights::Weight; -use scale_info::TypeInfo; -use serde::{Deserialize, Serialize}; -use source_chain::RelayersRewards; -use sp_core::{RuntimeDebug, TypeId}; -use sp_std::{collections::vec_deque::VecDeque, ops::RangeInclusive, prelude::*}; - -pub mod source_chain; -pub mod storage_keys; -pub mod target_chain; - -/// Substrate-based chain with messaging support. -pub trait ChainWithMessages: Chain { - /// Name of the bridge messages pallet (used in `construct_runtime` macro call) that is - /// deployed at some other chain to bridge with this `ChainWithMessages`. - /// - /// We assume that all chains that are bridging with this `ChainWithMessages` are using - /// the same name. - const WITH_CHAIN_MESSAGES_PALLET_NAME: &'static str; - - /// Maximal number of unrewarded relayers in a single confirmation transaction at this - /// `ChainWithMessages`. - const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce; - /// Maximal number of unconfirmed messages in a single confirmation transaction at this - /// `ChainWithMessages`. 
- const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce; -} - -impl ChainWithMessages for T -where - T: Chain + UnderlyingChainProvider, - UnderlyingChainOf: ChainWithMessages, -{ - const WITH_CHAIN_MESSAGES_PALLET_NAME: &'static str = - UnderlyingChainOf::::WITH_CHAIN_MESSAGES_PALLET_NAME; - const MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX: MessageNonce = - UnderlyingChainOf::::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX; - const MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX: MessageNonce = - UnderlyingChainOf::::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX; -} - -/// Messages pallet operating mode. -#[derive( - Encode, - Decode, - Clone, - Copy, - PartialEq, - Eq, - RuntimeDebug, - TypeInfo, - MaxEncodedLen, - Serialize, - Deserialize, -)] -pub enum MessagesOperatingMode { - /// Basic operating mode (Normal/Halted) - Basic(BasicOperatingMode), - /// The pallet is not accepting outbound messages. Inbound messages and receiving proofs - /// are still accepted. - /// - /// This mode may be used e.g. when bridged chain expects upgrade. Then to avoid dispatch - /// failures, the pallet owner may stop accepting new messages, while continuing to deliver - /// queued messages to the bridged chain. Once upgrade is completed, the mode may be switched - /// back to `Normal`. - RejectingOutboundMessages, -} - -impl Default for MessagesOperatingMode { - fn default() -> Self { - MessagesOperatingMode::Basic(BasicOperatingMode::Normal) - } -} - -impl OperatingMode for MessagesOperatingMode { - fn is_halted(&self) -> bool { - match self { - Self::Basic(operating_mode) => operating_mode.is_halted(), - _ => false, - } - } -} - -/// Lane id which implements `TypeId`. 
-#[derive( - Clone, Copy, Decode, Default, Encode, Eq, Ord, PartialOrd, PartialEq, TypeInfo, MaxEncodedLen, -)] -pub struct LaneId(pub [u8; 4]); - -impl core::fmt::Debug for LaneId { - fn fmt(&self, fmt: &mut core::fmt::Formatter) -> core::fmt::Result { - self.0.fmt(fmt) - } -} - -impl AsRef<[u8]> for LaneId { - fn as_ref(&self) -> &[u8] { - &self.0 - } -} - -impl TypeId for LaneId { - const TYPE_ID: [u8; 4] = *b"blan"; -} - -/// Message nonce. Valid messages will never have 0 nonce. -pub type MessageNonce = u64; - -/// Message id as a tuple. -pub type BridgeMessageId = (LaneId, MessageNonce); - -/// Opaque message payload. We only decode this payload when it is dispatched. -pub type MessagePayload = Vec; - -/// Message key (unique message identifier) as it is stored in the storage. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo, MaxEncodedLen)] -pub struct MessageKey { - /// ID of the message lane. - pub lane_id: LaneId, - /// Message nonce. - pub nonce: MessageNonce, -} - -/// Message as it is stored in the storage. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] -pub struct Message { - /// Message key. - pub key: MessageKey, - /// Message payload. - pub payload: MessagePayload, -} - -/// Inbound lane data. -#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq, TypeInfo)] -pub struct InboundLaneData { - /// Identifiers of relayers and messages that they have delivered to this lane (ordered by - /// message nonce). - /// - /// This serves as a helper storage item, to allow the source chain to easily pay rewards - /// to the relayers who successfully delivered messages to the target chain (inbound lane). - /// - /// It is guaranteed to have at most N entries, where N is configured at the module level. 
- /// If there are N entries in this vec, then: - /// 1) all incoming messages are rejected if they're missing corresponding - /// `proof-of(outbound-lane.state)`; 2) all incoming messages are rejected if - /// `proof-of(outbound-lane.state).last_delivered_nonce` is equal to - /// `self.last_confirmed_nonce`. Given what is said above, all nonces in this queue are in - /// range: `(self.last_confirmed_nonce; self.last_delivered_nonce()]`. - /// - /// When a relayer sends a single message, both of MessageNonces are the same. - /// When relayer sends messages in a batch, the first arg is the lowest nonce, second arg the - /// highest nonce. Multiple dispatches from the same relayer are allowed. - pub relayers: VecDeque>, - - /// Nonce of the last message that - /// a) has been delivered to the target (this) chain and - /// b) the delivery has been confirmed on the source chain - /// - /// that the target chain knows of. - /// - /// This value is updated indirectly when an `OutboundLane` state of the source - /// chain is received alongside with new messages delivery. - pub last_confirmed_nonce: MessageNonce, -} - -impl Default for InboundLaneData { - fn default() -> Self { - InboundLaneData { relayers: VecDeque::new(), last_confirmed_nonce: 0 } - } -} - -impl InboundLaneData { - /// Returns approximate size of the struct, given a number of entries in the `relayers` set and - /// size of each entry. - /// - /// Returns `None` if size overflows `usize` limits. - pub fn encoded_size_hint(relayers_entries: usize) -> Option - where - RelayerId: MaxEncodedLen, - { - relayers_entries - .checked_mul(UnrewardedRelayer::::max_encoded_len())? - .checked_add(MessageNonce::max_encoded_len()) - } - - /// Returns the approximate size of the struct as u32, given a number of entries in the - /// `relayers` set and the size of each entry. - /// - /// Returns `u32::MAX` if size overflows `u32` limits. 
- pub fn encoded_size_hint_u32(relayers_entries: usize) -> u32 - where - RelayerId: MaxEncodedLen, - { - Self::encoded_size_hint(relayers_entries) - .and_then(|x| u32::try_from(x).ok()) - .unwrap_or(u32::MAX) - } - - /// Nonce of the last message that has been delivered to this (target) chain. - pub fn last_delivered_nonce(&self) -> MessageNonce { - self.relayers - .back() - .map(|entry| entry.messages.end) - .unwrap_or(self.last_confirmed_nonce) - } - - /// Returns the total number of messages in the `relayers` vector, - /// saturating in case of underflow or overflow. - pub fn total_unrewarded_messages(&self) -> MessageNonce { - let relayers = &self.relayers; - match (relayers.front(), relayers.back()) { - (Some(front), Some(back)) => - (front.messages.begin..=back.messages.end).saturating_len(), - _ => 0, - } - } -} - -/// Outbound message details, returned by runtime APIs. -#[derive(Clone, Encode, Decode, RuntimeDebug, PartialEq, Eq, TypeInfo)] -pub struct OutboundMessageDetails { - /// Nonce assigned to the message. - pub nonce: MessageNonce, - /// Message dispatch weight. - /// - /// Depending on messages pallet configuration, it may be declared by the message submitter, - /// computed automatically or just be zero if dispatch fee is paid at the target chain. - pub dispatch_weight: Weight, - /// Size of the encoded message. - pub size: u32, -} - -/// Inbound message details, returned by runtime APIs. -#[derive(Clone, Encode, Decode, RuntimeDebug, PartialEq, Eq, TypeInfo)] -pub struct InboundMessageDetails { - /// Computed message dispatch weight. - /// - /// Runtime API guarantees that it will match the value, returned by - /// `target_chain::MessageDispatch::dispatch_weight`. This means that if the runtime - /// has failed to decode the message, it will be zero - that's because `undecodable` - /// message cannot be dispatched. - pub dispatch_weight: Weight, -} - -/// Unrewarded relayer entry stored in the inbound lane data. 
-/// -/// This struct represents a continuous range of messages that have been delivered by the same -/// relayer and whose confirmations are still pending. -#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq, TypeInfo, MaxEncodedLen)] -pub struct UnrewardedRelayer { - /// Identifier of the relayer. - pub relayer: RelayerId, - /// Messages range, delivered by this relayer. - pub messages: DeliveredMessages, -} - -/// Received messages with their dispatch result. -#[derive(Clone, Default, Encode, Decode, RuntimeDebug, PartialEq, Eq, TypeInfo)] -pub struct ReceivedMessages { - /// Id of the lane which is receiving messages. - pub lane: LaneId, - /// Result of messages which we tried to dispatch - pub receive_results: Vec<(MessageNonce, ReceivalResult)>, -} - -impl ReceivedMessages { - /// Creates new `ReceivedMessages` structure from given results. - pub fn new( - lane: LaneId, - receive_results: Vec<(MessageNonce, ReceivalResult)>, - ) -> Self { - ReceivedMessages { lane, receive_results } - } - - /// Push `result` of the `message` delivery onto `receive_results` vector. - pub fn push(&mut self, message: MessageNonce, result: ReceivalResult) { - self.receive_results.push((message, result)); - } -} - -/// Result of single message receival. -#[derive(RuntimeDebug, Encode, Decode, PartialEq, Eq, Clone, TypeInfo)] -pub enum ReceivalResult { - /// Message has been received and dispatched. Note that we don't care whether dispatch has - /// been successful or not - in both case message falls into this category. - /// - /// The message dispatch result is also returned. - Dispatched(MessageDispatchResult), - /// Message has invalid nonce and lane has rejected to accept this message. - InvalidNonce, - /// There are too many unrewarded relayer entries at the lane. - TooManyUnrewardedRelayers, - /// There are too many unconfirmed messages at the lane. - TooManyUnconfirmedMessages, -} - -/// Delivered messages with their dispatch result. 
-#[derive(Clone, Default, Encode, Decode, RuntimeDebug, PartialEq, Eq, TypeInfo, MaxEncodedLen)] -pub struct DeliveredMessages { - /// Nonce of the first message that has been delivered (inclusive). - pub begin: MessageNonce, - /// Nonce of the last message that has been delivered (inclusive). - pub end: MessageNonce, -} - -impl DeliveredMessages { - /// Create new `DeliveredMessages` struct that confirms delivery of single nonce with given - /// dispatch result. - pub fn new(nonce: MessageNonce) -> Self { - DeliveredMessages { begin: nonce, end: nonce } - } - - /// Return total count of delivered messages. - pub fn total_messages(&self) -> MessageNonce { - (self.begin..=self.end).saturating_len() - } - - /// Note new dispatched message. - pub fn note_dispatched_message(&mut self) { - self.end += 1; - } - - /// Returns true if delivered messages contain message with given nonce. - pub fn contains_message(&self, nonce: MessageNonce) -> bool { - (self.begin..=self.end).contains(&nonce) - } -} - -/// Gist of `InboundLaneData::relayers` field used by runtime APIs. -#[derive(Clone, Default, Encode, Decode, RuntimeDebug, PartialEq, Eq, TypeInfo)] -pub struct UnrewardedRelayersState { - /// Number of entries in the `InboundLaneData::relayers` set. - pub unrewarded_relayer_entries: MessageNonce, - /// Number of messages in the oldest entry of `InboundLaneData::relayers`. This is the - /// minimal number of reward proofs required to push out this entry from the set. - pub messages_in_oldest_entry: MessageNonce, - /// Total number of messages in the relayers vector. - pub total_messages: MessageNonce, - /// Nonce of the latest message that has been delivered to the target chain. - /// - /// This corresponds to the result of the `InboundLaneData::last_delivered_nonce` call - /// at the bridged chain. - pub last_delivered_nonce: MessageNonce, -} - -impl UnrewardedRelayersState { - /// Verify that the relayers state corresponds with the `InboundLaneData`. 
- pub fn is_valid(&self, lane_data: &InboundLaneData) -> bool { - self == &lane_data.into() - } -} - -impl From<&InboundLaneData> for UnrewardedRelayersState { - fn from(lane: &InboundLaneData) -> UnrewardedRelayersState { - UnrewardedRelayersState { - unrewarded_relayer_entries: lane.relayers.len() as _, - messages_in_oldest_entry: lane - .relayers - .front() - .map(|entry| entry.messages.total_messages()) - .unwrap_or(0), - total_messages: lane.total_unrewarded_messages(), - last_delivered_nonce: lane.last_delivered_nonce(), - } - } -} - -/// Outbound lane data. -#[derive(Encode, Decode, Clone, RuntimeDebug, PartialEq, Eq, TypeInfo, MaxEncodedLen)] -pub struct OutboundLaneData { - /// Nonce of the oldest message that we haven't yet pruned. May point to not-yet-generated - /// message if all sent messages are already pruned. - pub oldest_unpruned_nonce: MessageNonce, - /// Nonce of the latest message, received by bridged chain. - pub latest_received_nonce: MessageNonce, - /// Nonce of the latest message, generated by us. - pub latest_generated_nonce: MessageNonce, -} - -impl Default for OutboundLaneData { - fn default() -> Self { - OutboundLaneData { - // it is 1 because we're pruning everything in [oldest_unpruned_nonce; - // latest_received_nonce] - oldest_unpruned_nonce: 1, - latest_received_nonce: 0, - latest_generated_nonce: 0, - } - } -} - -impl OutboundLaneData { - /// Return nonces of all currently queued messages (i.e. messages that we believe - /// are not delivered yet). - pub fn queued_messages(&self) -> RangeInclusive { - (self.latest_received_nonce + 1)..=self.latest_generated_nonce - } -} - -/// Calculate the number of messages that the relayers have delivered. 
-pub fn calc_relayers_rewards( - messages_relayers: VecDeque>, - received_range: &RangeInclusive, -) -> RelayersRewards -where - AccountId: sp_std::cmp::Ord, -{ - // remember to reward relayers that have delivered messages - // this loop is bounded by `T::MaxUnrewardedRelayerEntriesAtInboundLane` on the bridged chain - let mut relayers_rewards = RelayersRewards::new(); - for entry in messages_relayers { - let nonce_begin = sp_std::cmp::max(entry.messages.begin, *received_range.start()); - let nonce_end = sp_std::cmp::min(entry.messages.end, *received_range.end()); - if nonce_end >= nonce_begin { - *relayers_rewards.entry(entry.relayer).or_default() += nonce_end - nonce_begin + 1; - } - } - relayers_rewards -} - -/// A minimized version of `pallet-bridge-messages::Call` that can be used without a runtime. -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -#[allow(non_camel_case_types)] -pub enum BridgeMessagesCall { - /// `pallet-bridge-messages::Call::receive_messages_proof` - #[codec(index = 2)] - receive_messages_proof { - /// Account id of relayer at the **bridged** chain. - relayer_id_at_bridged_chain: AccountId, - /// Messages proof. - proof: MessagesProof, - /// A number of messages in the proof. - messages_count: u32, - /// Total dispatch weight of messages in the proof. - dispatch_weight: Weight, - }, - /// `pallet-bridge-messages::Call::receive_messages_delivery_proof` - #[codec(index = 3)] - receive_messages_delivery_proof { - /// Messages delivery proof. - proof: MessagesDeliveryProof, - /// "Digest" of unrewarded relayers state at the bridged chain. - relayers_state: UnrewardedRelayersState, - }, -} - -/// Error that happens during message verification. -#[derive(Encode, Decode, RuntimeDebug, PartialEq, Eq, PalletError, TypeInfo)] -pub enum VerificationError { - /// The message proof is empty. - EmptyMessageProof, - /// Error returned by the bridged header chain. 
- HeaderChain(HeaderChainError), - /// Error returned while reading/decoding inbound lane data from the storage proof. - InboundLaneStorage(StorageProofError), - /// The declared message weight is incorrect. - InvalidMessageWeight, - /// Declared messages count doesn't match actual value. - MessagesCountMismatch, - /// Error returned while reading/decoding message data from the storage proof. - MessageStorage(StorageProofError), - /// The message is too large. - MessageTooLarge, - /// Error returned while reading/decoding outbound lane data from the storage proof. - OutboundLaneStorage(StorageProofError), - /// Storage proof related error. - StorageProof(StorageProofError), - /// Custom error - Other(#[codec(skip)] &'static str), -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn total_unrewarded_messages_does_not_overflow() { - let lane_data = InboundLaneData { - relayers: vec![ - UnrewardedRelayer { relayer: 1, messages: DeliveredMessages::new(0) }, - UnrewardedRelayer { - relayer: 2, - messages: DeliveredMessages::new(MessageNonce::MAX), - }, - ] - .into_iter() - .collect(), - last_confirmed_nonce: 0, - }; - assert_eq!(lane_data.total_unrewarded_messages(), MessageNonce::MAX); - } - - #[test] - fn inbound_lane_data_returns_correct_hint() { - let test_cases = vec![ - // single relayer, multiple messages - (1, 128u8), - // multiple relayers, single message per relayer - (128u8, 128u8), - // several messages per relayer - (13u8, 128u8), - ]; - for (relayer_entries, messages_count) in test_cases { - let expected_size = InboundLaneData::::encoded_size_hint(relayer_entries as _); - let actual_size = InboundLaneData { - relayers: (1u8..=relayer_entries) - .map(|i| UnrewardedRelayer { - relayer: i, - messages: DeliveredMessages::new(i as _), - }) - .collect(), - last_confirmed_nonce: messages_count as _, - } - .encode() - .len(); - let difference = (expected_size.unwrap() as f64 - actual_size as f64).abs(); - assert!( - difference / 
(std::cmp::min(actual_size, expected_size.unwrap()) as f64) < 0.1, - "Too large difference between actual ({actual_size}) and expected ({expected_size:?}) inbound lane data size. Test case: {relayer_entries}+{messages_count}", - ); - } - } - - #[test] - fn contains_result_works() { - let delivered_messages = DeliveredMessages { begin: 100, end: 150 }; - - assert!(!delivered_messages.contains_message(99)); - assert!(delivered_messages.contains_message(100)); - assert!(delivered_messages.contains_message(150)); - assert!(!delivered_messages.contains_message(151)); - } - - #[test] - fn lane_id_debug_format_matches_inner_array_format() { - assert_eq!(format!("{:?}", LaneId([0, 0, 0, 0])), format!("{:?}", [0, 0, 0, 0]),); - } -} diff --git a/primitives/messages/src/source_chain.rs b/primitives/messages/src/source_chain.rs deleted file mode 100644 index f4aefd9735583e265c3e44713f13f81ae63ba276..0000000000000000000000000000000000000000 --- a/primitives/messages/src/source_chain.rs +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Primitives of messages module, that are used on the source chain. 
- -use crate::{InboundLaneData, LaneId, MessageNonce, VerificationError}; - -use crate::UnrewardedRelayer; -use bp_runtime::Size; -use frame_support::Parameter; -use sp_core::RuntimeDebug; -use sp_std::{ - collections::{btree_map::BTreeMap, vec_deque::VecDeque}, - fmt::Debug, - ops::RangeInclusive, -}; - -/// Number of messages, delivered by relayers. -pub type RelayersRewards = BTreeMap; - -/// Target chain API. Used by source chain to verify target chain proofs. -/// -/// All implementations of this trait should only work with finalized data that -/// can't change. Wrong implementation may lead to invalid lane states (i.e. lane -/// that's stuck) and/or processing messages without paying fees. -/// -/// The `Payload` type here means the payload of the message that is sent from the -/// source chain to the target chain. The `AccountId` type here means the account -/// type used by the source chain. -pub trait TargetHeaderChain { - /// Proof that messages have been received by target chain. - type MessagesDeliveryProof: Parameter + Size; - - /// Verify message payload before we accept it. - /// - /// **CAUTION**: this is very important function. Incorrect implementation may lead - /// to stuck lanes and/or relayers loses. - /// - /// The proper implementation must ensure that the delivery-transaction with this - /// payload would (at least) be accepted into target chain transaction pool AND - /// eventually will be successfully mined. The most obvious incorrect implementation - /// example would be implementation for BTC chain that accepts payloads larger than - /// 1MB. BTC nodes aren't accepting transactions that are larger than 1MB, so relayer - /// will be unable to craft valid transaction => this (and all subsequent) messages will - /// never be delivered. - fn verify_message(payload: &Payload) -> Result<(), VerificationError>; - - /// Verify messages delivery proof and return lane && nonce of the latest received message. 
- fn verify_messages_delivery_proof( - proof: Self::MessagesDeliveryProof, - ) -> Result<(LaneId, InboundLaneData), VerificationError>; -} - -/// Manages payments that are happening at the source chain during delivery confirmation -/// transaction. -pub trait DeliveryConfirmationPayments { - /// Error type. - type Error: Debug + Into<&'static str>; - - /// Pay rewards for delivering messages to the given relayers. - /// - /// The implementation may also choose to pay reward to the `confirmation_relayer`, which is - /// a relayer that has submitted delivery confirmation transaction. - /// - /// Returns number of actually rewarded relayers. - fn pay_reward( - lane_id: LaneId, - messages_relayers: VecDeque>, - confirmation_relayer: &AccountId, - received_range: &RangeInclusive, - ) -> MessageNonce; -} - -impl DeliveryConfirmationPayments for () { - type Error = &'static str; - - fn pay_reward( - _lane_id: LaneId, - _messages_relayers: VecDeque>, - _confirmation_relayer: &AccountId, - _received_range: &RangeInclusive, - ) -> MessageNonce { - // this implementation is not rewarding relayers at all - 0 - } -} - -/// Callback that is called at the source chain (bridge hub) when we get delivery confirmation -/// for new messages. -pub trait OnMessagesDelivered { - /// New messages delivery has been confirmed. - /// - /// The only argument of the function is the number of yet undelivered messages - fn on_messages_delivered(lane: LaneId, enqueued_messages: MessageNonce); -} - -impl OnMessagesDelivered for () { - fn on_messages_delivered(_lane: LaneId, _enqueued_messages: MessageNonce) {} -} - -/// Send message artifacts. -#[derive(Eq, RuntimeDebug, PartialEq)] -pub struct SendMessageArtifacts { - /// Nonce of the message. - pub nonce: MessageNonce, - /// Number of enqueued messages at the lane, after the message is sent. - pub enqueued_messages: MessageNonce, -} - -/// Messages bridge API to be used from other pallets. -pub trait MessagesBridge { - /// Error type. 
- type Error: Debug; - - /// Intermediary structure returned by `validate_message()`. - /// - /// It can than be passed to `send_message()` in order to actually send the message - /// on the bridge. - type SendMessageArgs; - - /// Check if the message can be sent over the bridge. - fn validate_message( - lane: LaneId, - message: &Payload, - ) -> Result; - - /// Send message over the bridge. - /// - /// Returns unique message nonce or error if send has failed. - fn send_message(message: Self::SendMessageArgs) -> SendMessageArtifacts; -} - -/// Structure that may be used in place of `TargetHeaderChain` and -/// `MessageDeliveryAndDispatchPayment` on chains, where outbound messages are forbidden. -pub struct ForbidOutboundMessages; - -/// Error message that is used in `ForbidOutboundMessages` implementation. -const ALL_OUTBOUND_MESSAGES_REJECTED: &str = - "This chain is configured to reject all outbound messages"; - -impl TargetHeaderChain for ForbidOutboundMessages { - type MessagesDeliveryProof = (); - - fn verify_message(_payload: &Payload) -> Result<(), VerificationError> { - Err(VerificationError::Other(ALL_OUTBOUND_MESSAGES_REJECTED)) - } - - fn verify_messages_delivery_proof( - _proof: Self::MessagesDeliveryProof, - ) -> Result<(LaneId, InboundLaneData), VerificationError> { - Err(VerificationError::Other(ALL_OUTBOUND_MESSAGES_REJECTED)) - } -} - -impl DeliveryConfirmationPayments for ForbidOutboundMessages { - type Error = &'static str; - - fn pay_reward( - _lane_id: LaneId, - _messages_relayers: VecDeque>, - _confirmation_relayer: &AccountId, - _received_range: &RangeInclusive, - ) -> MessageNonce { - 0 - } -} diff --git a/primitives/messages/src/storage_keys.rs b/primitives/messages/src/storage_keys.rs deleted file mode 100644 index 8eedf8fcc7ac98ae300ca0485a0827afd3cd1bb5..0000000000000000000000000000000000000000 --- a/primitives/messages/src/storage_keys.rs +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. 
-// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Storage keys of bridge messages pallet. - -/// Name of the `OPERATING_MODE_VALUE_NAME` storage value. -pub const OPERATING_MODE_VALUE_NAME: &str = "PalletOperatingMode"; -/// Name of the `OutboundMessages` storage map. -pub const OUTBOUND_MESSAGES_MAP_NAME: &str = "OutboundMessages"; -/// Name of the `OutboundLanes` storage map. -pub const OUTBOUND_LANES_MAP_NAME: &str = "OutboundLanes"; -/// Name of the `InboundLanes` storage map. -pub const INBOUND_LANES_MAP_NAME: &str = "InboundLanes"; - -use crate::{LaneId, MessageKey, MessageNonce}; - -use codec::Encode; -use frame_support::Blake2_128Concat; -use sp_core::storage::StorageKey; - -/// Storage key of the `PalletOperatingMode` value in the runtime storage. -pub fn operating_mode_key(pallet_prefix: &str) -> StorageKey { - StorageKey( - bp_runtime::storage_value_final_key( - pallet_prefix.as_bytes(), - OPERATING_MODE_VALUE_NAME.as_bytes(), - ) - .to_vec(), - ) -} - -/// Storage key of the outbound message in the runtime storage. 
-pub fn message_key(pallet_prefix: &str, lane: &LaneId, nonce: MessageNonce) -> StorageKey { - bp_runtime::storage_map_final_key::( - pallet_prefix, - OUTBOUND_MESSAGES_MAP_NAME, - &MessageKey { lane_id: *lane, nonce }.encode(), - ) -} - -/// Storage key of the outbound message lane state in the runtime storage. -pub fn outbound_lane_data_key(pallet_prefix: &str, lane: &LaneId) -> StorageKey { - bp_runtime::storage_map_final_key::( - pallet_prefix, - OUTBOUND_LANES_MAP_NAME, - &lane.encode(), - ) -} - -/// Storage key of the inbound message lane state in the runtime storage. -pub fn inbound_lane_data_key(pallet_prefix: &str, lane: &LaneId) -> StorageKey { - bp_runtime::storage_map_final_key::( - pallet_prefix, - INBOUND_LANES_MAP_NAME, - &lane.encode(), - ) -} - -#[cfg(test)] -mod tests { - use super::*; - use hex_literal::hex; - - #[test] - fn operating_mode_key_computed_properly() { - // If this test fails, then something has been changed in module storage that is possibly - // breaking all existing message relays. - let storage_key = operating_mode_key("BridgeMessages").0; - assert_eq!( - storage_key, - hex!("dd16c784ebd3390a9bc0357c7511ed010f4cf0917788d791142ff6c1f216e7b3").to_vec(), - "Unexpected storage key: {}", - hex::encode(&storage_key), - ); - } - - #[test] - fn storage_message_key_computed_properly() { - // If this test fails, then something has been changed in module storage that is breaking - // all previously crafted messages proofs. - let storage_key = message_key("BridgeMessages", &LaneId(*b"test"), 42).0; - assert_eq!( - storage_key, - hex!("dd16c784ebd3390a9bc0357c7511ed018a395e6242c6813b196ca31ed0547ea79446af0e09063bd4a7874aef8a997cec746573742a00000000000000").to_vec(), - "Unexpected storage key: {}", - hex::encode(&storage_key), - ); - } - - #[test] - fn outbound_lane_data_key_computed_properly() { - // If this test fails, then something has been changed in module storage that is breaking - // all previously crafted outbound lane state proofs. 
- let storage_key = outbound_lane_data_key("BridgeMessages", &LaneId(*b"test")).0; - assert_eq!( - storage_key, - hex!("dd16c784ebd3390a9bc0357c7511ed0196c246acb9b55077390e3ca723a0ca1f44a8995dd50b6657a037a7839304535b74657374").to_vec(), - "Unexpected storage key: {}", - hex::encode(&storage_key), - ); - } - - #[test] - fn inbound_lane_data_key_computed_properly() { - // If this test fails, then something has been changed in module storage that is breaking - // all previously crafted inbound lane state proofs. - let storage_key = inbound_lane_data_key("BridgeMessages", &LaneId(*b"test")).0; - assert_eq!( - storage_key, - hex!("dd16c784ebd3390a9bc0357c7511ed01e5f83cf83f2127eb47afdc35d6e43fab44a8995dd50b6657a037a7839304535b74657374").to_vec(), - "Unexpected storage key: {}", - hex::encode(&storage_key), - ); - } -} diff --git a/primitives/messages/src/target_chain.rs b/primitives/messages/src/target_chain.rs deleted file mode 100644 index 388ce16ccdc06d3e2c42c3a094aae4d6180a0d09..0000000000000000000000000000000000000000 --- a/primitives/messages/src/target_chain.rs +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Primitives of messages module, that are used on the target chain. 
- -use crate::{ - LaneId, Message, MessageKey, MessageNonce, MessagePayload, OutboundLaneData, VerificationError, -}; - -use bp_runtime::{messages::MessageDispatchResult, Size}; -use codec::{Decode, Encode, Error as CodecError}; -use frame_support::{weights::Weight, Parameter}; -use scale_info::TypeInfo; -use sp_core::RuntimeDebug; -use sp_std::{collections::btree_map::BTreeMap, fmt::Debug, marker::PhantomData, prelude::*}; - -/// Proved messages from the source chain. -pub type ProvedMessages = BTreeMap>; - -/// Proved messages from single lane of the source chain. -#[derive(RuntimeDebug, Encode, Decode, Clone, PartialEq, Eq, TypeInfo)] -pub struct ProvedLaneMessages { - /// Optional outbound lane state. - pub lane_state: Option, - /// Messages sent through this lane. - pub messages: Vec, -} - -/// Message data with decoded dispatch payload. -#[derive(RuntimeDebug)] -pub struct DispatchMessageData { - /// Result of dispatch payload decoding. - pub payload: Result, -} - -/// Message with decoded dispatch payload. -#[derive(RuntimeDebug)] -pub struct DispatchMessage { - /// Message key. - pub key: MessageKey, - /// Message data with decoded dispatch payload. - pub data: DispatchMessageData, -} - -/// Source chain API. Used by target chain, to verify source chain proofs. -/// -/// All implementations of this trait should only work with finalized data that -/// can't change. Wrong implementation may lead to invalid lane states (i.e. lane -/// that's stuck) and/or processing messages without paying fees. -pub trait SourceHeaderChain { - /// Proof that messages are sent from source chain. This may also include proof - /// of corresponding outbound lane states. - type MessagesProof: Parameter + Size; - - /// Verify messages proof and return proved messages. - /// - /// Returns error if either proof is incorrect, or the number of messages in the proof - /// is not matching the `messages_count`. 
- /// - /// Messages vector is required to be sorted by nonce within each lane. Out-of-order - /// messages will be rejected. - /// - /// The `messages_count` argument verification (sane limits) is supposed to be made - /// outside this function. This function only verifies that the proof declares exactly - /// `messages_count` messages. - fn verify_messages_proof( - proof: Self::MessagesProof, - messages_count: u32, - ) -> Result, VerificationError>; -} - -/// Called when inbound message is received. -pub trait MessageDispatch { - /// Decoded message payload type. Valid message may contain invalid payload. In this case - /// message is delivered, but dispatch fails. Therefore, two separate types of payload - /// (opaque `MessagePayload` used in delivery and this `DispatchPayload` used in dispatch). - type DispatchPayload: Decode; - - /// Fine-grained result of single message dispatch (for better diagnostic purposes) - type DispatchLevelResult: Clone + sp_std::fmt::Debug + Eq; - - /// Returns `true` if dispatcher is ready to accept additional messages. The `false` should - /// be treated as a hint by both dispatcher and its consumers - i.e. dispatcher shall not - /// simply drop messages if it returns `false`. The consumer may still call the `dispatch` - /// if dispatcher has returned `false`. - /// - /// We check it in the messages delivery transaction prologue. So if it becomes `false` - /// after some portion of messages is already dispatched, it doesn't fail the whole transaction. - fn is_active() -> bool; - - /// Estimate dispatch weight. - /// - /// This function must return correct upper bound of dispatch weight. The return value - /// of this function is expected to match return value of the corresponding - /// `FromInboundLaneApi::message_details().dispatch_weight` call. - fn dispatch_weight(message: &mut DispatchMessage) -> Weight; - - /// Called when inbound message is received. 
- /// - /// It is up to the implementers of this trait to determine whether the message - /// is invalid (i.e. improperly encoded, has too large weight, ...) or not. - fn dispatch( - message: DispatchMessage, - ) -> MessageDispatchResult; -} - -/// Manages payments that are happening at the target chain during message delivery transaction. -pub trait DeliveryPayments { - /// Error type. - type Error: Debug + Into<&'static str>; - - /// Pay rewards for delivering messages to the given relayer. - /// - /// This method is called during message delivery transaction which has been submitted - /// by the `relayer`. The transaction brings `total_messages` messages but only - /// `valid_messages` have been accepted. The post-dispatch transaction weight is the - /// `actual_weight`. - fn pay_reward( - relayer: AccountId, - total_messages: MessageNonce, - valid_messages: MessageNonce, - actual_weight: Weight, - ); -} - -impl Default for ProvedLaneMessages { - fn default() -> Self { - ProvedLaneMessages { lane_state: None, messages: Vec::new() } - } -} - -impl From for DispatchMessage { - fn from(message: Message) -> Self { - DispatchMessage { key: message.key, data: message.payload.into() } - } -} - -impl From for DispatchMessageData { - fn from(payload: MessagePayload) -> Self { - DispatchMessageData { payload: DispatchPayload::decode(&mut &payload[..]) } - } -} - -impl DeliveryPayments for () { - type Error = &'static str; - - fn pay_reward( - _relayer: AccountId, - _total_messages: MessageNonce, - _valid_messages: MessageNonce, - _actual_weight: Weight, - ) { - // this implementation is not rewarding relayer at all - } -} - -/// Structure that may be used in place of `SourceHeaderChain` and `MessageDispatch` on chains, -/// where inbound messages are forbidden. -pub struct ForbidInboundMessages( - PhantomData<(MessagesProof, DispatchPayload)>, -); - -/// Error message that is used in `ForbidInboundMessages` implementation. 
-const ALL_INBOUND_MESSAGES_REJECTED: &str = - "This chain is configured to reject all inbound messages"; - -impl SourceHeaderChain - for ForbidInboundMessages -{ - type MessagesProof = MessagesProof; - - fn verify_messages_proof( - _proof: Self::MessagesProof, - _messages_count: u32, - ) -> Result, VerificationError> { - Err(VerificationError::Other(ALL_INBOUND_MESSAGES_REJECTED)) - } -} - -impl MessageDispatch - for ForbidInboundMessages -{ - type DispatchPayload = DispatchPayload; - type DispatchLevelResult = (); - - fn is_active() -> bool { - false - } - - fn dispatch_weight(_message: &mut DispatchMessage) -> Weight { - Weight::MAX - } - - fn dispatch( - _: DispatchMessage, - ) -> MessageDispatchResult { - MessageDispatchResult { unspent_weight: Weight::zero(), dispatch_level_result: () } - } -} diff --git a/primitives/parachains/Cargo.toml b/primitives/parachains/Cargo.toml deleted file mode 100644 index e62ae6a8d423c2c6b8fac8cfe94f5668685b12b7..0000000000000000000000000000000000000000 --- a/primitives/parachains/Cargo.toml +++ /dev/null @@ -1,42 +0,0 @@ -[package] -name = "bp-parachains" -description = "Primitives of parachains module." 
-version = "0.7.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive"] } -impl-trait-for-tuples = "0.2" -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } - -# Bridge dependencies - -bp-header-chain = { path = "../header-chain", default-features = false } -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } - -# Substrate dependencies - -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } - -[features] -default = ["std"] -std = [ - "bp-header-chain/std", - "bp-polkadot-core/std", - "bp-runtime/std", - "codec/std", - "frame-support/std", - "scale-info/std", - "sp-core/std", - "sp-runtime/std", - "sp-std/std", -] diff --git a/primitives/parachains/src/lib.rs b/primitives/parachains/src/lib.rs deleted file mode 100644 index 142c6e9b08923fdd2934fb7f3b9c2d12788fc8b9..0000000000000000000000000000000000000000 --- a/primitives/parachains/src/lib.rs +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Primitives of parachains module. - -#![warn(missing_docs)] -#![cfg_attr(not(feature = "std"), no_std)] - -pub use bp_header_chain::StoredHeaderData; - -use bp_polkadot_core::{ - parachains::{ParaHash, ParaHead, ParaHeadsProof, ParaId}, - BlockNumber as RelayBlockNumber, Hash as RelayBlockHash, -}; -use bp_runtime::{ - BlockNumberOf, Chain, HashOf, HeaderOf, Parachain, StorageDoubleMapKeyProvider, - StorageMapKeyProvider, -}; -use codec::{Decode, Encode, MaxEncodedLen}; -use frame_support::{Blake2_128Concat, Twox64Concat}; -use scale_info::TypeInfo; -use sp_core::storage::StorageKey; -use sp_runtime::{traits::Header as HeaderT, RuntimeDebug}; -use sp_std::{marker::PhantomData, prelude::*}; - -/// Best known parachain head hash. -#[derive(Clone, Decode, Encode, MaxEncodedLen, PartialEq, RuntimeDebug, TypeInfo)] -pub struct BestParaHeadHash { - /// Number of relay block where this head has been read. - /// - /// Parachain head is opaque to relay chain. So we can't simply decode it as a header of - /// parachains and call `block_number()` on it. Instead, we're using the fact that parachain - /// head is always built on top of previous head (because it is blockchain) and relay chain - /// always imports parachain heads in order. What it means for us is that at any given - /// **finalized** relay block `B`, head of parachain will be ancestor (or the same) of all - /// parachain heads available at descendants of `B`. - pub at_relay_block_number: RelayBlockNumber, - /// Hash of parachain head. 
- pub head_hash: ParaHash, -} - -/// Best known parachain head as it is stored in the runtime storage. -#[derive(Decode, Encode, MaxEncodedLen, PartialEq, RuntimeDebug, TypeInfo)] -pub struct ParaInfo { - /// Best known parachain head hash. - pub best_head_hash: BestParaHeadHash, - /// Current ring buffer position for this parachain. - pub next_imported_hash_position: u32, -} - -/// Returns runtime storage key of given parachain head at the source chain. -/// -/// The head is stored by the `paras` pallet in the `Heads` map. -pub fn parachain_head_storage_key_at_source( - paras_pallet_name: &str, - para_id: ParaId, -) -> StorageKey { - bp_runtime::storage_map_final_key::(paras_pallet_name, "Heads", ¶_id.encode()) -} - -/// Can be use to access the runtime storage key of the parachains info at the target chain. -/// -/// The info is stored by the `pallet-bridge-parachains` pallet in the `ParasInfo` map. -pub struct ParasInfoKeyProvider; -impl StorageMapKeyProvider for ParasInfoKeyProvider { - const MAP_NAME: &'static str = "ParasInfo"; - - type Hasher = Blake2_128Concat; - type Key = ParaId; - type Value = ParaInfo; -} - -/// Can be use to access the runtime storage key of the parachain head at the target chain. -/// -/// The head is stored by the `pallet-bridge-parachains` pallet in the `ImportedParaHeads` map. -pub struct ImportedParaHeadsKeyProvider; -impl StorageDoubleMapKeyProvider for ImportedParaHeadsKeyProvider { - const MAP_NAME: &'static str = "ImportedParaHeads"; - - type Hasher1 = Blake2_128Concat; - type Key1 = ParaId; - type Hasher2 = Blake2_128Concat; - type Key2 = ParaHash; - type Value = ParaStoredHeaderData; -} - -/// Stored data of the parachain head. It is encoded version of the -/// `bp_runtime::StoredHeaderData` structure. -/// -/// We do not know exact structure of the parachain head, so we always store encoded version -/// of the `bp_runtime::StoredHeaderData`. It is only decoded when we talk about specific parachain. 
-#[derive(Clone, Decode, Encode, PartialEq, RuntimeDebug, TypeInfo)] -pub struct ParaStoredHeaderData(pub Vec); - -impl ParaStoredHeaderData { - /// Decode stored parachain head data. - pub fn decode_parachain_head_data( - &self, - ) -> Result, HashOf>, codec::Error> { - StoredHeaderData::, HashOf>::decode(&mut &self.0[..]) - } -} - -/// Stored parachain head data builder. -pub trait ParaStoredHeaderDataBuilder { - /// Maximal parachain head size that we may accept for free. All heads above - /// this limit are submitted for a regular fee. - fn max_free_head_size() -> u32; - - /// Return number of parachains that are supported by this builder. - fn supported_parachains() -> u32; - - /// Try to build head data from encoded head of parachain with given id. - fn try_build(para_id: ParaId, para_head: &ParaHead) -> Option; -} - -/// Helper for using single parachain as `ParaStoredHeaderDataBuilder`. -pub struct SingleParaStoredHeaderDataBuilder(PhantomData); - -impl ParaStoredHeaderDataBuilder for SingleParaStoredHeaderDataBuilder { - fn max_free_head_size() -> u32 { - C::MAX_HEADER_SIZE - } - - fn supported_parachains() -> u32 { - 1 - } - - fn try_build(para_id: ParaId, para_head: &ParaHead) -> Option { - if para_id == ParaId(C::PARACHAIN_ID) { - let header = HeaderOf::::decode(&mut ¶_head.0[..]).ok()?; - return Some(ParaStoredHeaderData( - StoredHeaderData { number: *header.number(), state_root: *header.state_root() } - .encode(), - )) - } - None - } -} - -// Tries to build header data from each tuple member, short-circuiting on first successful one. 
-#[impl_trait_for_tuples::impl_for_tuples(1, 30)] -#[tuple_types_custom_trait_bound(Parachain)] -impl ParaStoredHeaderDataBuilder for C { - fn max_free_head_size() -> u32 { - let mut result = 0_u32; - for_tuples!( #( - result = sp_std::cmp::max( - result, - SingleParaStoredHeaderDataBuilder::::max_free_head_size(), - ); - )* ); - result - } - - fn supported_parachains() -> u32 { - let mut result = 0; - for_tuples!( #( - result += SingleParaStoredHeaderDataBuilder::::supported_parachains(); - )* ); - result - } - - fn try_build(para_id: ParaId, para_head: &ParaHead) -> Option { - for_tuples!( #( - let maybe_para_head = SingleParaStoredHeaderDataBuilder::::try_build(para_id, para_head); - if let Some(maybe_para_head) = maybe_para_head { - return Some(maybe_para_head); - } - )* ); - - None - } -} - -/// A minimized version of `pallet-bridge-parachains::Call` that can be used without a runtime. -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -#[allow(non_camel_case_types)] -pub enum BridgeParachainCall { - /// `pallet-bridge-parachains::Call::submit_parachain_heads` - #[codec(index = 0)] - submit_parachain_heads { - /// Relay chain block, for which we have submitted the `parachain_heads_proof`. - at_relay_block: (RelayBlockNumber, RelayBlockHash), - /// Parachain identifiers and their head hashes. - parachains: Vec<(ParaId, ParaHash)>, - /// Parachain heads proof. - parachain_heads_proof: ParaHeadsProof, - }, -} diff --git a/primitives/polkadot-core/Cargo.toml b/primitives/polkadot-core/Cargo.toml deleted file mode 100644 index c28f3f2e34e46fa1233ad2e1936cb8ffdb2e7003..0000000000000000000000000000000000000000 --- a/primitives/polkadot-core/Cargo.toml +++ /dev/null @@ -1,48 +0,0 @@ -[package] -name = "bp-polkadot-core" -description = "Primitives of Polkadot-like runtime." 
-version = "0.7.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive"] } -parity-util-mem = { version = "0.12.0", optional = true } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { default-features = false, features = ["derive"], optional = true, workspace = true } - -# Bridge Dependencies - -bp-messages = { path = "../messages", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } - -# Substrate Based Dependencies - -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -frame-system = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } - -[dev-dependencies] -hex = "0.4" - -[features] -default = ["std"] -std = [ - "bp-messages/std", - "bp-runtime/std", - "codec/std", - "frame-support/std", - "frame-system/std", - "parity-util-mem", - "scale-info/std", - "serde", - "sp-core/std", - "sp-runtime/std", - "sp-std/std", -] diff --git a/primitives/polkadot-core/src/lib.rs b/primitives/polkadot-core/src/lib.rs deleted file mode 100644 index d59b99db4b586dde7b2d645ff44c34b94f865f24..0000000000000000000000000000000000000000 --- a/primitives/polkadot-core/src/lib.rs +++ /dev/null @@ -1,389 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Primitives of the Polkadot-like chains. - -#![warn(missing_docs)] -#![cfg_attr(not(feature = "std"), no_std)] - -use bp_messages::MessageNonce; -use bp_runtime::{ - self, - extensions::{ - ChargeTransactionPayment, CheckEra, CheckGenesis, CheckNonZeroSender, CheckNonce, - CheckSpecVersion, CheckTxVersion, CheckWeight, GenericTransactionExtension, - TransactionExtensionSchema, - }, - EncodedOrDecodedCall, StorageMapKeyProvider, TransactionEra, -}; -use frame_support::{ - dispatch::DispatchClass, - parameter_types, - weights::{ - constants::{BlockExecutionWeight, WEIGHT_REF_TIME_PER_SECOND}, - Weight, - }, - Blake2_128Concat, -}; -use frame_system::limits; -use sp_core::{storage::StorageKey, Hasher as HasherT}; -use sp_runtime::{ - generic, - traits::{BlakeTwo256, IdentifyAccount, Verify}, - MultiAddress, MultiSignature, OpaqueExtrinsic, -}; -use sp_std::prelude::Vec; - -// Re-export's to avoid extra substrate dependencies in chain-specific crates. -pub use frame_support::{weights::constants::ExtrinsicBaseWeight, Parameter}; -pub use sp_runtime::{traits::Convert, Perbill}; - -pub mod parachains; - -/// Maximal number of GRANDPA authorities at Polkadot-like chains. -/// -/// Ideally, we would set it to the value of `MaxAuthorities` constant from bridged runtime -/// configurations. 
But right now it is set to the `100_000`, which makes PoV size for -/// our bridge hub parachains huge. So let's stick to the real-world value here. -/// -/// Right now both Kusama and Polkadot aim to have around 1000 validators. Let's be safe here and -/// take a bit more here. -pub const MAX_AUTHORITIES_COUNT: u32 = 1_256; - -/// Reasonable number of headers in the `votes_ancestries` on Polkadot-like chains. -/// -/// See [`bp-header-chain::ChainWithGrandpa`] for more details. -/// -/// This value comes from recent (December, 2023) Kusama and Polkadot headers. There are no -/// justifications with any additional headers in votes ancestry, so reasonable headers may -/// be set to zero. But we assume that there may be small GRANDPA lags, so we're leaving some -/// reserve here. -pub const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = 2; - -/// Average header size in `votes_ancestries` field of justification on Polkadot-like -/// chains. -/// -/// See [`bp-header-chain::ChainWithGrandpa`] for more details. -/// -/// This value comes from recent (December, 2023) Kusama headers. Most of headers are `327` bytes -/// there, but let's have some reserve and make it 1024. -pub const AVERAGE_HEADER_SIZE: u32 = 1024; - -/// Approximate maximal header size on Polkadot-like chains. -/// -/// See [`bp-header-chain::ChainWithGrandpa`] for more details. -/// -/// This value comes from recent (December, 2023) Kusama headers. Maximal header is a mandatory -/// header. In its SCALE-encoded form it is `113407` bytes. Let's have some reserve here. -pub const MAX_MANDATORY_HEADER_SIZE: u32 = 120 * 1024; - -/// Number of extra bytes (excluding size of storage value itself) of storage proof, built at -/// Polkadot-like chain. This mostly depends on number of entries in the storage trie. -/// Some reserve is reserved to account future chain growth. 
-/// -/// To compute this value, we've synced Kusama chain blocks [0; 6545733] to see if there were -/// any significant changes of the storage proof size (NO): -/// -/// - at block 3072 the storage proof size overhead was 579 bytes; -/// - at block 2479616 it was 578 bytes; -/// - at block 4118528 it was 711 bytes; -/// - at block 6540800 it was 779 bytes. -/// -/// The number of storage entries at the block 6546170 was 351207 and number of trie nodes in -/// the storage proof was 5 (log(16, 351207) ~ 4.6). -/// -/// So the assumption is that the storage proof size overhead won't be larger than 1024 in the -/// nearest future. If it'll ever break this barrier, then we'll need to update this constant -/// at next runtime upgrade. -pub const EXTRA_STORAGE_PROOF_SIZE: u32 = 1024; - -/// All Polkadot-like chains allow normal extrinsics to fill block up to 75 percent. -/// -/// This is a copy-paste from the Polkadot repo's `polkadot-runtime-common` crate. -const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); - -/// All Polkadot-like chains allow 2 seconds of compute with a 6-second average block time. -/// -/// This is a copy-paste from the Polkadot repo's `polkadot-runtime-common` crate. -pub const MAXIMUM_BLOCK_WEIGHT: Weight = - Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND.saturating_mul(2), u64::MAX); - -/// All Polkadot-like chains assume that an on-initialize consumes 1 percent of the weight on -/// average, hence a single extrinsic will not be allowed to consume more than -/// `AvailableBlockRatio - 1 percent`. -/// -/// This is a copy-paste from the Polkadot repo's `polkadot-runtime-common` crate. -pub const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(1); - -parameter_types! { - /// All Polkadot-like chains have maximal block size set to 5MB. - /// - /// This is a copy-paste from the Polkadot repo's `polkadot-runtime-common` crate. 
- pub BlockLength: limits::BlockLength = limits::BlockLength::max_with_normal_ratio( - 5 * 1024 * 1024, - NORMAL_DISPATCH_RATIO, - ); - /// All Polkadot-like chains have the same block weights. - /// - /// This is a copy-paste from the Polkadot repo's `polkadot-runtime-common` crate. - pub BlockWeights: limits::BlockWeights = limits::BlockWeights::builder() - .base_block(BlockExecutionWeight::get()) - .for_class(DispatchClass::all(), |weights| { - weights.base_extrinsic = ExtrinsicBaseWeight::get(); - }) - .for_class(DispatchClass::Normal, |weights| { - weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); - }) - .for_class(DispatchClass::Operational, |weights| { - weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); - // Operational transactions have an extra reserved space, so that they - // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. - weights.reserved = Some( - MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT, - ); - }) - .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) - .build_or_panic(); -} - -// TODO [#78] may need to be updated after https://github.com/paritytech/parity-bridges-common/issues/78 -/// Maximal number of messages in single delivery transaction. -pub const MAX_MESSAGES_IN_DELIVERY_TRANSACTION: MessageNonce = 128; - -/// Maximal number of bytes, included in the signed Polkadot-like transaction apart from the encoded -/// call itself. -/// -/// Can be computed by subtracting encoded call size from raw transaction size. -pub const TX_EXTRA_BYTES: u32 = 256; - -/// Re-export `time_units` to make usage easier. -pub use time_units::*; - -/// Human readable time units defined in terms of number of blocks. -pub mod time_units { - use super::BlockNumber; - - /// Milliseconds between Polkadot-like chain blocks. - pub const MILLISECS_PER_BLOCK: u64 = 6000; - /// Slot duration in Polkadot-like chain consensus algorithms. 
- pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; - - /// A minute, expressed in Polkadot-like chain blocks. - pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber); - /// A hour, expressed in Polkadot-like chain blocks. - pub const HOURS: BlockNumber = MINUTES * 60; - /// A day, expressed in Polkadot-like chain blocks. - pub const DAYS: BlockNumber = HOURS * 24; -} - -/// Block number type used in Polkadot-like chains. -pub type BlockNumber = u32; - -/// Hash type used in Polkadot-like chains. -pub type Hash = ::Out; - -/// Hashing type. -pub type Hashing = BlakeTwo256; - -/// The type of object that can produce hashes on Polkadot-like chains. -pub type Hasher = BlakeTwo256; - -/// The header type used by Polkadot-like chains. -pub type Header = generic::Header; - -/// Signature type used by Polkadot-like chains. -pub type Signature = MultiSignature; - -/// Public key of account on Polkadot-like chains. -pub type AccountPublic = ::Signer; - -/// Id of account on Polkadot-like chains. -pub type AccountId = ::AccountId; - -/// Address of account on Polkadot-like chains. -pub type AccountAddress = MultiAddress; - -/// Nonce of a transaction on the Polkadot-like chains. -pub type Nonce = u32; - -/// Block type of Polkadot-like chains. -pub type Block = generic::Block; - -/// Polkadot-like block signed with a Justification. -pub type SignedBlock = generic::SignedBlock; - -/// The balance of an account on Polkadot-like chain. -pub type Balance = u128; - -/// Unchecked Extrinsic type. -pub type UncheckedExtrinsic = generic::UncheckedExtrinsic< - AccountAddress, - EncodedOrDecodedCall, - Signature, - TransactionExt, ->; - -/// Account address, used by the Polkadot-like chain. -pub type Address = MultiAddress; - -/// Returns maximal extrinsic size on all Polkadot-like chains. -pub fn max_extrinsic_size() -> u32 { - *BlockLength::get().max.get(DispatchClass::Normal) -} - -/// Returns maximal extrinsic weight on all Polkadot-like chains. 
-pub fn max_extrinsic_weight() -> Weight { - BlockWeights::get() - .get(DispatchClass::Normal) - .max_extrinsic - .unwrap_or(Weight::MAX) -} - -/// Provides a storage key for account data. -/// -/// We need to use this approach when we don't have access to the runtime. -/// The equivalent command to invoke in case full `Runtime` is known is this: -/// `let key = frame_system::Account::::storage_map_final_key(&account_id);` -pub struct AccountInfoStorageMapKeyProvider; - -impl StorageMapKeyProvider for AccountInfoStorageMapKeyProvider { - const MAP_NAME: &'static str = "Account"; - type Hasher = Blake2_128Concat; - type Key = AccountId; - // This should actually be `AccountInfo`, but we don't use this property in order to decode the - // data. So we use `Vec` as if we would work with encoded data. - type Value = Vec; -} - -impl AccountInfoStorageMapKeyProvider { - /// Name of the system pallet. - const PALLET_NAME: &'static str = "System"; - - /// Return storage key for given account data. - pub fn final_key(id: &AccountId) -> StorageKey { - ::final_key(Self::PALLET_NAME, id) - } -} - -/// Extra signed extension data that is used by most chains. -pub type CommonTransactionExtra = ( - CheckNonZeroSender, - CheckSpecVersion, - CheckTxVersion, - CheckGenesis, - CheckEra, - CheckNonce, - CheckWeight, - ChargeTransactionPayment, -); - -/// Extra transaction extension data that starts with `CommonTransactionExtra`. -pub type SuffixedCommonTransactionExtension = - GenericTransactionExtension<(CommonTransactionExtra, Suffix)>; - -/// Helper trait to define some extra methods on `SuffixedCommonTransactionExtension`. -pub trait SuffixedCommonTransactionExtensionExt { - /// Create signed extension from its components. - fn from_params( - spec_version: u32, - transaction_version: u32, - era: TransactionEra, - genesis_hash: Hash, - nonce: Nonce, - tip: Balance, - extra: (Suffix::Payload, Suffix::Implicit), - ) -> Self; - - /// Return transaction nonce. 
- fn nonce(&self) -> Nonce; - - /// Return transaction tip. - fn tip(&self) -> Balance; -} - -impl SuffixedCommonTransactionExtensionExt - for SuffixedCommonTransactionExtension -where - Suffix: TransactionExtensionSchema, -{ - fn from_params( - spec_version: u32, - transaction_version: u32, - era: TransactionEra, - genesis_hash: Hash, - nonce: Nonce, - tip: Balance, - extra: (Suffix::Payload, Suffix::Implicit), - ) -> Self { - GenericTransactionExtension::new( - ( - ( - (), // non-zero sender - (), // spec version - (), // tx version - (), // genesis - era.frame_era(), // era - nonce.into(), // nonce (compact encoding) - (), // Check weight - tip.into(), // transaction payment / tip (compact encoding) - ), - extra.0, - ), - Some(( - ( - (), - spec_version, - transaction_version, - genesis_hash, - era.signed_payload(genesis_hash), - (), - (), - (), - ), - extra.1, - )), - ) - } - - fn nonce(&self) -> Nonce { - let common_payload = self.payload.0; - common_payload.5 .0 - } - - fn tip(&self) -> Balance { - let common_payload = self.payload.0; - common_payload.7 .0 - } -} - -/// Signed extension that is used by most chains. 
-pub type CommonTransactionExtension = SuffixedCommonTransactionExtension<()>; - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn should_generate_storage_key() { - let acc = [ - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, - 25, 26, 27, 28, 29, 30, 31, 32, - ] - .into(); - let key = AccountInfoStorageMapKeyProvider::final_key(&acc); - assert_eq!(hex::encode(key), "26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da92dccd599abfe1920a1cff8a7358231430102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20"); - } -} diff --git a/primitives/polkadot-core/src/parachains.rs b/primitives/polkadot-core/src/parachains.rs deleted file mode 100644 index 433cd2845abd9ae95687d6f1d024765ee3bd2ebb..0000000000000000000000000000000000000000 --- a/primitives/polkadot-core/src/parachains.rs +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Primitives of polkadot-like chains, that are related to parachains functionality. -//! -//! Even though this (bridges) repository references polkadot repository, we can't -//! reference polkadot crates from pallets. That's because bridges repository is -//! included in the Cumulus repository and included pallets are used by Cumulus -//! parachains. 
Having pallets that are referencing polkadot, would mean that there may -//! be two versions of polkadot crates included in the runtime. Which is bad. - -use bp_runtime::{RawStorageProof, Size}; -use codec::{CompactAs, Decode, Encode, MaxEncodedLen}; -use scale_info::TypeInfo; -use sp_core::Hasher; -use sp_runtime::RuntimeDebug; -use sp_std::vec::Vec; - -#[cfg(feature = "std")] -use serde::{Deserialize, Serialize}; - -#[cfg(feature = "std")] -use parity_util_mem::MallocSizeOf; - -/// Parachain id. -/// -/// This is an equivalent of the `polkadot_parachain_primitives::Id`, which is a compact-encoded -/// `u32`. -#[derive( - Clone, - CompactAs, - Copy, - Decode, - Default, - Encode, - Eq, - Hash, - MaxEncodedLen, - Ord, - PartialEq, - PartialOrd, - RuntimeDebug, - TypeInfo, -)] -pub struct ParaId(pub u32); - -impl From for ParaId { - fn from(id: u32) -> Self { - ParaId(id) - } -} - -/// Parachain head. -/// -/// This is an equivalent of the `polkadot_parachain_primitives::HeadData`. -/// -/// The parachain head means (at least in Cumulus) a SCALE-encoded parachain header. -#[derive( - PartialEq, Eq, Clone, PartialOrd, Ord, Encode, Decode, RuntimeDebug, TypeInfo, Default, -)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash, MallocSizeOf))] -pub struct ParaHead(pub Vec); - -impl ParaHead { - /// Returns the hash of this head data. - pub fn hash(&self) -> crate::Hash { - sp_runtime::traits::BlakeTwo256::hash(&self.0) - } -} - -/// Parachain head hash. -pub type ParaHash = crate::Hash; - -/// Parachain head hasher. -pub type ParaHasher = crate::Hasher; - -/// Raw storage proof of parachain heads, stored in polkadot-like chain runtime. -#[derive(Clone, Decode, Encode, Eq, PartialEq, RuntimeDebug, TypeInfo)] -pub struct ParaHeadsProof { - /// Unverified storage proof of finalized parachain heads. 
- pub storage_proof: RawStorageProof, -} - -impl Size for ParaHeadsProof { - fn size(&self) -> u32 { - u32::try_from( - self.storage_proof - .iter() - .fold(0usize, |sum, node| sum.saturating_add(node.len())), - ) - .unwrap_or(u32::MAX) - } -} diff --git a/primitives/relayers/Cargo.toml b/primitives/relayers/Cargo.toml deleted file mode 100644 index 19aed6b038bcd9139020c8b68bd69eeea73ed0fe..0000000000000000000000000000000000000000 --- a/primitives/relayers/Cargo.toml +++ /dev/null @@ -1,41 +0,0 @@ -[package] -name = "bp-relayers" -description = "Primitives of relayers module." -version = "0.7.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["bit-vec", "derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive"] } - -# Bridge Dependencies - -bp-messages = { path = "../messages", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } - -[dev-dependencies] -hex = "0.4" -hex-literal = "0.4" - -[features] -default = ["std"] -std = [ - "bp-messages/std", - "bp-runtime/std", - "codec/std", - "frame-support/std", - "scale-info/std", - "sp-runtime/std", - "sp-std/std", -] diff --git a/primitives/relayers/src/lib.rs b/primitives/relayers/src/lib.rs deleted file mode 100644 index 2a9ef6a8e1e9aba999ea90045447f7a87fb3813b..0000000000000000000000000000000000000000 --- a/primitives/relayers/src/lib.rs +++ /dev/null @@ -1,206 
+0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Primitives of messages module. - -#![warn(missing_docs)] -#![cfg_attr(not(feature = "std"), no_std)] - -pub use registration::{ExplicitOrAccountParams, Registration, StakeAndSlash}; - -use bp_messages::LaneId; -use bp_runtime::{ChainId, StorageDoubleMapKeyProvider}; -use frame_support::{traits::tokens::Preservation, Blake2_128Concat, Identity}; -use scale_info::TypeInfo; -use sp_runtime::{ - codec::{Codec, Decode, Encode, EncodeLike, MaxEncodedLen}, - traits::AccountIdConversion, - TypeId, -}; -use sp_std::{fmt::Debug, marker::PhantomData}; - -mod registration; - -/// The owner of the sovereign account that should pay the rewards. -/// -/// Each of the 2 final points connected by a bridge owns a sovereign account at each end of the -/// bridge. So here, at this end of the bridge there can be 2 sovereign accounts that pay rewards. -#[derive(Copy, Clone, Debug, Decode, Encode, Eq, PartialEq, TypeInfo, MaxEncodedLen)] -pub enum RewardsAccountOwner { - /// The sovereign account of the final chain on this end of the bridge. - ThisChain, - /// The sovereign account of the final chain on the other end of the bridge. - BridgedChain, -} - -/// Structure used to identify the account that pays a reward to the relayer. 
-/// -/// A bridge connects 2 bridge ends. Each one is located on a separate relay chain. The bridge ends -/// can be the final destinations of the bridge, or they can be intermediary points -/// (e.g. a bridge hub) used to forward messages between pairs of parachains on the bridged relay -/// chains. A pair of such parachains is connected using a bridge lane. Each of the 2 final -/// destinations of a bridge lane must have a sovereign account at each end of the bridge and each -/// of the sovereign accounts will pay rewards for different operations. So we need multiple -/// parameters to identify the account that pays a reward to the relayer. -#[derive(Copy, Clone, Debug, Decode, Encode, Eq, PartialEq, TypeInfo, MaxEncodedLen)] -pub struct RewardsAccountParams { - lane_id: LaneId, - bridged_chain_id: ChainId, - owner: RewardsAccountOwner, -} - -impl RewardsAccountParams { - /// Create a new instance of `RewardsAccountParams`. - pub const fn new( - lane_id: LaneId, - bridged_chain_id: ChainId, - owner: RewardsAccountOwner, - ) -> Self { - Self { lane_id, bridged_chain_id, owner } - } -} - -impl TypeId for RewardsAccountParams { - const TYPE_ID: [u8; 4] = *b"brap"; -} - -/// Reward payment procedure. -pub trait PaymentProcedure { - /// Error that may be returned by the procedure. - type Error: Debug; - - /// Pay reward to the relayer from the account with provided params. - fn pay_reward( - relayer: &Relayer, - rewards_account_params: RewardsAccountParams, - reward: Reward, - ) -> Result<(), Self::Error>; -} - -impl PaymentProcedure for () { - type Error = &'static str; - - fn pay_reward(_: &Relayer, _: RewardsAccountParams, _: Reward) -> Result<(), Self::Error> { - Ok(()) - } -} - -/// Reward payment procedure that does `balances::transfer` call from the account, derived from -/// given params. 
-pub struct PayRewardFromAccount(PhantomData<(T, Relayer)>); - -impl PayRewardFromAccount -where - Relayer: Decode + Encode, -{ - /// Return account that pays rewards based on the provided parameters. - pub fn rewards_account(params: RewardsAccountParams) -> Relayer { - params.into_sub_account_truncating(b"rewards-account") - } -} - -impl PaymentProcedure for PayRewardFromAccount -where - T: frame_support::traits::fungible::Mutate, - Relayer: Decode + Encode + Eq, -{ - type Error = sp_runtime::DispatchError; - - fn pay_reward( - relayer: &Relayer, - rewards_account_params: RewardsAccountParams, - reward: T::Balance, - ) -> Result<(), Self::Error> { - T::transfer( - &Self::rewards_account(rewards_account_params), - relayer, - reward, - Preservation::Expendable, - ) - .map(drop) - } -} - -/// Can be use to access the runtime storage key within the `RelayerRewards` map of the relayers -/// pallet. -pub struct RelayerRewardsKeyProvider(PhantomData<(AccountId, Reward)>); - -impl StorageDoubleMapKeyProvider for RelayerRewardsKeyProvider -where - AccountId: Codec + EncodeLike, - Reward: Codec + EncodeLike, -{ - const MAP_NAME: &'static str = "RelayerRewards"; - - type Hasher1 = Blake2_128Concat; - type Key1 = AccountId; - type Hasher2 = Identity; - type Key2 = RewardsAccountParams; - type Value = Reward; -} - -#[cfg(test)] -mod tests { - use super::*; - use bp_messages::LaneId; - use sp_runtime::testing::H256; - - #[test] - fn different_lanes_are_using_different_accounts() { - assert_eq!( - PayRewardFromAccount::<(), H256>::rewards_account(RewardsAccountParams::new( - LaneId([0, 0, 0, 0]), - *b"test", - RewardsAccountOwner::ThisChain - )), - hex_literal::hex!("62726170000000007465737400726577617264732d6163636f756e7400000000") - .into(), - ); - - assert_eq!( - PayRewardFromAccount::<(), H256>::rewards_account(RewardsAccountParams::new( - LaneId([0, 0, 0, 1]), - *b"test", - RewardsAccountOwner::ThisChain - )), - 
hex_literal::hex!("62726170000000017465737400726577617264732d6163636f756e7400000000") - .into(), - ); - } - - #[test] - fn different_directions_are_using_different_accounts() { - assert_eq!( - PayRewardFromAccount::<(), H256>::rewards_account(RewardsAccountParams::new( - LaneId([0, 0, 0, 0]), - *b"test", - RewardsAccountOwner::ThisChain - )), - hex_literal::hex!("62726170000000007465737400726577617264732d6163636f756e7400000000") - .into(), - ); - - assert_eq!( - PayRewardFromAccount::<(), H256>::rewards_account(RewardsAccountParams::new( - LaneId([0, 0, 0, 0]), - *b"test", - RewardsAccountOwner::BridgedChain - )), - hex_literal::hex!("62726170000000007465737401726577617264732d6163636f756e7400000000") - .into(), - ); - } -} diff --git a/primitives/relayers/src/registration.rs b/primitives/relayers/src/registration.rs deleted file mode 100644 index 312e51516ba1063757464e706b94c16e1b099270..0000000000000000000000000000000000000000 --- a/primitives/relayers/src/registration.rs +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Bridge relayers registration and slashing scheme. -//! -//! There is an option to add a refund-relayer signed extension that will compensate -//! 
relayer costs of the message delivery and confirmation transactions (as well as -//! required finality proofs). This extension boosts priority of message delivery -//! transactions, based on the number of bundled messages. So transaction with more -//! messages has larger priority than the transaction with less messages. -//! See `bridge_runtime_common::priority_calculator` for details; -//! -//! This encourages relayers to include more messages to their delivery transactions. -//! At the same time, we are not verifying storage proofs before boosting -//! priority. Instead, we simply trust relayer, when it says that transaction delivers -//! `N` messages. -//! -//! This allows relayers to submit transactions which declare large number of bundled -//! transactions to receive priority boost for free, potentially pushing actual delivery -//! transactions from the block (or even transaction queue). Such transactions are -//! not free, but their cost is relatively small. -//! -//! To alleviate that, we only boost transactions of relayers that have some stake -//! that guarantees that their transactions are valid. Such relayers get priority -//! for free, but they risk to lose their stake. - -use crate::RewardsAccountParams; - -use codec::{Decode, Encode, MaxEncodedLen}; -use scale_info::TypeInfo; -use sp_runtime::{ - traits::{Get, Zero}, - DispatchError, DispatchResult, -}; - -/// Either explicit account reference or `RewardsAccountParams`. -#[derive(Clone, Debug)] -pub enum ExplicitOrAccountParams { - /// Explicit account reference. - Explicit(AccountId), - /// Account, referenced using `RewardsAccountParams`. - Params(RewardsAccountParams), -} - -impl From for ExplicitOrAccountParams { - fn from(params: RewardsAccountParams) -> Self { - ExplicitOrAccountParams::Params(params) - } -} - -/// Relayer registration. 
-#[derive(Copy, Clone, Debug, Decode, Encode, Eq, PartialEq, TypeInfo, MaxEncodedLen)] -pub struct Registration { - /// The last block number, where this registration is considered active. - /// - /// Relayer has an option to renew his registration (this may be done before it - /// is spoiled as well). Starting from block `valid_till + 1`, relayer may `deregister` - /// himself and get his stake back. - /// - /// Please keep in mind that priority boost stops working some blocks before the - /// registration ends (see [`StakeAndSlash::RequiredRegistrationLease`]). - pub valid_till: BlockNumber, - /// Active relayer stake, which is mapped to the relayer reserved balance. - /// - /// If `stake` is less than the [`StakeAndSlash::RequiredStake`], the registration - /// is considered inactive even if `valid_till + 1` is not yet reached. - pub stake: Balance, -} - -/// Relayer stake-and-slash mechanism. -pub trait StakeAndSlash { - /// The stake that the relayer must have to have its transactions boosted. - type RequiredStake: Get; - /// Required **remaining** registration lease to be able to get transaction priority boost. - /// - /// If the difference between registration's `valid_till` and the current block number - /// is less than the `RequiredRegistrationLease`, it becomes inactive and relayer transaction - /// won't get priority boost. This period exists, because priority is calculated when - /// transaction is placed to the queue (and it is reevaluated periodically) and then some time - /// may pass before transaction will be included into the block. - type RequiredRegistrationLease: Get; - - /// Reserve the given amount at relayer account. - fn reserve(relayer: &AccountId, amount: Balance) -> DispatchResult; - /// `Unreserve` the given amount from relayer account. - /// - /// Returns amount that we have failed to `unreserve`. 
- fn unreserve(relayer: &AccountId, amount: Balance) -> Balance; - /// Slash up to `amount` from reserved balance of account `relayer` and send funds to given - /// `beneficiary`. - /// - /// Returns `Ok(_)` with non-zero balance if we have failed to repatriate some portion of stake. - fn repatriate_reserved( - relayer: &AccountId, - beneficiary: ExplicitOrAccountParams, - amount: Balance, - ) -> Result; -} - -impl StakeAndSlash for () -where - Balance: Default + Zero, - BlockNumber: Default, -{ - type RequiredStake = (); - type RequiredRegistrationLease = (); - - fn reserve(_relayer: &AccountId, _amount: Balance) -> DispatchResult { - Ok(()) - } - - fn unreserve(_relayer: &AccountId, _amount: Balance) -> Balance { - Zero::zero() - } - - fn repatriate_reserved( - _relayer: &AccountId, - _beneficiary: ExplicitOrAccountParams, - _amount: Balance, - ) -> Result { - Ok(Zero::zero()) - } -} diff --git a/primitives/runtime/Cargo.toml b/primitives/runtime/Cargo.toml deleted file mode 100644 index d7cef6a10320b96a632e5ac0b1f63d4a01906532..0000000000000000000000000000000000000000 --- a/primitives/runtime/Cargo.toml +++ /dev/null @@ -1,54 +0,0 @@ -[package] -name = "bp-runtime" -description = "Primitives that may be used at (bridges) runtime level." 
-version = "0.7.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } -hash-db = { version = "0.16.0", default-features = false } -impl-trait-for-tuples = "0.2.2" -log = { workspace = true } -num-traits = { version = "0.2", default-features = false } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -serde = { features = ["alloc", "derive"], workspace = true } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -frame-system = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-io = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false, features = ["serde"] } -sp-state-machine = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-trie = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -trie-db = { version = "0.28.0", default-features = false } - -[dev-dependencies] -hex-literal = "0.4" - -[features] -default = ["std"] -std = [ - "codec/std", - "frame-support/std", - "frame-system/std", - "hash-db/std", - "log/std", - "num-traits/std", - "scale-info/std", - "serde/std", - "sp-core/std", - "sp-io/std", - "sp-runtime/std", - "sp-state-machine/std", - "sp-std/std", - "sp-trie/std", - "trie-db/std", -] diff --git 
a/primitives/runtime/src/chain.rs b/primitives/runtime/src/chain.rs deleted file mode 100644 index a405d84eacca27decbddf2851e4efc9dfeb6de2a..0000000000000000000000000000000000000000 --- a/primitives/runtime/src/chain.rs +++ /dev/null @@ -1,444 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::{ChainId, HeaderIdProvider}; - -use codec::{Codec, Decode, Encode, MaxEncodedLen}; -use frame_support::{weights::Weight, Parameter}; -use num_traits::{AsPrimitive, Bounded, CheckedSub, Saturating, SaturatingAdd, Zero}; -use sp_runtime::{ - traits::{ - AtLeast32Bit, AtLeast32BitUnsigned, Hash as HashT, Header as HeaderT, MaybeDisplay, - MaybeSerialize, MaybeSerializeDeserialize, Member, SimpleBitOps, Verify, - }, - FixedPointOperand, -}; -use sp_std::{convert::TryFrom, fmt::Debug, hash::Hash, str::FromStr, vec, vec::Vec}; - -/// Chain call, that is either SCALE-encoded, or decoded. -#[derive(Debug, Clone, PartialEq)] -pub enum EncodedOrDecodedCall { - /// The call that is SCALE-encoded. - /// - /// This variant is used when we the chain runtime is not bundled with the relay, but - /// we still need the represent call in some RPC calls or transactions. - Encoded(Vec), - /// The decoded call. - Decoded(ChainCall), -} - -impl EncodedOrDecodedCall { - /// Returns decoded call. 
- pub fn to_decoded(&self) -> Result { - match self { - Self::Encoded(ref encoded_call) => - ChainCall::decode(&mut &encoded_call[..]).map_err(Into::into), - Self::Decoded(ref decoded_call) => Ok(decoded_call.clone()), - } - } - - /// Converts self to decoded call. - pub fn into_decoded(self) -> Result { - match self { - Self::Encoded(encoded_call) => - ChainCall::decode(&mut &encoded_call[..]).map_err(Into::into), - Self::Decoded(decoded_call) => Ok(decoded_call), - } - } - - /// Converts self to encoded call. - pub fn into_encoded(self) -> Vec { - match self { - Self::Encoded(encoded_call) => encoded_call, - Self::Decoded(decoded_call) => decoded_call.encode(), - } - } -} - -impl From for EncodedOrDecodedCall { - fn from(call: ChainCall) -> EncodedOrDecodedCall { - EncodedOrDecodedCall::Decoded(call) - } -} - -impl Decode for EncodedOrDecodedCall { - fn decode(input: &mut I) -> Result { - // having encoded version is better than decoded, because decoding isn't required - // everywhere and for mocked calls it may lead to **unneeded** errors - match input.remaining_len()? { - Some(remaining_len) => { - let mut encoded_call = vec![0u8; remaining_len]; - input.read(&mut encoded_call)?; - Ok(EncodedOrDecodedCall::Encoded(encoded_call)) - }, - None => Ok(EncodedOrDecodedCall::Decoded(ChainCall::decode(input)?)), - } - } -} - -impl Encode for EncodedOrDecodedCall { - fn encode(&self) -> Vec { - match *self { - Self::Encoded(ref encoded_call) => encoded_call.clone(), - Self::Decoded(ref decoded_call) => decoded_call.encode(), - } - } -} - -// dummy implementation to satisfy `SignedPayload` requirements -impl sp_runtime::traits::Dispatchable for EncodedOrDecodedCall { - type RuntimeOrigin = (); - type Config = (); - type Info = (); - type PostInfo = (); - fn dispatch(self, _origin: ()) -> sp_runtime::DispatchResultWithInfo<()> { - unreachable!("never used by relayer; qed") - } -} - -/// Minimal Substrate-based chain representation that may be used from no_std environment. 
-pub trait Chain: Send + Sync + 'static { - /// Chain id. - const ID: ChainId; - - /// A type that fulfills the abstract idea of what a Substrate block number is. - // Constraits come from the associated Number type of `sp_runtime::traits::Header` - // See here for more info: - // https://crates.parity.io/sp_runtime/traits/trait.Header.html#associatedtype.Number - // - // Note that the `AsPrimitive` trait is required by the GRANDPA justification - // verifier, and is not usually part of a Substrate Header's Number type. - type BlockNumber: Parameter - + Member - + MaybeSerializeDeserialize - + Hash - + Copy - + Default - + MaybeDisplay - + AtLeast32BitUnsigned - + FromStr - + AsPrimitive - + Default - + Saturating - + MaxEncodedLen; - - /// A type that fulfills the abstract idea of what a Substrate hash is. - // Constraits come from the associated Hash type of `sp_runtime::traits::Header` - // See here for more info: - // https://crates.parity.io/sp_runtime/traits/trait.Header.html#associatedtype.Hash - type Hash: Parameter - + Member - + MaybeSerializeDeserialize - + Hash - + Ord - + Copy - + MaybeDisplay - + Default - + SimpleBitOps - + AsRef<[u8]> - + AsMut<[u8]> - + MaxEncodedLen; - - /// A type that fulfills the abstract idea of what a Substrate hasher (a type - /// that produces hashes) is. - // Constraits come from the associated Hashing type of `sp_runtime::traits::Header` - // See here for more info: - // https://crates.parity.io/sp_runtime/traits/trait.Header.html#associatedtype.Hashing - type Hasher: HashT; - - /// A type that fulfills the abstract idea of what a Substrate header is. - // See here for more info: - // https://crates.parity.io/sp_runtime/traits/trait.Header.html - type Header: Parameter - + HeaderT - + HeaderIdProvider - + MaybeSerializeDeserialize; - - /// The user account identifier type for the runtime. 
- type AccountId: Parameter - + Member - + MaybeSerializeDeserialize - + Debug - + MaybeDisplay - + Ord - + MaxEncodedLen; - /// Balance of an account in native tokens. - /// - /// The chain may support multiple tokens, but this particular type is for token that is used - /// to pay for transaction dispatch, to reward different relayers (headers, messages), etc. - type Balance: AtLeast32BitUnsigned - + FixedPointOperand - + Parameter - + Member - + MaybeSerializeDeserialize - + Clone - + Copy - + Bounded - + CheckedSub - + PartialOrd - + SaturatingAdd - + Zero - + TryFrom - + MaxEncodedLen; - /// Nonce of a transaction used by the chain. - type Nonce: Parameter - + Member - + MaybeSerialize - + Debug - + Default - + MaybeDisplay - + MaybeSerializeDeserialize - + AtLeast32Bit - + Copy - + MaxEncodedLen; - /// Signature type, used on this chain. - type Signature: Parameter + Verify; - - /// Get the maximum size (in bytes) of a Normal extrinsic at this chain. - fn max_extrinsic_size() -> u32; - /// Get the maximum weight (compute time) that a Normal extrinsic at this chain can use. - fn max_extrinsic_weight() -> Weight; -} - -/// A trait that provides the type of the underlying chain. -pub trait UnderlyingChainProvider: Send + Sync + 'static { - /// Underlying chain type. - type Chain: Chain; -} - -impl Chain for T -where - T: Send + Sync + 'static + UnderlyingChainProvider, -{ - const ID: ChainId = ::ID; - - type BlockNumber = ::BlockNumber; - type Hash = ::Hash; - type Hasher = ::Hasher; - type Header = ::Header; - type AccountId = ::AccountId; - type Balance = ::Balance; - type Nonce = ::Nonce; - type Signature = ::Signature; - - fn max_extrinsic_size() -> u32 { - ::max_extrinsic_size() - } - - fn max_extrinsic_weight() -> Weight { - ::max_extrinsic_weight() - } -} - -/// Minimal parachain representation that may be used from no_std environment. -pub trait Parachain: Chain { - /// Parachain identifier. 
- const PARACHAIN_ID: u32; - /// Maximal size of the parachain header. - /// - /// This isn't a strict limit. The relayer may submit larger headers and the - /// pallet will accept the call. The limit is only used to compute whether - /// the refund can be made. - const MAX_HEADER_SIZE: u32; -} - -impl Parachain for T -where - T: Chain + UnderlyingChainProvider, - ::Chain: Parachain, -{ - const PARACHAIN_ID: u32 = <::Chain as Parachain>::PARACHAIN_ID; - const MAX_HEADER_SIZE: u32 = - <::Chain as Parachain>::MAX_HEADER_SIZE; -} - -/// Adapter for `Get` to access `PARACHAIN_ID` from `trait Parachain` -pub struct ParachainIdOf(sp_std::marker::PhantomData); -impl frame_support::traits::Get for ParachainIdOf { - fn get() -> u32 { - Para::PARACHAIN_ID - } -} - -/// Underlying chain type. -pub type UnderlyingChainOf = ::Chain; - -/// Block number used by the chain. -pub type BlockNumberOf = ::BlockNumber; - -/// Hash type used by the chain. -pub type HashOf = ::Hash; - -/// Hasher type used by the chain. -pub type HasherOf = ::Hasher; - -/// Header type used by the chain. -pub type HeaderOf = ::Header; - -/// Account id type used by the chain. -pub type AccountIdOf = ::AccountId; - -/// Balance type used by the chain. -pub type BalanceOf = ::Balance; - -/// Transaction nonce type used by the chain. -pub type NonceOf = ::Nonce; - -/// Signature type used by the chain. -pub type SignatureOf = ::Signature; - -/// Account public type used by the chain. -pub type AccountPublicOf = as Verify>::Signer; - -/// Transaction era used by the chain. -pub type TransactionEraOf = crate::TransactionEra, HashOf>; - -/// Convenience macro that declares bridge finality runtime apis and related constants for a chain. 
-/// This includes: -/// - chain-specific bridge runtime APIs: -/// - `FinalityApi` -/// - constants that are stringified names of runtime API methods: -/// - `BEST_FINALIZED__HEADER_METHOD` -/// - `_ACCEPTED__FINALITY_PROOFS_METHOD` -/// The name of the chain has to be specified in snake case (e.g. `bridge_hub_polkadot`). -#[macro_export] -macro_rules! decl_bridge_finality_runtime_apis { - ($chain: ident $(, $consensus: ident => $justification_type: ty)?) => { - bp_runtime::paste::item! { - mod [<$chain _finality_api>] { - use super::*; - - /// Name of the `FinalityApi::best_finalized` runtime method. - pub const []: &str = - stringify!([<$chain:camel FinalityApi_best_finalized>]); - - /// Name of the `FinalityApi::free_headers_interval` runtime method. - pub const []: &str = - stringify!([<$chain:camel FinalityApi_free_headers_interval>]); - - $( - /// Name of the `FinalityApi::accepted__finality_proofs` - /// runtime method. - pub const [<$chain:upper _SYNCED_HEADERS_ $consensus:upper _INFO_METHOD>]: &str = - stringify!([<$chain:camel FinalityApi_synced_headers_ $consensus:lower _info>]); - )? - - sp_api::decl_runtime_apis! { - /// API for querying information about the finalized chain headers. - /// - /// This API is implemented by runtimes that are receiving messages from this chain, not by this - /// chain's runtime itself. - pub trait [<$chain:camel FinalityApi>] { - /// Returns number and hash of the best finalized header known to the bridge module. - fn best_finalized() -> Option>; - - /// Returns free headers interval, if it is configured in the runtime. - /// The caller expects that his transactions for every `N`th header - /// (where `N` is the configured interval) will be fee-free. - /// - /// See [`pallet_bridge_grandpa::Config::FreeHeadersInterval`] for details. - fn free_headers_interval() -> Option; - - $( - /// Returns the justifications accepted in the current block. - fn []( - ) -> sp_std::vec::Vec<$justification_type>; - )? 
- } - } - } - - pub use [<$chain _finality_api>]::*; - } - }; - ($chain: ident, grandpa) => { - decl_bridge_finality_runtime_apis!($chain, grandpa => bp_header_chain::StoredHeaderGrandpaInfo
); - }; -} - -/// Convenience macro that declares bridge messages runtime apis and related constants for a chain. -/// This includes: -/// - chain-specific bridge runtime APIs: -/// - `ToOutboundLaneApi` -/// - `FromInboundLaneApi` -/// - constants that are stringified names of runtime API methods: -/// - `FROM__MESSAGE_DETAILS_METHOD`, -/// The name of the chain has to be specified in snake case (e.g. `bridge_hub_polkadot`). -#[macro_export] -macro_rules! decl_bridge_messages_runtime_apis { - ($chain: ident) => { - bp_runtime::paste::item! { - mod [<$chain _messages_api>] { - use super::*; - - /// Name of the `ToOutboundLaneApi::message_details` runtime method. - pub const []: &str = - stringify!([]); - - /// Name of the `FromInboundLaneApi::message_details` runtime method. - pub const []: &str = - stringify!([]); - - sp_api::decl_runtime_apis! { - /// Outbound message lane API for messages that are sent to this chain. - /// - /// This API is implemented by runtimes that are receiving messages from this chain, not by this - /// chain's runtime itself. - pub trait [] { - /// Returns dispatch weight, encoded payload size and delivery+dispatch fee of all - /// messages in given inclusive range. - /// - /// If some (or all) messages are missing from the storage, they'll also will - /// be missing from the resulting vector. The vector is ordered by the nonce. - fn message_details( - lane: bp_messages::LaneId, - begin: bp_messages::MessageNonce, - end: bp_messages::MessageNonce, - ) -> sp_std::vec::Vec; - } - - /// Inbound message lane API for messages sent by this chain. - /// - /// This API is implemented by runtimes that are receiving messages from this chain, not by this - /// chain's runtime itself. - /// - /// Entries of the resulting vector are matching entries of the `messages` vector. Entries of the - /// `messages` vector may (and need to) be read using `ToOutboundLaneApi::message_details`. - pub trait [] { - /// Return details of given inbound messages. 
- fn message_details( - lane: bp_messages::LaneId, - messages: sp_std::vec::Vec<(bp_messages::MessagePayload, bp_messages::OutboundMessageDetails)>, - ) -> sp_std::vec::Vec; - } - } - } - - pub use [<$chain _messages_api>]::*; - } - }; -} - -/// Convenience macro that declares bridge finality runtime apis, bridge messages runtime apis -/// and related constants for a chain. -/// The name of the chain has to be specified in snake case (e.g. `bridge_hub_polkadot`). -#[macro_export] -macro_rules! decl_bridge_runtime_apis { - ($chain: ident $(, $consensus: ident)?) => { - bp_runtime::decl_bridge_finality_runtime_apis!($chain $(, $consensus)?); - bp_runtime::decl_bridge_messages_runtime_apis!($chain); - }; -} diff --git a/primitives/runtime/src/extensions.rs b/primitives/runtime/src/extensions.rs deleted file mode 100644 index a31e7b5bb47a64ec2333bbaba3e9c520aa53ef5a..0000000000000000000000000000000000000000 --- a/primitives/runtime/src/extensions.rs +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Primitives that may be used for creating signed extensions for indirect runtimes. 
- -use codec::{Compact, Decode, Encode}; -use impl_trait_for_tuples::impl_for_tuples; -use scale_info::{StaticTypeInfo, TypeInfo}; -use sp_runtime::{ - impl_tx_ext_default, - traits::{Dispatchable, TransactionExtension, TransactionExtensionBase}, - transaction_validity::TransactionValidityError, -}; -use sp_std::{fmt::Debug, marker::PhantomData}; - -/// Trait that describes some properties of a `TransactionExtension` that are needed in order to -/// send a transaction to the chain. -pub trait TransactionExtensionSchema: - Encode + Decode + Debug + Eq + Clone + StaticTypeInfo -{ - /// A type of the data encoded as part of the transaction. - type Payload: Encode + Decode + Debug + Eq + Clone + StaticTypeInfo; - /// Parameters which are part of the payload used to produce transaction signature, - /// but don't end up in the transaction itself (i.e. inherent part of the runtime). - type Implicit: Encode + Decode + Debug + Eq + Clone + StaticTypeInfo; -} - -impl TransactionExtensionSchema for () { - type Payload = (); - type Implicit = (); -} - -/// An implementation of `TransactionExtensionSchema` using generic params. -#[derive(Encode, Decode, Clone, Debug, PartialEq, Eq, TypeInfo)] -pub struct GenericTransactionExtensionSchema(PhantomData<(P, S)>); - -impl TransactionExtensionSchema for GenericTransactionExtensionSchema -where - P: Encode + Decode + Debug + Eq + Clone + StaticTypeInfo, - S: Encode + Decode + Debug + Eq + Clone + StaticTypeInfo, -{ - type Payload = P; - type Implicit = S; -} - -/// The `TransactionExtensionSchema` for `frame_system::CheckNonZeroSender`. -pub type CheckNonZeroSender = GenericTransactionExtensionSchema<(), ()>; - -/// The `TransactionExtensionSchema` for `frame_system::CheckSpecVersion`. -pub type CheckSpecVersion = GenericTransactionExtensionSchema<(), u32>; - -/// The `TransactionExtensionSchema` for `frame_system::CheckTxVersion`. 
-pub type CheckTxVersion = GenericTransactionExtensionSchema<(), u32>; - -/// The `TransactionExtensionSchema` for `frame_system::CheckGenesis`. -pub type CheckGenesis = GenericTransactionExtensionSchema<(), Hash>; - -/// The `TransactionExtensionSchema` for `frame_system::CheckEra`. -pub type CheckEra = GenericTransactionExtensionSchema; - -/// The `TransactionExtensionSchema` for `frame_system::CheckNonce`. -pub type CheckNonce = GenericTransactionExtensionSchema, ()>; - -/// The `TransactionExtensionSchema` for `frame_system::CheckWeight`. -pub type CheckWeight = GenericTransactionExtensionSchema<(), ()>; - -/// The `TransactionExtensionSchema` for `pallet_transaction_payment::ChargeTransactionPayment`. -pub type ChargeTransactionPayment = - GenericTransactionExtensionSchema, ()>; - -/// The `TransactionExtensionSchema` for `polkadot-runtime-common::PrevalidateAttests`. -pub type PrevalidateAttests = GenericTransactionExtensionSchema<(), ()>; - -/// The `TransactionExtensionSchema` for `BridgeRejectObsoleteHeadersAndMessages`. -pub type BridgeRejectObsoleteHeadersAndMessages = GenericTransactionExtensionSchema<(), ()>; - -/// The `TransactionExtensionSchema` for `RefundBridgedParachainMessages`. -/// This schema is dedicated for `RefundBridgedParachainMessages` signed extension as -/// wildcard/placeholder, which relies on the scale encoding for `()` or `((), ())`, or `((), (), -/// ())` is the same. 
So runtime can contains any kind of tuple: -/// `(BridgeRefundBridgeHubRococoMessages)` -/// `(BridgeRefundBridgeHubRococoMessages, BridgeRefundBridgeHubWestendMessages)` -/// `(BridgeRefundParachainMessages1, ..., BridgeRefundParachainMessagesN)` -pub type RefundBridgedParachainMessagesSchema = GenericTransactionExtensionSchema<(), ()>; - -#[impl_for_tuples(1, 12)] -impl TransactionExtensionSchema for Tuple { - for_tuples!( type Payload = ( #( Tuple::Payload ),* ); ); - for_tuples!( type Implicit = ( #( Tuple::Implicit ),* ); ); -} - -/// A simplified version of signed extensions meant for producing signed transactions -/// and signed payloads in the client code. -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -pub struct GenericTransactionExtension { - /// A payload that is included in the transaction. - pub payload: S::Payload, - #[codec(skip)] - // It may be set to `None` if extensions are decoded. We are never reconstructing transactions - // (and it makes no sense to do that) => decoded version of `TransactionExtensions` is only - // used to read fields of the `payload`. And when resigning transaction, we're reconstructing - // `TransactionExtensions` from scratch. - implicit: Option, -} - -impl GenericTransactionExtension { - /// Create new `GenericTransactionExtension` object. - pub fn new(payload: S::Payload, implicit: Option) -> Self { - Self { payload, implicit } - } -} - -impl TransactionExtensionBase for GenericTransactionExtension -where - S: TransactionExtensionSchema, - S::Payload: Send + Sync, - S::Implicit: Send + Sync, -{ - const IDENTIFIER: &'static str = "Not needed."; - type Implicit = S::Implicit; - - fn implicit(&self) -> Result { - // we shall not ever see this error in relay, because we are never signing decoded - // transactions. Instead we're constructing and signing new transactions. 
So the error code - // is kinda random here - self.implicit - .clone() - .ok_or(frame_support::unsigned::TransactionValidityError::Unknown( - frame_support::unsigned::UnknownTransaction::Custom(0xFF), - )) - } -} -impl TransactionExtension for GenericTransactionExtension -where - C: Dispatchable, - S: TransactionExtensionSchema, - S::Payload: Send + Sync, - S::Implicit: Send + Sync, -{ - type Pre = (); - type Val = (); - - impl_tx_ext_default!(C; Context; validate prepare); -} diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs deleted file mode 100644 index 850318923dc7671c26cc3edcf2f9d59bd7b987b9..0000000000000000000000000000000000000000 --- a/primitives/runtime/src/lib.rs +++ /dev/null @@ -1,545 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Primitives that may be used at (bridges) runtime level. 
- -#![warn(missing_docs)] -#![cfg_attr(not(feature = "std"), no_std)] - -use codec::{Decode, Encode, FullCodec, MaxEncodedLen}; -use frame_support::{ - pallet_prelude::DispatchResult, weights::Weight, PalletError, StorageHasher, StorageValue, -}; -use frame_system::RawOrigin; -use scale_info::TypeInfo; -use serde::{Deserialize, Serialize}; -use sp_core::storage::StorageKey; -use sp_runtime::{ - traits::{BadOrigin, Header as HeaderT, UniqueSaturatedInto}, - RuntimeDebug, -}; -use sp_std::{convert::TryFrom, fmt::Debug, ops::RangeInclusive, vec, vec::Vec}; - -pub use chain::{ - AccountIdOf, AccountPublicOf, BalanceOf, BlockNumberOf, Chain, EncodedOrDecodedCall, HashOf, - HasherOf, HeaderOf, NonceOf, Parachain, ParachainIdOf, SignatureOf, TransactionEraOf, - UnderlyingChainOf, UnderlyingChainProvider, -}; -pub use frame_support::storage::storage_prefix as storage_value_final_key; -use num_traits::{CheckedAdd, CheckedSub, One, SaturatingAdd, Zero}; -pub use storage_proof::{ - record_all_keys as record_all_trie_keys, Error as StorageProofError, - ProofSize as StorageProofSize, RawStorageProof, StorageProofChecker, -}; -pub use storage_types::BoundedStorageValue; - -#[cfg(feature = "std")] -pub use storage_proof::craft_valid_storage_proof; - -pub mod extensions; -pub mod messages; - -mod chain; -mod storage_proof; -mod storage_types; - -// Re-export macro to aviod include paste dependency everywhere -pub use sp_runtime::paste; - -/// Use this when something must be shared among all instances. -pub const NO_INSTANCE_ID: ChainId = [0, 0, 0, 0]; - -/// Generic header Id. -#[derive( - RuntimeDebug, - Default, - Clone, - Encode, - Decode, - Copy, - Eq, - Hash, - MaxEncodedLen, - PartialEq, - PartialOrd, - Ord, - TypeInfo, -)] -pub struct HeaderId(pub Number, pub Hash); - -impl HeaderId { - /// Return header number. - pub fn number(&self) -> Number { - self.0 - } - - /// Return header hash. - pub fn hash(&self) -> Hash { - self.1 - } -} - -/// Header id used by the chain. 
-pub type HeaderIdOf = HeaderId, BlockNumberOf>; - -/// Generic header id provider. -pub trait HeaderIdProvider { - /// Get the header id. - fn id(&self) -> HeaderId; - - /// Get the header id for the parent block. - fn parent_id(&self) -> Option>; -} - -impl HeaderIdProvider
for Header { - fn id(&self) -> HeaderId { - HeaderId(*self.number(), self.hash()) - } - - fn parent_id(&self) -> Option> { - self.number() - .checked_sub(&One::one()) - .map(|parent_number| HeaderId(parent_number, *self.parent_hash())) - } -} - -/// Unique identifier of the chain. -/// -/// In addition to its main function (identifying the chain), this type may also be used to -/// identify module instance. We have a bunch of pallets that may be used in different bridges. E.g. -/// messages pallet may be deployed twice in the same runtime to bridge ThisChain with Chain1 and -/// Chain2. Sometimes we need to be able to identify deployed instance dynamically. This type may be -/// used for that. -pub type ChainId = [u8; 4]; - -/// Anything that has size. -pub trait Size { - /// Return size of this object (in bytes). - fn size(&self) -> u32; -} - -impl Size for () { - fn size(&self) -> u32 { - 0 - } -} - -impl Size for Vec { - fn size(&self) -> u32 { - self.len() as _ - } -} - -/// Pre-computed size. -pub struct PreComputedSize(pub usize); - -impl Size for PreComputedSize { - fn size(&self) -> u32 { - u32::try_from(self.0).unwrap_or(u32::MAX) - } -} - -/// Era of specific transaction. -#[derive(RuntimeDebug, Clone, Copy, PartialEq)] -pub enum TransactionEra { - /// Transaction is immortal. - Immortal, - /// Transaction is valid for a given number of blocks, starting from given block. - Mortal(HeaderId, u32), -} - -impl, BlockHash: Copy> - TransactionEra -{ - /// Prepare transaction era, based on mortality period and current best block number. - pub fn new( - best_block_id: HeaderId, - mortality_period: Option, - ) -> Self { - mortality_period - .map(|mortality_period| TransactionEra::Mortal(best_block_id, mortality_period)) - .unwrap_or(TransactionEra::Immortal) - } - - /// Create new immortal transaction era. - pub fn immortal() -> Self { - TransactionEra::Immortal - } - - /// Returns mortality period if transaction is mortal. 
- pub fn mortality_period(&self) -> Option { - match *self { - TransactionEra::Immortal => None, - TransactionEra::Mortal(_, period) => Some(period), - } - } - - /// Returns era that is used by FRAME-based runtimes. - pub fn frame_era(&self) -> sp_runtime::generic::Era { - match *self { - TransactionEra::Immortal => sp_runtime::generic::Era::immortal(), - // `unique_saturated_into` is fine here - mortality `u64::MAX` is not something we - // expect to see on any chain - TransactionEra::Mortal(header_id, period) => - sp_runtime::generic::Era::mortal(period as _, header_id.0.unique_saturated_into()), - } - } - - /// Returns header hash that needs to be included in the signature payload. - pub fn signed_payload(&self, genesis_hash: BlockHash) -> BlockHash { - match *self { - TransactionEra::Immortal => genesis_hash, - TransactionEra::Mortal(header_id, _) => header_id.1, - } - } -} - -/// This is a copy of the -/// `frame_support::storage::generator::StorageMap::storage_map_final_key` for maps based -/// on selected hasher. -/// -/// We're using it because to call `storage_map_final_key` directly, we need access to the runtime -/// and pallet instance, which (sometimes) is impossible. -pub fn storage_map_final_key( - pallet_prefix: &str, - map_name: &str, - key: &[u8], -) -> StorageKey { - let key_hashed = H::hash(key); - let pallet_prefix_hashed = frame_support::Twox128::hash(pallet_prefix.as_bytes()); - let storage_prefix_hashed = frame_support::Twox128::hash(map_name.as_bytes()); - - let mut final_key = Vec::with_capacity( - pallet_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.as_ref().len(), - ); - - final_key.extend_from_slice(&pallet_prefix_hashed[..]); - final_key.extend_from_slice(&storage_prefix_hashed[..]); - final_key.extend_from_slice(key_hashed.as_ref()); - - StorageKey(final_key) -} - -/// This is how a storage key of storage value is computed. -/// -/// Copied from `frame_support::storage::storage_prefix`. 
-pub fn storage_value_key(pallet_prefix: &str, value_name: &str) -> StorageKey { - let pallet_hash = sp_io::hashing::twox_128(pallet_prefix.as_bytes()); - let storage_hash = sp_io::hashing::twox_128(value_name.as_bytes()); - - let mut final_key = vec![0u8; 32]; - final_key[..16].copy_from_slice(&pallet_hash); - final_key[16..].copy_from_slice(&storage_hash); - - StorageKey(final_key) -} - -/// Can be use to access the runtime storage key of a `StorageMap`. -pub trait StorageMapKeyProvider { - /// The name of the variable that holds the `StorageMap`. - const MAP_NAME: &'static str; - - /// The same as `StorageMap::Hasher1`. - type Hasher: StorageHasher; - /// The same as `StorageMap::Key1`. - type Key: FullCodec; - /// The same as `StorageMap::Value`. - type Value: FullCodec; - - /// This is a copy of the - /// `frame_support::storage::generator::StorageMap::storage_map_final_key`. - /// - /// We're using it because to call `storage_map_final_key` directly, we need access - /// to the runtime and pallet instance, which (sometimes) is impossible. - fn final_key(pallet_prefix: &str, key: &Self::Key) -> StorageKey { - storage_map_final_key::(pallet_prefix, Self::MAP_NAME, &key.encode()) - } -} - -/// Can be use to access the runtime storage key of a `StorageDoubleMap`. -pub trait StorageDoubleMapKeyProvider { - /// The name of the variable that holds the `StorageDoubleMap`. - const MAP_NAME: &'static str; - - /// The same as `StorageDoubleMap::Hasher1`. - type Hasher1: StorageHasher; - /// The same as `StorageDoubleMap::Key1`. - type Key1: FullCodec; - /// The same as `StorageDoubleMap::Hasher2`. - type Hasher2: StorageHasher; - /// The same as `StorageDoubleMap::Key2`. - type Key2: FullCodec; - /// The same as `StorageDoubleMap::Value`. - type Value: FullCodec; - - /// This is a copy of the - /// `frame_support::storage::generator::StorageDoubleMap::storage_double_map_final_key`. 
- /// - /// We're using it because to call `storage_double_map_final_key` directly, we need access - /// to the runtime and pallet instance, which (sometimes) is impossible. - fn final_key(pallet_prefix: &str, key1: &Self::Key1, key2: &Self::Key2) -> StorageKey { - let key1_hashed = Self::Hasher1::hash(&key1.encode()); - let key2_hashed = Self::Hasher2::hash(&key2.encode()); - let pallet_prefix_hashed = frame_support::Twox128::hash(pallet_prefix.as_bytes()); - let storage_prefix_hashed = frame_support::Twox128::hash(Self::MAP_NAME.as_bytes()); - - let mut final_key = Vec::with_capacity( - pallet_prefix_hashed.len() + - storage_prefix_hashed.len() + - key1_hashed.as_ref().len() + - key2_hashed.as_ref().len(), - ); - - final_key.extend_from_slice(&pallet_prefix_hashed[..]); - final_key.extend_from_slice(&storage_prefix_hashed[..]); - final_key.extend_from_slice(key1_hashed.as_ref()); - final_key.extend_from_slice(key2_hashed.as_ref()); - - StorageKey(final_key) - } -} - -/// Error generated by the `OwnedBridgeModule` trait. -#[derive(Encode, Decode, PartialEq, Eq, TypeInfo, PalletError)] -pub enum OwnedBridgeModuleError { - /// All pallet operations are halted. - Halted, -} - -/// Operating mode for a bridge module. -pub trait OperatingMode: Send + Copy + Debug + FullCodec { - /// Returns true if the bridge module is halted. - fn is_halted(&self) -> bool; -} - -/// Basic operating modes for a bridges module (Normal/Halted). -#[derive( - Encode, - Decode, - Clone, - Copy, - PartialEq, - Eq, - RuntimeDebug, - TypeInfo, - MaxEncodedLen, - Serialize, - Deserialize, -)] -pub enum BasicOperatingMode { - /// Normal mode, when all operations are allowed. - Normal, - /// The pallet is halted. All operations (except operating mode change) are prohibited. 
- Halted, -} - -impl Default for BasicOperatingMode { - fn default() -> Self { - Self::Normal - } -} - -impl OperatingMode for BasicOperatingMode { - fn is_halted(&self) -> bool { - *self == BasicOperatingMode::Halted - } -} - -/// Bridge module that has owner and operating mode -pub trait OwnedBridgeModule { - /// The target that will be used when publishing logs related to this module. - const LOG_TARGET: &'static str; - - /// A storage entry that holds the module `Owner` account. - type OwnerStorage: StorageValue>; - /// Operating mode type of the pallet. - type OperatingMode: OperatingMode; - /// A storage value that holds the pallet operating mode. - type OperatingModeStorage: StorageValue; - - /// Check if the module is halted. - fn is_halted() -> bool { - Self::OperatingModeStorage::get().is_halted() - } - - /// Ensure that the origin is either root, or `PalletOwner`. - fn ensure_owner_or_root(origin: T::RuntimeOrigin) -> Result<(), BadOrigin> { - match origin.into() { - Ok(RawOrigin::Root) => Ok(()), - Ok(RawOrigin::Signed(ref signer)) - if Self::OwnerStorage::get().as_ref() == Some(signer) => - Ok(()), - _ => Err(BadOrigin), - } - } - - /// Ensure that the module is not halted. - fn ensure_not_halted() -> Result<(), OwnedBridgeModuleError> { - match Self::is_halted() { - true => Err(OwnedBridgeModuleError::Halted), - false => Ok(()), - } - } - - /// Change the owner of the module. - fn set_owner(origin: T::RuntimeOrigin, maybe_owner: Option) -> DispatchResult { - Self::ensure_owner_or_root(origin)?; - match maybe_owner { - Some(owner) => { - Self::OwnerStorage::put(&owner); - log::info!(target: Self::LOG_TARGET, "Setting pallet Owner to: {:?}", owner); - }, - None => { - Self::OwnerStorage::kill(); - log::info!(target: Self::LOG_TARGET, "Removed Owner of pallet."); - }, - } - - Ok(()) - } - - /// Halt or resume all/some module operations. 
- fn set_operating_mode( - origin: T::RuntimeOrigin, - operating_mode: Self::OperatingMode, - ) -> DispatchResult { - Self::ensure_owner_or_root(origin)?; - Self::OperatingModeStorage::put(operating_mode); - log::info!(target: Self::LOG_TARGET, "Setting operating mode to {:?}.", operating_mode); - Ok(()) - } -} - -/// All extra operations with weights that we need in bridges. -pub trait WeightExtraOps { - /// Checked division of individual components of two weights. - /// - /// Divides components and returns minimal division result. Returns `None` if one - /// of `other` weight components is zero. - fn min_components_checked_div(&self, other: Weight) -> Option; -} - -impl WeightExtraOps for Weight { - fn min_components_checked_div(&self, other: Weight) -> Option { - Some(sp_std::cmp::min( - self.ref_time().checked_div(other.ref_time())?, - self.proof_size().checked_div(other.proof_size())?, - )) - } -} - -/// Trait that provides a static `str`. -pub trait StaticStrProvider { - /// Static string. - const STR: &'static str; -} - -/// A macro that generates `StaticStrProvider` with the string set to its stringified argument. -#[macro_export] -macro_rules! generate_static_str_provider { - ($str:expr) => { - $crate::paste::item! { - pub struct []; - - impl $crate::StaticStrProvider for [] { - const STR: &'static str = stringify!($str); - } - } - }; -} - -/// Error message that is only dispayable in `std` environment. 
-#[derive(Encode, Decode, Clone, Eq, PartialEq, PalletError, TypeInfo)] -#[scale_info(skip_type_params(T))] -pub struct StrippableError { - _phantom_data: sp_std::marker::PhantomData, - #[codec(skip)] - #[cfg(feature = "std")] - message: String, -} - -impl From for StrippableError { - fn from(_err: T) -> Self { - Self { - _phantom_data: Default::default(), - #[cfg(feature = "std")] - message: format!("{:?}", _err), - } - } -} - -impl Debug for StrippableError { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { - f.write_str(&self.message) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { - f.write_str("Stripped error") - } -} - -/// A trait defining helper methods for `RangeInclusive` (start..=end) -pub trait RangeInclusiveExt { - /// Computes the length of the `RangeInclusive`, checking for underflow and overflow. - fn checked_len(&self) -> Option; - /// Computes the length of the `RangeInclusive`, saturating in case of underflow or overflow. 
- fn saturating_len(&self) -> Idx; -} - -impl RangeInclusiveExt for RangeInclusive -where - Idx: CheckedSub + CheckedAdd + SaturatingAdd + One + Zero, -{ - fn checked_len(&self) -> Option { - self.end() - .checked_sub(self.start()) - .and_then(|len| len.checked_add(&Idx::one())) - } - - fn saturating_len(&self) -> Idx { - let len = match self.end().checked_sub(self.start()) { - Some(len) => len, - None => return Idx::zero(), - }; - len.saturating_add(&Idx::one()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn storage_value_key_works() { - assert_eq!( - storage_value_key("PalletTransactionPayment", "NextFeeMultiplier"), - StorageKey( - hex_literal::hex!( - "f0e954dfcca51a255ab12c60c789256a3f2edf3bdf381debe331ab7446addfdc" - ) - .to_vec() - ), - ); - } - - #[test] - fn generate_static_str_provider_works() { - generate_static_str_provider!(Test); - assert_eq!(StrTest::STR, "Test"); - } -} diff --git a/primitives/runtime/src/messages.rs b/primitives/runtime/src/messages.rs deleted file mode 100644 index 0f219e984f7289b231c888c55e0e2acfb8829a88..0000000000000000000000000000000000000000 --- a/primitives/runtime/src/messages.rs +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! 
Primitives that may be used by different message delivery and dispatch mechanisms. - -use codec::{Decode, Encode}; -use frame_support::weights::Weight; -use scale_info::TypeInfo; -use sp_runtime::RuntimeDebug; - -/// Message dispatch result. -#[derive(Encode, Decode, RuntimeDebug, Clone, PartialEq, Eq, TypeInfo)] -pub struct MessageDispatchResult { - /// Unspent dispatch weight. This weight that will be deducted from total delivery transaction - /// weight, thus reducing the transaction cost. This shall not be zero in (at least) two cases: - /// - /// 1) if message has been dispatched successfully, but post-dispatch weight is less than the - /// weight, declared by the message sender; - /// 2) if message has not been dispatched at all. - pub unspent_weight: Weight, - /// Fine-grained result of single message dispatch (for better diagnostic purposes) - pub dispatch_level_result: DispatchLevelResult, -} diff --git a/primitives/runtime/src/storage_proof.rs b/primitives/runtime/src/storage_proof.rs deleted file mode 100644 index 1b706aa66c16fc73a21ce83f550bea8a8fe128e5..0000000000000000000000000000000000000000 --- a/primitives/runtime/src/storage_proof.rs +++ /dev/null @@ -1,272 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! 
Logic for checking Substrate storage proofs. - -use crate::StrippableError; -use codec::{Decode, Encode}; -use frame_support::PalletError; -use hash_db::{HashDB, Hasher, EMPTY_PREFIX}; -use scale_info::TypeInfo; -use sp_std::{boxed::Box, collections::btree_set::BTreeSet, vec::Vec}; -use sp_trie::{ - read_trie_value, LayoutV1, MemoryDB, Recorder, StorageProof, Trie, TrieConfiguration, - TrieDBBuilder, TrieError, TrieHash, -}; - -/// Raw storage proof type (just raw trie nodes). -pub type RawStorageProof = Vec>; - -/// Storage proof size requirements. -/// -/// This is currently used by benchmarks when generating storage proofs. -#[derive(Clone, Copy, Debug)] -pub enum ProofSize { - /// The proof is expected to be minimal. If value size may be changed, then it is expected to - /// have given size. - Minimal(u32), - /// The proof is expected to have at least given size and grow by increasing value that is - /// stored in the trie. - HasLargeLeaf(u32), -} - -/// This struct is used to read storage values from a subset of a Merklized database. The "proof" -/// is a subset of the nodes in the Merkle structure of the database, so that it provides -/// authentication against a known Merkle root as well as the values in the -/// database themselves. -pub struct StorageProofChecker -where - H: Hasher, -{ - proof_nodes_count: usize, - root: H::Out, - db: MemoryDB, - recorder: Recorder>, -} - -impl StorageProofChecker -where - H: Hasher, -{ - /// Constructs a new storage proof checker. - /// - /// This returns an error if the given proof is invalid with respect to the given root. - pub fn new(root: H::Out, proof: RawStorageProof) -> Result { - // 1. we don't want extra items in the storage proof - // 2. 
`StorageProof` is storing all trie nodes in the `BTreeSet` - // - // => someone could simply add duplicate items to the proof and we won't be - // able to detect that by just using `StorageProof` - // - // => let's check it when we are converting our "raw proof" into `StorageProof` - let proof_nodes_count = proof.len(); - let proof = StorageProof::new(proof); - if proof_nodes_count != proof.iter_nodes().count() { - return Err(Error::DuplicateNodesInProof) - } - - let db = proof.into_memory_db(); - if !db.contains(&root, EMPTY_PREFIX) { - return Err(Error::StorageRootMismatch) - } - - let recorder = Recorder::default(); - let checker = StorageProofChecker { proof_nodes_count, root, db, recorder }; - Ok(checker) - } - - /// Returns error if the proof has some nodes that are left intact by previous `read_value` - /// calls. - pub fn ensure_no_unused_nodes(mut self) -> Result<(), Error> { - let visited_nodes = self - .recorder - .drain() - .into_iter() - .map(|record| record.data) - .collect::>(); - let visited_nodes_count = visited_nodes.len(); - if self.proof_nodes_count == visited_nodes_count { - Ok(()) - } else { - Err(Error::UnusedNodesInTheProof) - } - } - - /// Reads a value from the available subset of storage. If the value cannot be read due to an - /// incomplete or otherwise invalid proof, this function returns an error. - pub fn read_value(&mut self, key: &[u8]) -> Result>, Error> { - // LayoutV1 or LayoutV0 is identical for proof that only read values. - read_trie_value::, _>(&self.db, &self.root, key, Some(&mut self.recorder), None) - .map_err(|_| Error::StorageValueUnavailable) - } - - /// Reads and decodes a value from the available subset of storage. If the value cannot be read - /// due to an incomplete or otherwise invalid proof, this function returns an error. If value is - /// read, but decoding fails, this function returns an error. 
- pub fn read_and_decode_value(&mut self, key: &[u8]) -> Result, Error> { - self.read_value(key).and_then(|v| { - v.map(|v| T::decode(&mut &v[..]).map_err(|e| Error::StorageValueDecodeFailed(e.into()))) - .transpose() - }) - } - - /// Reads and decodes a value from the available subset of storage. If the value cannot be read - /// due to an incomplete or otherwise invalid proof, or if the value is `None`, this function - /// returns an error. If value is read, but decoding fails, this function returns an error. - pub fn read_and_decode_mandatory_value(&mut self, key: &[u8]) -> Result { - self.read_and_decode_value(key)?.ok_or(Error::StorageValueEmpty) - } - - /// Reads and decodes a value from the available subset of storage. If the value cannot be read - /// due to an incomplete or otherwise invalid proof, this function returns `Ok(None)`. - /// If value is read, but decoding fails, this function returns an error. - pub fn read_and_decode_opt_value(&mut self, key: &[u8]) -> Result, Error> { - match self.read_and_decode_value(key) { - Ok(outbound_lane_data) => Ok(outbound_lane_data), - Err(Error::StorageValueUnavailable) => Ok(None), - Err(e) => Err(e), - } - } -} - -/// Storage proof related errors. -#[derive(Encode, Decode, Clone, Eq, PartialEq, PalletError, Debug, TypeInfo)] -pub enum Error { - /// Duplicate trie nodes are found in the proof. - DuplicateNodesInProof, - /// Unused trie nodes are found in the proof. - UnusedNodesInTheProof, - /// Expected storage root is missing from the proof. - StorageRootMismatch, - /// Unable to reach expected storage value using provided trie nodes. - StorageValueUnavailable, - /// The storage value is `None`. - StorageValueEmpty, - /// Failed to decode storage value. - StorageValueDecodeFailed(StrippableError), -} - -/// Return valid storage proof and state root. -/// -/// NOTE: This should only be used for **testing**. 
-#[cfg(feature = "std")] -pub fn craft_valid_storage_proof() -> (sp_core::H256, RawStorageProof) { - use sp_state_machine::{backend::Backend, prove_read, InMemoryBackend}; - - let state_version = sp_runtime::StateVersion::default(); - - // construct storage proof - let backend = >::from(( - vec![ - (None, vec![(b"key1".to_vec(), Some(b"value1".to_vec()))]), - (None, vec![(b"key2".to_vec(), Some(b"value2".to_vec()))]), - (None, vec![(b"key3".to_vec(), Some(b"value3".to_vec()))]), - (None, vec![(b"key4".to_vec(), Some((42u64, 42u32, 42u16, 42u8).encode()))]), - // Value is too big to fit in a branch node - (None, vec![(b"key11".to_vec(), Some(vec![0u8; 32]))]), - ], - state_version, - )); - let root = backend.storage_root(std::iter::empty(), state_version).0; - let proof = - prove_read(backend, &[&b"key1"[..], &b"key2"[..], &b"key4"[..], &b"key22"[..]]).unwrap(); - - (root, proof.into_nodes().into_iter().collect()) -} - -/// Record all keys for a given root. -pub fn record_all_keys( - db: &DB, - root: &TrieHash, -) -> Result>> -where - DB: hash_db::HashDBRef, -{ - let mut recorder = Recorder::::new(); - let trie = TrieDBBuilder::::new(db, root).with_recorder(&mut recorder).build(); - for x in trie.iter()? 
{ - let (key, _) = x?; - trie.get(&key)?; - } - - // recorder may record the same trie node multiple times and we don't want duplicate nodes - // in our proofs => let's deduplicate it by collecting to the BTreeSet first - Ok(recorder - .drain() - .into_iter() - .map(|n| n.data.to_vec()) - .collect::>() - .into_iter() - .collect()) -} - -#[cfg(test)] -pub mod tests { - use super::*; - use codec::Encode; - - #[test] - fn storage_proof_check() { - let (root, proof) = craft_valid_storage_proof(); - - // check proof in runtime - let mut checker = - >::new(root, proof.clone()).unwrap(); - assert_eq!(checker.read_value(b"key1"), Ok(Some(b"value1".to_vec()))); - assert_eq!(checker.read_value(b"key2"), Ok(Some(b"value2".to_vec()))); - assert_eq!(checker.read_value(b"key4"), Ok(Some((42u64, 42u32, 42u16, 42u8).encode()))); - assert_eq!(checker.read_value(b"key11111"), Err(Error::StorageValueUnavailable)); - assert_eq!(checker.read_value(b"key22"), Ok(None)); - assert_eq!(checker.read_and_decode_value(b"key4"), Ok(Some((42u64, 42u32, 42u16, 42u8))),); - assert!(matches!( - checker.read_and_decode_value::<[u8; 64]>(b"key4"), - Err(Error::StorageValueDecodeFailed(_)), - )); - - // checking proof against invalid commitment fails - assert_eq!( - >::new(sp_core::H256::random(), proof).err(), - Some(Error::StorageRootMismatch) - ); - } - - #[test] - fn proof_with_duplicate_items_is_rejected() { - let (root, mut proof) = craft_valid_storage_proof(); - proof.push(proof.first().unwrap().clone()); - - assert_eq!( - StorageProofChecker::::new(root, proof).map(drop), - Err(Error::DuplicateNodesInProof), - ); - } - - #[test] - fn proof_with_unused_items_is_rejected() { - let (root, proof) = craft_valid_storage_proof(); - - let mut checker = - StorageProofChecker::::new(root, proof.clone()).unwrap(); - checker.read_value(b"key1").unwrap(); - checker.read_value(b"key2").unwrap(); - checker.read_value(b"key4").unwrap(); - checker.read_value(b"key22").unwrap(); - 
assert_eq!(checker.ensure_no_unused_nodes(), Ok(())); - - let checker = StorageProofChecker::::new(root, proof).unwrap(); - assert_eq!(checker.ensure_no_unused_nodes(), Err(Error::UnusedNodesInTheProof)); - } -} diff --git a/primitives/runtime/src/storage_types.rs b/primitives/runtime/src/storage_types.rs deleted file mode 100644 index 91c5451805a9861d9e58c462f9ab5849409d59a7..0000000000000000000000000000000000000000 --- a/primitives/runtime/src/storage_types.rs +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Wrapper for a runtime storage value that checks if value exceeds given maximum -//! during conversion. - -use codec::{Decode, Encode, MaxEncodedLen}; -use frame_support::traits::Get; -use scale_info::{Type, TypeInfo}; -use sp_runtime::RuntimeDebug; -use sp_std::{marker::PhantomData, ops::Deref}; - -/// Error that is returned when the value size exceeds maximal configured size. -#[derive(RuntimeDebug)] -pub struct MaximalSizeExceededError { - /// Size of the value. - pub value_size: usize, - /// Maximal configured size. - pub maximal_size: usize, -} - -/// A bounded runtime storage value. 
-#[derive(Clone, Decode, Encode, Eq, PartialEq)] -pub struct BoundedStorageValue { - value: V, - _phantom: PhantomData, -} - -impl sp_std::fmt::Debug for BoundedStorageValue { - fn fmt(&self, fmt: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - self.value.fmt(fmt) - } -} - -impl, V: Encode> BoundedStorageValue { - /// Construct `BoundedStorageValue` from the underlying `value` with all required checks. - /// - /// Returns error if value size exceeds given bounds. - pub fn try_from_inner(value: V) -> Result { - // this conversion is heavy (since we do encoding here), so we may want to optimize it later - // (e.g. by introducing custom Encode implementation, and turning `BoundedStorageValue` into - // `enum BoundedStorageValue { Decoded(V), Encoded(Vec) }`) - let value_size = value.encoded_size(); - let maximal_size = B::get() as usize; - if value_size > maximal_size { - Err(MaximalSizeExceededError { value_size, maximal_size }) - } else { - Ok(BoundedStorageValue { value, _phantom: Default::default() }) - } - } - - /// Convert into the inner type - pub fn into_inner(self) -> V { - self.value - } -} - -impl Deref for BoundedStorageValue { - type Target = V; - - fn deref(&self) -> &Self::Target { - &self.value - } -} - -impl TypeInfo for BoundedStorageValue { - type Identity = Self; - - fn type_info() -> Type { - V::type_info() - } -} - -impl, V: Encode> MaxEncodedLen for BoundedStorageValue { - fn max_encoded_len() -> usize { - B::get() as usize - } -} diff --git a/primitives/test-utils/Cargo.toml b/primitives/test-utils/Cargo.toml deleted file mode 100644 index 1b3ac9ee697cb50c8563922775b09ca469c1c06e..0000000000000000000000000000000000000000 --- a/primitives/test-utils/Cargo.toml +++ /dev/null @@ -1,43 +0,0 @@ -[package] -name = "bp-test-utils" -version = "0.7.0" -description = "Utilities for testing substrate-based runtime bridge code" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] 
-workspace = true - -[dependencies] -bp-header-chain = { path = "../header-chain", default-features = false } -bp-parachains = { path = "../parachains", default-features = false } -bp-polkadot-core = { path = "../polkadot-core", default-features = false } -bp-runtime = { path = "../runtime", default-features = false } -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false } -ed25519-dalek = { version = "2.1", default-features = false } -finality-grandpa = { version = "0.16.2", default-features = false } -sp-application-crypto = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-consensus-grandpa = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-trie = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } - -[features] -default = ["std"] -std = [ - "bp-header-chain/std", - "bp-parachains/std", - "bp-polkadot-core/std", - "bp-runtime/std", - "codec/std", - "ed25519-dalek/std", - "finality-grandpa/std", - "sp-application-crypto/std", - "sp-consensus-grandpa/std", - "sp-core/std", - "sp-runtime/std", - "sp-std/std", - "sp-trie/std", -] diff --git a/primitives/test-utils/src/keyring.rs b/primitives/test-utils/src/keyring.rs deleted file mode 100644 index 22691183acf7a16d9889841b82dd7936f8694b90..0000000000000000000000000000000000000000 --- a/primitives/test-utils/src/keyring.rs +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Utilities for working with test accounts. - -use bp_header_chain::{justification::JustificationVerificationContext, AuthoritySet}; -use codec::Encode; -use ed25519_dalek::{Signature, SigningKey, VerifyingKey}; -use finality_grandpa::voter_set::VoterSet; -use sp_consensus_grandpa::{AuthorityId, AuthorityList, AuthorityWeight, SetId}; -use sp_runtime::RuntimeDebug; -use sp_std::prelude::*; - -/// Set of test accounts with friendly names: Alice. -pub const ALICE: Account = Account(0); -/// Set of test accounts with friendly names: Bob. -pub const BOB: Account = Account(1); -/// Set of test accounts with friendly names: Charlie. -pub const CHARLIE: Account = Account(2); -/// Set of test accounts with friendly names: Dave. -pub const DAVE: Account = Account(3); -/// Set of test accounts with friendly names: Eve. -pub const EVE: Account = Account(4); -/// Set of test accounts with friendly names: Ferdie. -pub const FERDIE: Account = Account(5); - -/// A test account which can be used to sign messages. -#[derive(RuntimeDebug, Clone, Copy)] -pub struct Account(pub u16); - -impl Account { - /// Returns public key of this account. - pub fn public(&self) -> VerifyingKey { - self.pair().verifying_key() - } - - /// Returns key pair, used to sign data on behalf of this account. 
- pub fn pair(&self) -> SigningKey { - let data = self.0.encode(); - let mut bytes = [0_u8; 32]; - bytes[0..data.len()].copy_from_slice(&data); - SigningKey::from_bytes(&bytes) - } - - /// Generate a signature of given message. - pub fn sign(&self, msg: &[u8]) -> Signature { - use ed25519_dalek::Signer; - self.pair().sign(msg) - } -} - -impl From for AuthorityId { - fn from(p: Account) -> Self { - sp_application_crypto::UncheckedFrom::unchecked_from(p.public().to_bytes()) - } -} - -/// Get a valid set of voters for a Grandpa round. -pub fn voter_set() -> VoterSet { - VoterSet::new(authority_list()).unwrap() -} - -/// Get a valid justification verification context for a GRANDPA round. -pub fn verification_context(set_id: SetId) -> JustificationVerificationContext { - AuthoritySet { authorities: authority_list(), set_id }.try_into().unwrap() -} - -/// Convenience function to get a list of Grandpa authorities. -pub fn authority_list() -> AuthorityList { - test_keyring().iter().map(|(id, w)| (AuthorityId::from(*id), *w)).collect() -} - -/// Get the corresponding identities from the keyring for the "standard" authority set. -pub fn test_keyring() -> Vec<(Account, AuthorityWeight)> { - vec![(ALICE, 1), (BOB, 1), (CHARLIE, 1)] -} - -/// Get a list of "unique" accounts. -pub fn accounts(len: u16) -> Vec { - (0..len).map(Account).collect() -} diff --git a/primitives/test-utils/src/lib.rs b/primitives/test-utils/src/lib.rs deleted file mode 100644 index 1d80890779bf8310b393d585749e96f9577196a1..0000000000000000000000000000000000000000 --- a/primitives/test-utils/src/lib.rs +++ /dev/null @@ -1,347 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Utilities for testing runtime code. - -#![warn(missing_docs)] -#![cfg_attr(not(feature = "std"), no_std)] - -use bp_header_chain::justification::{required_justification_precommits, GrandpaJustification}; -use bp_parachains::parachain_head_storage_key_at_source; -use bp_polkadot_core::parachains::{ParaHash, ParaHead, ParaHeadsProof, ParaId}; -use bp_runtime::record_all_trie_keys; -use codec::Encode; -use sp_consensus_grandpa::{AuthorityId, AuthoritySignature, AuthorityWeight, SetId}; -use sp_runtime::traits::{Header as HeaderT, One, Zero}; -use sp_std::prelude::*; -use sp_trie::{trie_types::TrieDBMutBuilderV1, LayoutV1, MemoryDB, TrieMut}; - -// Re-export all our test account utilities -pub use keyring::*; - -mod keyring; - -/// GRANDPA round number used across tests. -pub const TEST_GRANDPA_ROUND: u64 = 1; -/// GRANDPA validators set id used across tests. -pub const TEST_GRANDPA_SET_ID: SetId = 1; -/// Name of the `Paras` pallet used across tests. -pub const PARAS_PALLET_NAME: &str = "Paras"; - -/// Configuration parameters when generating test GRANDPA justifications. -#[derive(Clone)] -pub struct JustificationGeneratorParams { - /// The header which we want to finalize. - pub header: H, - /// The GRANDPA round number for the current authority set. - pub round: u64, - /// The current authority set ID. - pub set_id: SetId, - /// The current GRANDPA authority set. - /// - /// The size of the set will determine the number of pre-commits in our justification. 
- pub authorities: Vec<(Account, AuthorityWeight)>, - /// The total number of precommit ancestors in the `votes_ancestries` field our justification. - /// - /// These may be distributed among many forks. - pub ancestors: u32, - /// The number of forks. - /// - /// Useful for creating a "worst-case" scenario in which each authority is on its own fork. - pub forks: u32, -} - -impl Default for JustificationGeneratorParams { - fn default() -> Self { - let required_signatures = required_justification_precommits(test_keyring().len() as _); - Self { - header: test_header(One::one()), - round: TEST_GRANDPA_ROUND, - set_id: TEST_GRANDPA_SET_ID, - authorities: test_keyring().into_iter().take(required_signatures as _).collect(), - ancestors: 2, - forks: 1, - } - } -} - -/// Make a valid GRANDPA justification with sensible defaults -pub fn make_default_justification(header: &H) -> GrandpaJustification { - let params = JustificationGeneratorParams:: { header: header.clone(), ..Default::default() }; - - make_justification_for_header(params) -} - -/// Generate justifications in a way where we are able to tune the number of pre-commits -/// and vote ancestries which are included in the justification. -/// -/// This is useful for benchmarkings where we want to generate valid justifications with -/// a specific number of pre-commits (tuned with the number of "authorities") and/or a specific -/// number of vote ancestries (tuned with the "votes" parameter). -/// -/// Note: This needs at least three authorities or else the verifier will complain about -/// being given an invalid commit. 
-pub fn make_justification_for_header( - params: JustificationGeneratorParams, -) -> GrandpaJustification { - let JustificationGeneratorParams { header, round, set_id, authorities, mut ancestors, forks } = - params; - let (target_hash, target_number) = (header.hash(), *header.number()); - let mut votes_ancestries = vec![]; - let mut precommits = vec![]; - - assert!(forks != 0, "Need at least one fork to have a chain.."); - assert!( - forks as usize <= authorities.len(), - "If we have more forks than authorities we can't create valid pre-commits for all the forks." - ); - - // Roughly, how many vote ancestries do we want per fork - let target_depth = (ancestors + forks - 1) / forks; - - let mut unsigned_precommits = vec![]; - for i in 0..forks { - let depth = if ancestors >= target_depth { - ancestors -= target_depth; - target_depth - } else { - ancestors - }; - - // Note: Adding 1 to account for the target header - let chain = generate_chain(i, depth + 1, &header); - - // We don't include our finality target header in the vote ancestries - for child in &chain[1..] { - votes_ancestries.push(child.clone()); - } - - // The header we need to use when pre-committing is the one at the highest height - // on our chain. 
- let precommit_candidate = chain.last().map(|h| (h.hash(), *h.number())).unwrap(); - unsigned_precommits.push(precommit_candidate); - } - - for (i, (id, _weight)) in authorities.iter().enumerate() { - // Assign authorities to sign pre-commits in a round-robin fashion - let target = unsigned_precommits[i % forks as usize]; - let precommit = signed_precommit::(id, target, round, set_id); - - precommits.push(precommit); - } - - GrandpaJustification { - round, - commit: finality_grandpa::Commit { target_hash, target_number, precommits }, - votes_ancestries, - } -} - -fn generate_chain(fork_id: u32, depth: u32, ancestor: &H) -> Vec { - let mut headers = vec![ancestor.clone()]; - - for i in 1..depth { - let parent = &headers[(i - 1) as usize]; - let (hash, num) = (parent.hash(), *parent.number()); - - let mut header = test_header::(num + One::one()); - header.set_parent_hash(hash); - - // Modifying the digest so headers at the same height but in different forks have different - // hashes - header.digest_mut().logs.push(sp_runtime::DigestItem::Other(fork_id.encode())); - - headers.push(header); - } - - headers -} - -/// Make valid proof for parachain `heads` -pub fn prepare_parachain_heads_proof( - heads: Vec<(u32, ParaHead)>, -) -> (H::Hash, ParaHeadsProof, Vec<(ParaId, ParaHash)>) { - let mut parachains = Vec::with_capacity(heads.len()); - let mut root = Default::default(); - let mut mdb = MemoryDB::default(); - { - let mut trie = TrieDBMutBuilderV1::::new(&mut mdb, &mut root).build(); - for (parachain, head) in heads { - let storage_key = - parachain_head_storage_key_at_source(PARAS_PALLET_NAME, ParaId(parachain)); - trie.insert(&storage_key.0, &head.encode()) - .map_err(|_| "TrieMut::insert has failed") - .expect("TrieMut::insert should not fail in tests"); - parachains.push((ParaId(parachain), head.hash())); - } - } - - // generate storage proof to be delivered to This chain - let storage_proof = record_all_trie_keys::, _>(&mdb, &root) - .map_err(|_| 
"record_all_trie_keys has failed") - .expect("record_all_trie_keys should not fail in benchmarks"); - - (root, ParaHeadsProof { storage_proof }, parachains) -} - -/// Create signed precommit with given target. -pub fn signed_precommit( - signer: &Account, - target: (H::Hash, H::Number), - round: u64, - set_id: SetId, -) -> finality_grandpa::SignedPrecommit { - let precommit = finality_grandpa::Precommit { target_hash: target.0, target_number: target.1 }; - - let encoded = sp_consensus_grandpa::localized_payload( - round, - set_id, - &finality_grandpa::Message::Precommit(precommit.clone()), - ); - - let signature = signer.sign(&encoded); - let raw_signature: Vec = signature.to_bytes().into(); - - // Need to wrap our signature and id types that they match what our `SignedPrecommit` is - // expecting - let signature = AuthoritySignature::try_from(raw_signature).expect( - "We know our Keypair is good, - so our signature must also be good.", - ); - let id = (*signer).into(); - - finality_grandpa::SignedPrecommit { precommit, signature, id } -} - -/// Get a header for testing. -/// -/// The correct parent hash will be used if given a non-zero header. -pub fn test_header(number: H::Number) -> H { - let default = |num| { - H::new(num, Default::default(), Default::default(), Default::default(), Default::default()) - }; - - let mut header = default(number); - if number != Zero::zero() { - let parent_hash = default(number - One::one()).hash(); - header.set_parent_hash(parent_hash); - } - - header -} - -/// Get a header for testing with given `state_root`. -/// -/// The correct parent hash will be used if given a non-zero header. -pub fn test_header_with_root(number: H::Number, state_root: H::Hash) -> H { - let mut header: H = test_header(number); - header.set_state_root(state_root); - header -} - -/// Convenience function for generating a Header ID at a given block number. 
-pub fn header_id(index: u8) -> (H::Hash, H::Number) { - (test_header::(index.into()).hash(), index.into()) -} - -#[macro_export] -/// Adds methods for testing the `set_owner()` and `set_operating_mode()` for a pallet. -/// Some values are hardcoded like: -/// - `run_test()` -/// - `Pallet::` -/// - `PalletOwner::` -/// - `PalletOperatingMode::` -/// While this is not ideal, all the pallets use the same names, so it works for the moment. -/// We can revisit this in the future if anything changes. -macro_rules! generate_owned_bridge_module_tests { - ($normal_operating_mode: expr, $halted_operating_mode: expr) => { - #[test] - fn test_set_owner() { - run_test(|| { - PalletOwner::::put(1); - - // The root should be able to change the owner. - assert_ok!(Pallet::::set_owner(RuntimeOrigin::root(), Some(2))); - assert_eq!(PalletOwner::::get(), Some(2)); - - // The owner should be able to change the owner. - assert_ok!(Pallet::::set_owner(RuntimeOrigin::signed(2), Some(3))); - assert_eq!(PalletOwner::::get(), Some(3)); - - // Other users shouldn't be able to change the owner. - assert_noop!( - Pallet::::set_owner(RuntimeOrigin::signed(1), Some(4)), - DispatchError::BadOrigin - ); - assert_eq!(PalletOwner::::get(), Some(3)); - }); - } - - #[test] - fn test_set_operating_mode() { - run_test(|| { - PalletOwner::::put(1); - PalletOperatingMode::::put($normal_operating_mode); - - // The root should be able to halt the pallet. - assert_ok!(Pallet::::set_operating_mode( - RuntimeOrigin::root(), - $halted_operating_mode - )); - assert_eq!(PalletOperatingMode::::get(), $halted_operating_mode); - // The root should be able to resume the pallet. - assert_ok!(Pallet::::set_operating_mode( - RuntimeOrigin::root(), - $normal_operating_mode - )); - assert_eq!(PalletOperatingMode::::get(), $normal_operating_mode); - - // The owner should be able to halt the pallet. 
- assert_ok!(Pallet::::set_operating_mode( - RuntimeOrigin::signed(1), - $halted_operating_mode - )); - assert_eq!(PalletOperatingMode::::get(), $halted_operating_mode); - // The owner should be able to resume the pallet. - assert_ok!(Pallet::::set_operating_mode( - RuntimeOrigin::signed(1), - $normal_operating_mode - )); - assert_eq!(PalletOperatingMode::::get(), $normal_operating_mode); - - // Other users shouldn't be able to halt the pallet. - assert_noop!( - Pallet::::set_operating_mode( - RuntimeOrigin::signed(2), - $halted_operating_mode - ), - DispatchError::BadOrigin - ); - assert_eq!(PalletOperatingMode::::get(), $normal_operating_mode); - // Other users shouldn't be able to resume the pallet. - PalletOperatingMode::::put($halted_operating_mode); - assert_noop!( - Pallet::::set_operating_mode( - RuntimeOrigin::signed(2), - $normal_operating_mode - ), - DispatchError::BadOrigin - ); - assert_eq!(PalletOperatingMode::::get(), $halted_operating_mode); - }); - } - }; -} diff --git a/primitives/xcm-bridge-hub-router/Cargo.toml b/primitives/xcm-bridge-hub-router/Cargo.toml deleted file mode 100644 index a9f584e2a0316896ba27912698e114ba329402d3..0000000000000000000000000000000000000000 --- a/primitives/xcm-bridge-hub-router/Cargo.toml +++ /dev/null @@ -1,22 +0,0 @@ -[package] -name = "bp-xcm-bridge-hub-router" -description = "Primitives of the xcm-bridge-hub fee pallet." 
-version = "0.6.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["bit-vec", "derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["bit-vec", "derive"] } - -# Substrate Dependencies -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } -sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } - -[features] -default = ["std"] -std = ["codec/std", "scale-info/std", "sp-core/std", "sp-runtime/std"] diff --git a/primitives/xcm-bridge-hub-router/src/lib.rs b/primitives/xcm-bridge-hub-router/src/lib.rs deleted file mode 100644 index dbedb7a52c7fee85e35c7fadc67d11d8cfa434dc..0000000000000000000000000000000000000000 --- a/primitives/xcm-bridge-hub-router/src/lib.rs +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Primitives of the `xcm-bridge-hub-router` pallet. 
- -#![cfg_attr(not(feature = "std"), no_std)] - -use codec::{Decode, Encode, MaxEncodedLen}; -use scale_info::TypeInfo; -use sp_core::H256; -use sp_runtime::{FixedU128, RuntimeDebug}; - -/// Minimal delivery fee factor. -pub const MINIMAL_DELIVERY_FEE_FACTOR: FixedU128 = FixedU128::from_u32(1); - -/// XCM channel status provider that may report whether it is congested or not. -/// -/// By channel we mean the physical channel that is used to deliver messages of one -/// of the bridge queues. -pub trait XcmChannelStatusProvider { - /// Returns true if the channel is currently congested. - fn is_congested() -> bool; -} - -impl XcmChannelStatusProvider for () { - fn is_congested() -> bool { - false - } -} - -/// Current status of the bridge. -#[derive(Clone, Decode, Encode, Eq, PartialEq, TypeInfo, MaxEncodedLen, RuntimeDebug)] -pub struct BridgeState { - /// Current delivery fee factor. - pub delivery_fee_factor: FixedU128, - /// Bridge congestion flag. - pub is_congested: bool, -} - -impl Default for BridgeState { - fn default() -> BridgeState { - BridgeState { delivery_fee_factor: MINIMAL_DELIVERY_FEE_FACTOR, is_congested: false } - } -} - -/// A minimized version of `pallet-xcm-bridge-hub-router::Call` that can be used without a runtime. -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -#[allow(non_camel_case_types)] -pub enum XcmBridgeHubRouterCall { - /// `pallet-xcm-bridge-hub-router::Call::report_bridge_status` - #[codec(index = 0)] - report_bridge_status { bridge_id: H256, is_congested: bool }, -} diff --git a/primitives/xcm-bridge-hub/Cargo.toml b/primitives/xcm-bridge-hub/Cargo.toml deleted file mode 100644 index 1a5bb742eed4743b5071eeb711e103af0b97f824..0000000000000000000000000000000000000000 --- a/primitives/xcm-bridge-hub/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "bp-xcm-bridge-hub" -description = "Primitives of the xcm-bridge-hub pallet." 
-version = "0.2.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] - -# Substrate Dependencies -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } - -[features] -default = ["std"] -std = ["sp-std/std"] diff --git a/primitives/xcm-bridge-hub/src/lib.rs b/primitives/xcm-bridge-hub/src/lib.rs deleted file mode 100644 index 9745011c902d2c3949b81886c872f438678a11b8..0000000000000000000000000000000000000000 --- a/primitives/xcm-bridge-hub/src/lib.rs +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Primitives of the xcm-bridge-hub pallet. - -#![warn(missing_docs)] -#![cfg_attr(not(feature = "std"), no_std)] - -/// Encoded XCM blob. We expect the bridge messages pallet to use this blob type for both inbound -/// and outbound payloads. 
-pub type XcmAsPlainPayload = sp_std::vec::Vec; diff --git a/relay-clients/client-bridge-hub-kusama/Cargo.toml b/relay-clients/client-bridge-hub-kusama/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..ac3c382baa78d663627bbe09cf8fa5adbcc7866a --- /dev/null +++ b/relay-clients/client-bridge-hub-kusama/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "relay-bridge-hub-kusama-client" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +subxt = { version = "0.35.3", default-features = false, features = ["native"] } + +# Bridge dependencies + +bp-bridge-hub-kusama = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-header-chain = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-messages = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-parachains = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-polkadot = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-polkadot-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bridge-runtime-common = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +relay-substrate-client = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } + +# Substrate Dependencies + +sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +sp-weights = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } diff --git 
a/relays/client-bridge-hub-kusama/src/codegen_runtime.rs b/relay-clients/client-bridge-hub-kusama/src/codegen_runtime.rs similarity index 67% rename from relays/client-bridge-hub-kusama/src/codegen_runtime.rs rename to relay-clients/client-bridge-hub-kusama/src/codegen_runtime.rs index 2da4c3014b254920bb596476e751598c2c885ed5..bf230104409a6c92c6b4f9cd1d62efa21e400ac2 100644 --- a/relays/client-bridge-hub-kusama/src/codegen_runtime.rs +++ b/relay-clients/client-bridge-hub-kusama/src/codegen_runtime.rs @@ -17,7 +17,7 @@ //! Autogenerated runtime API //! THIS FILE WAS AUTOGENERATED USING parity-bridges-common::runtime-codegen //! EXECUTED COMMAND: target/debug/runtime-codegen --from-node-url -//! wss://kusama-bridge-hub-rpc.polkadot.io +//! wss://kusama-bridge-hub-rpc.polkadot.io/ #[allow(dead_code, unused_imports, non_camel_case_types)] #[allow(clippy::all)] @@ -31,6 +31,11 @@ pub mod api { use super::runtime_types; pub mod bounded_collections { use super::runtime_types; + pub mod bounded_btree_set { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct BoundedBTreeSet<_0>(pub ::std::vec::Vec<_0>); + } pub mod bounded_vec { use super::runtime_types; #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] @@ -244,6 +249,23 @@ pub mod api { #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub struct StrippableError; } + pub mod bridge_hub_common { + use super::runtime_types; + pub mod message_queue { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum AggregateMessageOrigin { + #[codec(index = 0)] + Here, + #[codec(index = 1)] + Parent, + #[codec(index = 2)] + Sibling(runtime_types::polkadot_parachain_primitives::primitives::Id), + #[codec(index = 3)] + Snowbridge(runtime_types::snowbridge_core::ChannelId), + } + } + } pub mod bridge_hub_kusama_runtime { use super::runtime_types; #[derive(:: codec 
:: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] @@ -273,6 +295,8 @@ pub mod api { ParachainSystem(runtime_types::cumulus_pallet_parachain_system::pallet::Call), #[codec(index = 2)] Timestamp(runtime_types::pallet_timestamp::pallet::Call), + #[codec(index = 3)] + ParachainInfo(runtime_types::staging_parachain_info::pallet::Call), #[codec(index = 10)] Balances(runtime_types::pallet_balances::pallet::Call), #[codec(index = 21)] @@ -283,6 +307,8 @@ pub mod api { XcmpQueue(runtime_types::cumulus_pallet_xcmp_queue::pallet::Call), #[codec(index = 31)] PolkadotXcm(runtime_types::pallet_xcm::pallet::Call), + #[codec(index = 32)] + CumulusXcm(runtime_types::cumulus_pallet_xcm::pallet::Call), #[codec(index = 33)] DmpQueue(runtime_types::cumulus_pallet_dmp_queue::pallet::Call), #[codec(index = 40)] @@ -297,6 +323,20 @@ pub mod api { BridgePolkadotParachains(runtime_types::pallet_bridge_parachains::pallet::Call), #[codec(index = 53)] BridgePolkadotMessages(runtime_types::pallet_bridge_messages::pallet::Call), + #[codec(index = 80)] + EthereumInboundQueue(runtime_types::snowbridge_pallet_inbound_queue::pallet::Call), + #[codec(index = 81)] + EthereumOutboundQueue( + runtime_types::snowbridge_pallet_outbound_queue::pallet::Call, + ), + #[codec(index = 82)] + EthereumBeaconClient( + runtime_types::snowbridge_pallet_ethereum_client::pallet::Call, + ), + #[codec(index = 83)] + EthereumSystem(runtime_types::snowbridge_pallet_system::pallet::Call), + #[codec(index = 175)] + MessageQueue(runtime_types::pallet_message_queue::pallet::Call), } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum RuntimeError { @@ -314,10 +354,6 @@ pub mod api { XcmpQueue(runtime_types::cumulus_pallet_xcmp_queue::pallet::Error), #[codec(index = 31)] PolkadotXcm(runtime_types::pallet_xcm::pallet::Error), - #[codec(index = 32)] - CumulusXcm(runtime_types::cumulus_pallet_xcm::pallet::Error), - #[codec(index = 33)] - 
DmpQueue(runtime_types::cumulus_pallet_dmp_queue::pallet::Error), #[codec(index = 40)] Utility(runtime_types::pallet_utility::pallet::Error), #[codec(index = 41)] @@ -330,6 +366,20 @@ pub mod api { BridgePolkadotParachains(runtime_types::pallet_bridge_parachains::pallet::Error), #[codec(index = 53)] BridgePolkadotMessages(runtime_types::pallet_bridge_messages::pallet::Error), + #[codec(index = 80)] + EthereumInboundQueue(runtime_types::snowbridge_pallet_inbound_queue::pallet::Error), + #[codec(index = 81)] + EthereumOutboundQueue( + runtime_types::snowbridge_pallet_outbound_queue::pallet::Error, + ), + #[codec(index = 82)] + EthereumBeaconClient( + runtime_types::snowbridge_pallet_ethereum_client::pallet::Error, + ), + #[codec(index = 83)] + EthereumSystem(runtime_types::snowbridge_pallet_system::pallet::Error), + #[codec(index = 175)] + MessageQueue(runtime_types::pallet_message_queue::pallet::Error), } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum RuntimeEvent { @@ -365,6 +415,20 @@ pub mod api { BridgePolkadotParachains(runtime_types::pallet_bridge_parachains::pallet::Event), #[codec(index = 53)] BridgePolkadotMessages(runtime_types::pallet_bridge_messages::pallet::Event), + #[codec(index = 80)] + EthereumInboundQueue(runtime_types::snowbridge_pallet_inbound_queue::pallet::Event), + #[codec(index = 81)] + EthereumOutboundQueue( + runtime_types::snowbridge_pallet_outbound_queue::pallet::Event, + ), + #[codec(index = 82)] + EthereumBeaconClient( + runtime_types::snowbridge_pallet_ethereum_client::pallet::Event, + ), + #[codec(index = 83)] + EthereumSystem(runtime_types::snowbridge_pallet_system::pallet::Event), + #[codec(index = 175)] + MessageQueue(runtime_types::pallet_message_queue::pallet::Event), } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum RuntimeHoldReason {} @@ -392,7 +456,7 @@ pub mod api { #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub struct 
RefundBridgedParachainMessages; #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct RefundTransactionExtensionAdapter<_0>(pub _0); + pub struct RefundSignedExtensionAdapter<_0>(pub _0); } } pub mod cumulus_pallet_dmp_queue { @@ -400,65 +464,56 @@ pub mod api { pub mod pallet { use super::runtime_types; #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum Call { - #[codec(index = 0)] - service_overweight { - index: ::core::primitive::u64, - weight_limit: ::sp_weights::Weight, - }, - } + pub enum Call {} #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum Error { + pub enum Event { #[codec(index = 0)] - Unknown, + StartedExport, #[codec(index = 1)] - OverLimit, + Exported { page: ::core::primitive::u32 }, + #[codec(index = 2)] + ExportFailed { page: ::core::primitive::u32 }, + #[codec(index = 3)] + CompletedExport, + #[codec(index = 4)] + StartedOverweightExport, + #[codec(index = 5)] + ExportedOverweight { index: ::core::primitive::u64 }, + #[codec(index = 6)] + ExportOverweightFailed { index: ::core::primitive::u64 }, + #[codec(index = 7)] + CompletedOverweightExport, + #[codec(index = 8)] + StartedCleanup, + #[codec(index = 9)] + CleanedSome { keys_removed: ::core::primitive::u32 }, + #[codec(index = 10)] + Completed { error: ::core::primitive::bool }, } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum Event { + pub enum MigrationState { #[codec(index = 0)] - InvalidFormat { message_hash: [::core::primitive::u8; 32usize] }, + NotStarted, #[codec(index = 1)] - UnsupportedVersion { message_hash: [::core::primitive::u8; 32usize] }, + StartedExport { next_begin_used: ::core::primitive::u32 }, #[codec(index = 2)] - ExecutedDownward { - message_hash: [::core::primitive::u8; 32usize], - message_id: [::core::primitive::u8; 32usize], - outcome: runtime_types::xcm::v3::traits::Outcome, - }, + CompletedExport, #[codec(index = 3)] - 
WeightExhausted { - message_hash: [::core::primitive::u8; 32usize], - message_id: [::core::primitive::u8; 32usize], - remaining_weight: ::sp_weights::Weight, - required_weight: ::sp_weights::Weight, - }, + StartedOverweightExport { next_overweight_index: ::core::primitive::u64 }, #[codec(index = 4)] - OverweightEnqueued { - message_hash: [::core::primitive::u8; 32usize], - message_id: [::core::primitive::u8; 32usize], - overweight_index: ::core::primitive::u64, - required_weight: ::sp_weights::Weight, - }, + CompletedOverweightExport, #[codec(index = 5)] - OverweightServiced { - overweight_index: ::core::primitive::u64, - weight_used: ::sp_weights::Weight, + StartedCleanup { + cursor: ::core::option::Option< + runtime_types::bounded_collections::bounded_vec::BoundedVec< + ::core::primitive::u8, + >, + >, }, #[codec(index = 6)] - MaxMessagesExhausted { message_hash: [::core::primitive::u8; 32usize] }, + Completed, } } - #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct ConfigData { - pub max_individual: ::sp_weights::Weight, - } - #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct PageIndexData { - pub begin_used: ::core::primitive::u32, - pub end_used: ::core::primitive::u32, - pub overweight_count: ::core::primitive::u64, - } } pub mod cumulus_pallet_parachain_system { use super::runtime_types; @@ -495,15 +550,13 @@ pub mod api { #[codec(index = 2)] ValidationFunctionDiscarded, #[codec(index = 3)] - UpgradeAuthorized { code_hash: ::subxt::utils::H256 }, - #[codec(index = 4)] DownwardMessagesReceived { count: ::core::primitive::u32 }, - #[codec(index = 5)] + #[codec(index = 4)] DownwardMessagesProcessed { weight_used: ::sp_weights::Weight, dmq_head: ::subxt::utils::H256, }, - #[codec(index = 6)] + #[codec(index = 5)] UpwardMessageSent { message_hash: ::core::option::Option<[::core::primitive::u8; 32usize]>, }, @@ -533,18 +586,13 @@ pub mod api { #[derive(:: codec :: Decode, :: codec :: 
Encode, Clone, Debug, PartialEq)] pub struct UsedBandwidth { pub ump_msg_count : :: core :: primitive :: u32 , pub ump_total_bytes : :: core :: primitive :: u32 , pub hrmp_outgoing : :: subxt :: utils :: KeyedVec < runtime_types :: polkadot_parachain_primitives :: primitives :: Id , runtime_types :: cumulus_pallet_parachain_system :: unincluded_segment :: HrmpChannelUpdate > , } } - #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct CodeUpgradeAuthorization { - pub code_hash: ::subxt::utils::H256, - pub check_version: ::core::primitive::bool, - } } pub mod cumulus_pallet_xcm { use super::runtime_types; pub mod pallet { use super::runtime_types; #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum Error {} + pub enum Call {} #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum Event { #[codec(index = 0)] @@ -554,7 +602,7 @@ pub mod api { #[codec(index = 2)] ExecutedDownward( [::core::primitive::u8; 32usize], - runtime_types::xcm::v3::traits::Outcome, + runtime_types::staging_xcm::v4::traits::Outcome, ), } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] @@ -572,11 +620,6 @@ pub mod api { use super::runtime_types; #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum Call { - #[codec(index = 0)] - service_overweight { - index: ::core::primitive::u64, - weight_limit: ::sp_weights::Weight, - }, #[codec(index = 1)] suspend_xcm_execution, #[codec(index = 2)] @@ -587,75 +630,23 @@ pub mod api { update_drop_threshold { new: ::core::primitive::u32 }, #[codec(index = 5)] update_resume_threshold { new: ::core::primitive::u32 }, - #[codec(index = 6)] - update_threshold_weight { new: ::sp_weights::Weight }, - #[codec(index = 7)] - update_weight_restrict_decay { new: ::sp_weights::Weight }, - #[codec(index = 8)] - update_xcmp_max_individual_weight { new: ::sp_weights::Weight }, } #[derive(:: codec :: Decode, :: codec :: 
Encode, Clone, Debug, PartialEq)] pub enum Error { #[codec(index = 0)] - FailedToSend, + BadQueueConfig, #[codec(index = 1)] - BadXcmOrigin, + AlreadySuspended, #[codec(index = 2)] - BadXcm, - #[codec(index = 3)] - BadOverweightIndex, - #[codec(index = 4)] - WeightOverLimit, + AlreadyResumed, } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum Event { #[codec(index = 0)] - Success { - message_hash: [::core::primitive::u8; 32usize], - message_id: [::core::primitive::u8; 32usize], - weight: ::sp_weights::Weight, - }, - #[codec(index = 1)] - Fail { - message_hash: [::core::primitive::u8; 32usize], - message_id: [::core::primitive::u8; 32usize], - error: runtime_types::xcm::v3::traits::Error, - weight: ::sp_weights::Weight, - }, - #[codec(index = 2)] - BadVersion { message_hash: [::core::primitive::u8; 32usize] }, - #[codec(index = 3)] - BadFormat { message_hash: [::core::primitive::u8; 32usize] }, - #[codec(index = 4)] XcmpMessageSent { message_hash: [::core::primitive::u8; 32usize] }, - #[codec(index = 5)] - OverweightEnqueued { - sender: runtime_types::polkadot_parachain_primitives::primitives::Id, - sent_at: ::core::primitive::u32, - index: ::core::primitive::u64, - required: ::sp_weights::Weight, - }, - #[codec(index = 6)] - OverweightServiced { index: ::core::primitive::u64, used: ::sp_weights::Weight }, } } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct InboundChannelDetails { - pub sender: runtime_types::polkadot_parachain_primitives::primitives::Id, - pub state: runtime_types::cumulus_pallet_xcmp_queue::InboundState, - pub message_metadata: ::std::vec::Vec<( - ::core::primitive::u32, - runtime_types::polkadot_parachain_primitives::primitives::XcmpMessageFormat, - )>, - } - #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum InboundState { - #[codec(index = 0)] - Ok, - #[codec(index = 1)] - Suspended, - } - #[derive(:: codec :: Decode, :: codec :: 
Encode, Clone, Debug, PartialEq)] pub struct OutboundChannelDetails { pub recipient: runtime_types::polkadot_parachain_primitives::primitives::Id, pub state: runtime_types::cumulus_pallet_xcmp_queue::OutboundState, @@ -675,9 +666,6 @@ pub mod api { pub suspend_threshold: ::core::primitive::u32, pub drop_threshold: ::core::primitive::u32, pub resume_threshold: ::core::primitive::u32, - pub threshold_weight: ::sp_weights::Weight, - pub weight_restrict_decay: ::sp_weights::Weight, - pub xcmp_max_individual_weight: ::sp_weights::Weight, } } pub mod cumulus_primitives_core { @@ -791,6 +779,22 @@ pub mod api { } pub mod traits { use super::runtime_types; + pub mod messages { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum ProcessMessageError { + #[codec(index = 0)] + BadFormat, + #[codec(index = 1)] + Corrupt, + #[codec(index = 2)] + Unsupported, + #[codec(index = 3)] + Overweight(::sp_weights::Weight), + #[codec(index = 4)] + Yield, + } + } pub mod tokens { use super::runtime_types; pub mod misc { @@ -900,6 +904,12 @@ pub mod api { }, #[codec(index = 7)] remark_with_event { remark: ::std::vec::Vec<::core::primitive::u8> }, + #[codec(index = 9)] + authorize_upgrade { code_hash: ::subxt::utils::H256 }, + #[codec(index = 10)] + authorize_upgrade_without_checks { code_hash: ::subxt::utils::H256 }, + #[codec(index = 11)] + apply_authorized_upgrade { code: ::std::vec::Vec<::core::primitive::u8> }, } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum Error { @@ -915,6 +925,10 @@ pub mod api { NonZeroRefCount, #[codec(index = 5)] CallFiltered, + #[codec(index = 6)] + NothingAuthorized, + #[codec(index = 7)] + Unauthorized, } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum Event { @@ -935,6 +949,11 @@ pub mod api { KilledAccount { account: ::sp_core::crypto::AccountId32 }, #[codec(index = 5)] Remarked { sender: ::sp_core::crypto::AccountId32, 
hash: ::subxt::utils::H256 }, + #[codec(index = 6)] + UpgradeAuthorized { + code_hash: ::subxt::utils::H256, + check_version: ::core::primitive::bool, + }, } } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] @@ -946,6 +965,11 @@ pub mod api { pub data: _1, } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct CodeUpgradeAuthorization { + pub code_hash: ::subxt::utils::H256, + pub check_version: ::core::primitive::bool, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub struct EventRecord<_0, _1> { pub phase: runtime_types::frame_system::Phase, pub event: _0, @@ -1010,6 +1034,12 @@ pub mod api { #[codec(compact)] new_free: ::core::primitive::u128, }, + #[codec(index = 9)] + force_adjust_total_issuance { + direction: runtime_types::pallet_balances::types::AdjustmentDirection, + #[codec(compact)] + delta: ::core::primitive::u128, + }, } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum Error { @@ -1033,6 +1063,10 @@ pub mod api { TooManyHolds, #[codec(index = 9)] TooManyFreezes, + #[codec(index = 10)] + IssuanceDeactivated, + #[codec(index = 11)] + DeltaZero, } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum Event { @@ -1115,6 +1149,11 @@ pub mod api { Frozen { who: ::sp_core::crypto::AccountId32, amount: ::core::primitive::u128 }, #[codec(index = 20)] Thawed { who: ::sp_core::crypto::AccountId32, amount: ::core::primitive::u128 }, + #[codec(index = 21)] + TotalIssuanceForced { + old: ::core::primitive::u128, + new: ::core::primitive::u128, + }, } } pub mod types { @@ -1127,6 +1166,13 @@ pub mod api { pub flags: runtime_types::pallet_balances::types::ExtraFlags, } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum AdjustmentDirection { + #[codec(index = 0)] + Increase, + #[codec(index = 1)] + Decrease, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, 
PartialEq)] pub struct BalanceLock<_0> { pub id: [::core::primitive::u8; 8usize], pub amount: _0, @@ -1271,7 +1317,7 @@ pub mod api { # [codec (index = 0)] set_owner { new_owner : :: core :: option :: Option < :: sp_core :: crypto :: AccountId32 > , } , # [codec (index = 1)] set_operating_mode { operating_mode : runtime_types :: bp_messages :: MessagesOperatingMode , } , # [codec (index = 2)] receive_messages_proof { relayer_id_at_bridged_chain : :: sp_core :: crypto :: AccountId32 , proof : :: bridge_runtime_common :: messages :: target :: FromBridgedChainMessagesProof < :: subxt :: utils :: H256 > , messages_count : :: core :: primitive :: u32 , dispatch_weight : :: sp_weights :: Weight , } , # [codec (index = 3)] receive_messages_delivery_proof { proof : :: bridge_runtime_common :: messages :: source :: FromBridgedChainMessagesDeliveryProof < :: subxt :: utils :: H256 > , relayers_state : :: bp_messages :: UnrewardedRelayersState , } , } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum Error { - # [codec (index = 0)] NotOperatingNormally , # [codec (index = 1)] InactiveOutboundLane , # [codec (index = 2)] MessageDispatchInactive , # [codec (index = 3)] MessageRejectedByChainVerifier (runtime_types :: bp_messages :: VerificationError ,) , # [codec (index = 4)] MessageRejectedByLaneVerifier (runtime_types :: bp_messages :: VerificationError ,) , # [codec (index = 5)] MessageRejectedByPallet (runtime_types :: bp_messages :: VerificationError ,) , # [codec (index = 6)] FailedToWithdrawMessageFee , # [codec (index = 7)] TooManyMessagesInTheProof , # [codec (index = 8)] InvalidMessagesProof , # [codec (index = 9)] InvalidMessagesDeliveryProof , # [codec (index = 10)] InvalidUnrewardedRelayersState , # [codec (index = 11)] InsufficientDispatchWeight , # [codec (index = 12)] MessageIsNotYetSent , # [codec (index = 13)] ReceivalConfirmation (runtime_types :: pallet_bridge_messages :: outbound_lane :: ReceivalConfirmationError ,) , # 
[codec (index = 14)] BridgeModule (runtime_types :: bp_runtime :: OwnedBridgeModuleError ,) , } + # [codec (index = 0)] NotOperatingNormally , # [codec (index = 1)] InactiveOutboundLane , # [codec (index = 2)] MessageDispatchInactive , # [codec (index = 3)] MessageRejectedByChainVerifier (runtime_types :: bp_messages :: VerificationError ,) , # [codec (index = 4)] MessageRejectedByPallet (runtime_types :: bp_messages :: VerificationError ,) , # [codec (index = 5)] FailedToWithdrawMessageFee , # [codec (index = 6)] TooManyMessagesInTheProof , # [codec (index = 7)] InvalidMessagesProof , # [codec (index = 8)] InvalidMessagesDeliveryProof , # [codec (index = 9)] InvalidUnrewardedRelayersState , # [codec (index = 10)] InsufficientDispatchWeight , # [codec (index = 11)] MessageIsNotYetSent , # [codec (index = 12)] ReceivalConfirmation (runtime_types :: pallet_bridge_messages :: outbound_lane :: ReceivalConfirmationError ,) , # [codec (index = 13)] BridgeModule (runtime_types :: bp_runtime :: OwnedBridgeModuleError ,) , } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum Event { # [codec (index = 0)] MessageAccepted { lane_id : runtime_types :: bp_messages :: LaneId , nonce : :: core :: primitive :: u64 , } , # [codec (index = 1)] MessagesReceived (:: std :: vec :: Vec < runtime_types :: bp_messages :: ReceivedMessages < runtime_types :: bridge_runtime_common :: messages_xcm_extension :: XcmBlobMessageDispatchResult > > ,) , # [codec (index = 2)] MessagesDelivered { lane_id : runtime_types :: bp_messages :: LaneId , messages : runtime_types :: bp_messages :: DeliveredMessages , } , } @@ -1378,12 +1424,18 @@ pub mod api { #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum Event { #[codec(index = 0)] - RewardPaid { + RewardRegistered { relayer: ::sp_core::crypto::AccountId32, rewards_account_params: runtime_types::bp_relayers::RewardsAccountParams, reward: ::core::primitive::u128, }, #[codec(index = 1)] + 
RewardPaid { + relayer: ::sp_core::crypto::AccountId32, + rewards_account_params: runtime_types::bp_relayers::RewardsAccountParams, + reward: ::core::primitive::u128, + }, + #[codec(index = 2)] RegistrationUpdated { relayer: ::sp_core::crypto::AccountId32, registration: runtime_types::bp_relayers::registration::Registration< @@ -1391,9 +1443,9 @@ pub mod api { ::core::primitive::u128, >, }, - #[codec(index = 2)] - Deregistered { relayer: ::sp_core::crypto::AccountId32 }, #[codec(index = 3)] + Deregistered { relayer: ::sp_core::crypto::AccountId32 }, + #[codec(index = 4)] SlashedAndDeregistered { relayer: ::sp_core::crypto::AccountId32, registration: runtime_types::bp_relayers::registration::Registration< @@ -1424,6 +1476,13 @@ pub mod api { add_invulnerable { who: ::sp_core::crypto::AccountId32 }, #[codec(index = 6)] remove_invulnerable { who: ::sp_core::crypto::AccountId32 }, + #[codec(index = 7)] + update_bond { new_deposit: ::core::primitive::u128 }, + #[codec(index = 8)] + take_candidate_slot { + deposit: ::core::primitive::u128, + target: ::sp_core::crypto::AccountId32, + }, } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub struct CandidateInfo<_0, _1> { @@ -1450,6 +1509,22 @@ pub mod api { NoAssociatedValidatorId, #[codec(index = 8)] ValidatorNotRegistered, + #[codec(index = 9)] + InsertToCandidateListFailed, + #[codec(index = 10)] + RemoveFromCandidateListFailed, + #[codec(index = 11)] + DepositTooLow, + #[codec(index = 12)] + UpdateCandidateListFailed, + #[codec(index = 13)] + InsufficientBond, + #[codec(index = 14)] + TargetIsNotCandidate, + #[codec(index = 15)] + IdenticalDeposit, + #[codec(index = 16)] + InvalidUnreserve, } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum Event { @@ -1471,12 +1546,125 @@ pub mod api { deposit: ::core::primitive::u128, }, #[codec(index = 6)] - CandidateRemoved { account_id: ::sp_core::crypto::AccountId32 }, + CandidateBondUpdated { + account_id: 
::sp_core::crypto::AccountId32, + deposit: ::core::primitive::u128, + }, #[codec(index = 7)] + CandidateRemoved { account_id: ::sp_core::crypto::AccountId32 }, + #[codec(index = 8)] + CandidateReplaced { + old: ::sp_core::crypto::AccountId32, + new: ::sp_core::crypto::AccountId32, + deposit: ::core::primitive::u128, + }, + #[codec(index = 9)] InvalidInvulnerableSkipped { account_id: ::sp_core::crypto::AccountId32 }, } } } + pub mod pallet_message_queue { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Call { + #[codec(index = 0)] + reap_page { + message_origin: + runtime_types::bridge_hub_common::message_queue::AggregateMessageOrigin, + page_index: ::core::primitive::u32, + }, + #[codec(index = 1)] + execute_overweight { + message_origin: + runtime_types::bridge_hub_common::message_queue::AggregateMessageOrigin, + page: ::core::primitive::u32, + index: ::core::primitive::u32, + weight_limit: ::sp_weights::Weight, + }, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Error { + #[codec(index = 0)] + NotReapable, + #[codec(index = 1)] + NoPage, + #[codec(index = 2)] + NoMessage, + #[codec(index = 3)] + AlreadyProcessed, + #[codec(index = 4)] + Queued, + #[codec(index = 5)] + InsufficientWeight, + #[codec(index = 6)] + TemporarilyUnprocessable, + #[codec(index = 7)] + QueuePaused, + #[codec(index = 8)] + RecursiveDisallowed, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Event { + #[codec(index = 0)] + ProcessingFailed { + id: ::subxt::utils::H256, + origin: + runtime_types::bridge_hub_common::message_queue::AggregateMessageOrigin, + error: runtime_types::frame_support::traits::messages::ProcessMessageError, + }, + #[codec(index = 1)] + Processed { + id: ::subxt::utils::H256, + origin: + runtime_types::bridge_hub_common::message_queue::AggregateMessageOrigin, + 
weight_used: ::sp_weights::Weight, + success: ::core::primitive::bool, + }, + #[codec(index = 2)] + OverweightEnqueued { + id: [::core::primitive::u8; 32usize], + origin: + runtime_types::bridge_hub_common::message_queue::AggregateMessageOrigin, + page_index: ::core::primitive::u32, + message_index: ::core::primitive::u32, + }, + #[codec(index = 3)] + PageReaped { + origin: + runtime_types::bridge_hub_common::message_queue::AggregateMessageOrigin, + index: ::core::primitive::u32, + }, + } + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct BookState<_0> { + pub begin: ::core::primitive::u32, + pub end: ::core::primitive::u32, + pub count: ::core::primitive::u32, + pub ready_neighbours: + ::core::option::Option>, + pub message_count: ::core::primitive::u64, + pub size: ::core::primitive::u64, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Neighbours<_0> { + pub prev: _0, + pub next: _0, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Page<_0> { + pub remaining: _0, + pub remaining_size: _0, + pub first_index: _0, + pub first: _0, + pub last: _0, + pub heap: runtime_types::bounded_collections::bounded_vec::BoundedVec< + ::core::primitive::u8, + >, + } + } pub mod pallet_multisig { use super::runtime_types; pub mod pallet { @@ -1776,21 +1964,21 @@ pub mod api { pub enum Call { #[codec(index = 0)] send { - dest: ::std::boxed::Box, + dest: ::std::boxed::Box, message: ::std::boxed::Box, }, #[codec(index = 1)] teleport_assets { - dest: ::std::boxed::Box, - beneficiary: ::std::boxed::Box, - assets: ::std::boxed::Box, + dest: ::std::boxed::Box, + beneficiary: ::std::boxed::Box, + assets: ::std::boxed::Box, fee_asset_item: ::core::primitive::u32, }, #[codec(index = 2)] reserve_transfer_assets { - dest: ::std::boxed::Box, - beneficiary: ::std::boxed::Box, - assets: ::std::boxed::Box, + dest: ::std::boxed::Box, + beneficiary: ::std::boxed::Box, 
+ assets: ::std::boxed::Box, fee_asset_item: ::core::primitive::u32, }, #[codec(index = 3)] @@ -1800,9 +1988,8 @@ pub mod api { }, #[codec(index = 4)] force_xcm_version { - location: ::std::boxed::Box< - runtime_types::staging_xcm::v3::multilocation::MultiLocation, - >, + location: + ::std::boxed::Box, version: ::core::primitive::u32, }, #[codec(index = 5)] @@ -1811,30 +1998,43 @@ pub mod api { }, #[codec(index = 6)] force_subscribe_version_notify { - location: ::std::boxed::Box, + location: ::std::boxed::Box, }, #[codec(index = 7)] force_unsubscribe_version_notify { - location: ::std::boxed::Box, + location: ::std::boxed::Box, }, #[codec(index = 8)] limited_reserve_transfer_assets { - dest: ::std::boxed::Box, - beneficiary: ::std::boxed::Box, - assets: ::std::boxed::Box, + dest: ::std::boxed::Box, + beneficiary: ::std::boxed::Box, + assets: ::std::boxed::Box, fee_asset_item: ::core::primitive::u32, weight_limit: runtime_types::xcm::v3::WeightLimit, }, #[codec(index = 9)] limited_teleport_assets { - dest: ::std::boxed::Box, - beneficiary: ::std::boxed::Box, - assets: ::std::boxed::Box, + dest: ::std::boxed::Box, + beneficiary: ::std::boxed::Box, + assets: ::std::boxed::Box, fee_asset_item: ::core::primitive::u32, weight_limit: runtime_types::xcm::v3::WeightLimit, }, #[codec(index = 10)] force_suspension { suspended: ::core::primitive::bool }, + #[codec(index = 11)] + transfer_assets { + dest: ::std::boxed::Box, + beneficiary: ::std::boxed::Box, + assets: ::std::boxed::Box, + fee_asset_item: ::core::primitive::u32, + weight_limit: runtime_types::xcm::v3::WeightLimit, + }, + #[codec(index = 12)] + claim_assets { + assets: ::std::boxed::Box, + beneficiary: ::std::boxed::Box, + }, } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum Error { @@ -1865,7 +2065,7 @@ pub mod api { #[codec(index = 12)] AlreadySubscribed, #[codec(index = 13)] - InvalidAsset, + CannotCheckOutTeleport, #[codec(index = 14)] LowBalance, #[codec(index = 15)] @@ 
-1878,27 +2078,37 @@ pub mod api { LockNotFound, #[codec(index = 19)] InUse, + #[codec(index = 20)] + InvalidAssetNotConcrete, + #[codec(index = 21)] + InvalidAssetUnknownReserve, + #[codec(index = 22)] + InvalidAssetUnsupportedReserve, + #[codec(index = 23)] + TooManyReserves, + #[codec(index = 24)] + LocalExecutionIncomplete, } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum Event { #[codec(index = 0)] - Attempted { outcome: runtime_types::xcm::v3::traits::Outcome }, + Attempted { outcome: runtime_types::staging_xcm::v4::traits::Outcome }, #[codec(index = 1)] Sent { - origin: runtime_types::staging_xcm::v3::multilocation::MultiLocation, - destination: runtime_types::staging_xcm::v3::multilocation::MultiLocation, - message: runtime_types::xcm::v3::Xcm, + origin: runtime_types::staging_xcm::v4::location::Location, + destination: runtime_types::staging_xcm::v4::location::Location, + message: runtime_types::staging_xcm::v4::Xcm, message_id: [::core::primitive::u8; 32usize], }, #[codec(index = 2)] UnexpectedResponse { - origin: runtime_types::staging_xcm::v3::multilocation::MultiLocation, + origin: runtime_types::staging_xcm::v4::location::Location, query_id: ::core::primitive::u64, }, #[codec(index = 3)] ResponseReady { query_id: ::core::primitive::u64, - response: runtime_types::xcm::v3::Response, + response: runtime_types::staging_xcm::v4::Response, }, #[codec(index = 4)] Notified { @@ -1928,15 +2138,15 @@ pub mod api { }, #[codec(index = 8)] InvalidResponder { - origin: runtime_types::staging_xcm::v3::multilocation::MultiLocation, + origin: runtime_types::staging_xcm::v4::location::Location, query_id: ::core::primitive::u64, expected_location: ::core::option::Option< - runtime_types::staging_xcm::v3::multilocation::MultiLocation, + runtime_types::staging_xcm::v4::location::Location, >, }, #[codec(index = 9)] InvalidResponderVersion { - origin: runtime_types::staging_xcm::v3::multilocation::MultiLocation, + origin: 
runtime_types::staging_xcm::v4::location::Location, query_id: ::core::primitive::u64, }, #[codec(index = 10)] @@ -1944,98 +2154,99 @@ pub mod api { #[codec(index = 11)] AssetsTrapped { hash: ::subxt::utils::H256, - origin: runtime_types::staging_xcm::v3::multilocation::MultiLocation, - assets: runtime_types::xcm::VersionedMultiAssets, + origin: runtime_types::staging_xcm::v4::location::Location, + assets: runtime_types::xcm::VersionedAssets, }, #[codec(index = 12)] VersionChangeNotified { - destination: runtime_types::staging_xcm::v3::multilocation::MultiLocation, + destination: runtime_types::staging_xcm::v4::location::Location, result: ::core::primitive::u32, - cost: runtime_types::xcm::v3::multiasset::MultiAssets, + cost: runtime_types::staging_xcm::v4::asset::Assets, message_id: [::core::primitive::u8; 32usize], }, #[codec(index = 13)] SupportedVersionChanged { - location: runtime_types::staging_xcm::v3::multilocation::MultiLocation, + location: runtime_types::staging_xcm::v4::location::Location, version: ::core::primitive::u32, }, #[codec(index = 14)] NotifyTargetSendFail { - location: runtime_types::staging_xcm::v3::multilocation::MultiLocation, + location: runtime_types::staging_xcm::v4::location::Location, query_id: ::core::primitive::u64, error: runtime_types::xcm::v3::traits::Error, }, #[codec(index = 15)] NotifyTargetMigrationFail { - location: runtime_types::xcm::VersionedMultiLocation, + location: runtime_types::xcm::VersionedLocation, query_id: ::core::primitive::u64, }, #[codec(index = 16)] InvalidQuerierVersion { - origin: runtime_types::staging_xcm::v3::multilocation::MultiLocation, + origin: runtime_types::staging_xcm::v4::location::Location, query_id: ::core::primitive::u64, }, #[codec(index = 17)] InvalidQuerier { - origin: runtime_types::staging_xcm::v3::multilocation::MultiLocation, + origin: runtime_types::staging_xcm::v4::location::Location, query_id: ::core::primitive::u64, - expected_querier: - 
runtime_types::staging_xcm::v3::multilocation::MultiLocation, + expected_querier: runtime_types::staging_xcm::v4::location::Location, maybe_actual_querier: ::core::option::Option< - runtime_types::staging_xcm::v3::multilocation::MultiLocation, + runtime_types::staging_xcm::v4::location::Location, >, }, #[codec(index = 18)] VersionNotifyStarted { - destination: runtime_types::staging_xcm::v3::multilocation::MultiLocation, - cost: runtime_types::xcm::v3::multiasset::MultiAssets, + destination: runtime_types::staging_xcm::v4::location::Location, + cost: runtime_types::staging_xcm::v4::asset::Assets, message_id: [::core::primitive::u8; 32usize], }, #[codec(index = 19)] VersionNotifyRequested { - destination: runtime_types::staging_xcm::v3::multilocation::MultiLocation, - cost: runtime_types::xcm::v3::multiasset::MultiAssets, + destination: runtime_types::staging_xcm::v4::location::Location, + cost: runtime_types::staging_xcm::v4::asset::Assets, message_id: [::core::primitive::u8; 32usize], }, #[codec(index = 20)] VersionNotifyUnrequested { - destination: runtime_types::staging_xcm::v3::multilocation::MultiLocation, - cost: runtime_types::xcm::v3::multiasset::MultiAssets, + destination: runtime_types::staging_xcm::v4::location::Location, + cost: runtime_types::staging_xcm::v4::asset::Assets, message_id: [::core::primitive::u8; 32usize], }, #[codec(index = 21)] FeesPaid { - paying: runtime_types::staging_xcm::v3::multilocation::MultiLocation, - fees: runtime_types::xcm::v3::multiasset::MultiAssets, + paying: runtime_types::staging_xcm::v4::location::Location, + fees: runtime_types::staging_xcm::v4::asset::Assets, }, #[codec(index = 22)] AssetsClaimed { hash: ::subxt::utils::H256, - origin: runtime_types::staging_xcm::v3::multilocation::MultiLocation, - assets: runtime_types::xcm::VersionedMultiAssets, + origin: runtime_types::staging_xcm::v4::location::Location, + assets: runtime_types::xcm::VersionedAssets, }, + #[codec(index = 23)] + VersionMigrationFinished { version: 
::core::primitive::u32 }, } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum Origin { #[codec(index = 0)] - Xcm(runtime_types::staging_xcm::v3::multilocation::MultiLocation), + Xcm(runtime_types::staging_xcm::v4::location::Location), #[codec(index = 1)] - Response(runtime_types::staging_xcm::v3::multilocation::MultiLocation), + Response(runtime_types::staging_xcm::v4::location::Location), } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum QueryStatus<_0> { #[codec(index = 0)] Pending { - responder: runtime_types::xcm::VersionedMultiLocation, + responder: runtime_types::xcm::VersionedLocation, maybe_match_querier: - ::core::option::Option, + ::core::option::Option, maybe_notify: ::core::option::Option<(::core::primitive::u8, ::core::primitive::u8)>, timeout: _0, }, #[codec(index = 1)] VersionNotifier { - origin: runtime_types::xcm::VersionedMultiLocation, + origin: runtime_types::xcm::VersionedLocation, is_active: ::core::primitive::bool, }, #[codec(index = 2)] @@ -2044,8 +2255,8 @@ pub mod api { #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub struct RemoteLockedFungibleRecord<_0> { pub amount: ::core::primitive::u128, - pub owner: runtime_types::xcm::VersionedMultiLocation, - pub locker: runtime_types::xcm::VersionedMultiLocation, + pub owner: runtime_types::xcm::VersionedLocation, + pub locker: runtime_types::xcm::VersionedLocation, pub consumers: runtime_types::bounded_collections::bounded_vec::BoundedVec<( _0, ::core::primitive::u128, @@ -2101,15 +2312,6 @@ pub mod api { pub struct Id(pub ::core::primitive::u32); #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub struct ValidationCode(pub ::std::vec::Vec<::core::primitive::u8>); - #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum XcmpMessageFormat { - #[codec(index = 0)] - ConcatenatedVersionedXcm, - #[codec(index = 1)] - ConcatenatedEncodedBlob, - 
#[codec(index = 2)] - Signals, - } } } pub mod polkadot_primitives { @@ -2169,339 +2371,1739 @@ pub mod api { } } } - pub mod sp_arithmetic { + pub mod primitive_types { use super::runtime_types; - pub mod fixed_point { - use super::runtime_types; - #[derive( - :: codec :: Decode, - :: codec :: Encode, - :: subxt :: ext :: codec :: CompactAs, - Clone, - Debug, - PartialEq, - )] - pub struct FixedU128(pub ::core::primitive::u128); - } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum ArithmeticError { - #[codec(index = 0)] - Underflow, - #[codec(index = 1)] - Overflow, - #[codec(index = 2)] - DivisionByZero, - } + pub struct U256(pub [::core::primitive::u64; 4usize]); } - pub mod sp_consensus_aura { + pub mod snowbridge_amcl { use super::runtime_types; - pub mod sr25519 { + pub mod bls381 { use super::runtime_types; - pub mod app_sr25519 { + pub mod big { use super::runtime_types; #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct Public(pub runtime_types::sp_core::sr25519::Public); + pub struct Big { + pub w: [::core::primitive::i32; 14usize], + } + } + pub mod ecp { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct ECP { + pub x: runtime_types::snowbridge_amcl::bls381::fp::FP, + pub y: runtime_types::snowbridge_amcl::bls381::fp::FP, + pub z: runtime_types::snowbridge_amcl::bls381::fp::FP, + } + } + pub mod fp { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct FP { + pub x: runtime_types::snowbridge_amcl::bls381::big::Big, + pub xes: ::core::primitive::i32, + } } } } - pub mod sp_consensus_grandpa { + pub mod snowbridge_beacon_primitives { use super::runtime_types; - pub mod app { + pub mod bls { use super::runtime_types; #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct Public(pub 
runtime_types::sp_core::ed25519::Public); + pub enum BlsError { + #[codec(index = 0)] + InvalidSignature, + #[codec(index = 1)] + InvalidPublicKey, + #[codec(index = 2)] + InvalidAggregatePublicKeys, + #[codec(index = 3)] + SignatureVerificationFailed, + } + } + pub mod types { + use super::runtime_types; + pub mod deneb { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct ExecutionPayloadHeader { + pub parent_hash: ::subxt::utils::H256, + pub fee_recipient: ::subxt::utils::H160, + pub state_root: ::subxt::utils::H256, + pub receipts_root: ::subxt::utils::H256, + pub logs_bloom: ::std::vec::Vec<::core::primitive::u8>, + pub prev_randao: ::subxt::utils::H256, + pub block_number: ::core::primitive::u64, + pub gas_limit: ::core::primitive::u64, + pub gas_used: ::core::primitive::u64, + pub timestamp: ::core::primitive::u64, + pub extra_data: ::std::vec::Vec<::core::primitive::u8>, + pub base_fee_per_gas: runtime_types::primitive_types::U256, + pub block_hash: ::subxt::utils::H256, + pub transactions_root: ::subxt::utils::H256, + pub withdrawals_root: ::subxt::utils::H256, + pub blob_gas_used: ::core::primitive::u64, + pub excess_blob_gas: ::core::primitive::u64, + } + } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct Signature(pub runtime_types::sp_core::ed25519::Signature); + pub struct BeaconHeader { + pub slot: ::core::primitive::u64, + pub proposer_index: ::core::primitive::u64, + pub parent_root: ::subxt::utils::H256, + pub state_root: ::subxt::utils::H256, + pub body_root: ::subxt::utils::H256, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct CompactBeaconState { + #[codec(compact)] + pub slot: ::core::primitive::u64, + pub block_roots_root: ::subxt::utils::H256, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct CompactExecutionHeader { + pub parent_hash: 
::subxt::utils::H256, + #[codec(compact)] + pub block_number: ::core::primitive::u64, + pub state_root: ::subxt::utils::H256, + pub receipts_root: ::subxt::utils::H256, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct ExecutionHeaderState { + pub beacon_block_root: ::subxt::utils::H256, + pub beacon_slot: ::core::primitive::u64, + pub block_hash: ::subxt::utils::H256, + pub block_number: ::core::primitive::u64, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct ExecutionPayloadHeader { + pub parent_hash: ::subxt::utils::H256, + pub fee_recipient: ::subxt::utils::H160, + pub state_root: ::subxt::utils::H256, + pub receipts_root: ::subxt::utils::H256, + pub logs_bloom: ::std::vec::Vec<::core::primitive::u8>, + pub prev_randao: ::subxt::utils::H256, + pub block_number: ::core::primitive::u64, + pub gas_limit: ::core::primitive::u64, + pub gas_used: ::core::primitive::u64, + pub timestamp: ::core::primitive::u64, + pub extra_data: ::std::vec::Vec<::core::primitive::u8>, + pub base_fee_per_gas: runtime_types::primitive_types::U256, + pub block_hash: ::subxt::utils::H256, + pub transactions_root: ::subxt::utils::H256, + pub withdrawals_root: ::subxt::utils::H256, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Fork { + pub version: [::core::primitive::u8; 4usize], + pub epoch: ::core::primitive::u64, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct ForkVersions { + pub genesis: runtime_types::snowbridge_beacon_primitives::types::Fork, + pub altair: runtime_types::snowbridge_beacon_primitives::types::Fork, + pub bellatrix: runtime_types::snowbridge_beacon_primitives::types::Fork, + pub capella: runtime_types::snowbridge_beacon_primitives::types::Fork, + pub deneb: runtime_types::snowbridge_beacon_primitives::types::Fork, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, 
PartialEq)] + pub struct PublicKey(pub [::core::primitive::u8; 48usize]); + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Signature(pub [::core::primitive::u8; 96usize]); + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct SyncAggregate { + pub sync_committee_bits: [::core::primitive::u8; 64usize], + pub sync_committee_signature: + runtime_types::snowbridge_beacon_primitives::types::Signature, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct SyncCommittee { + pub pubkeys: + [runtime_types::snowbridge_beacon_primitives::types::PublicKey; 512usize], + pub aggregate_pubkey: + runtime_types::snowbridge_beacon_primitives::types::PublicKey, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct SyncCommitteePrepared { + pub root: ::subxt::utils::H256, + pub pubkeys: ::std::boxed::Box< + [runtime_types::snowbridge_milagro_bls::keys::PublicKey; 512usize], + >, + pub aggregate_pubkey: runtime_types::snowbridge_milagro_bls::keys::PublicKey, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum VersionedExecutionPayloadHeader { + # [codec (index = 0)] Capella (runtime_types :: snowbridge_beacon_primitives :: types :: ExecutionPayloadHeader ,) , # [codec (index = 1)] Deneb (runtime_types :: snowbridge_beacon_primitives :: types :: deneb :: ExecutionPayloadHeader ,) , } + } + pub mod updates { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct AncestryProof { + pub header_branch: ::std::vec::Vec<::subxt::utils::H256>, + pub finalized_block_root: ::subxt::utils::H256, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct CheckpointUpdate { + pub header: runtime_types::snowbridge_beacon_primitives::types::BeaconHeader, + pub current_sync_committee: + 
runtime_types::snowbridge_beacon_primitives::types::SyncCommittee, + pub current_sync_committee_branch: ::std::vec::Vec<::subxt::utils::H256>, + pub validators_root: ::subxt::utils::H256, + pub block_roots_root: ::subxt::utils::H256, + pub block_roots_branch: ::std::vec::Vec<::subxt::utils::H256>, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct ExecutionHeaderUpdate { pub header : runtime_types :: snowbridge_beacon_primitives :: types :: BeaconHeader , pub ancestry_proof : :: core :: option :: Option < runtime_types :: snowbridge_beacon_primitives :: updates :: AncestryProof > , pub execution_header : runtime_types :: snowbridge_beacon_primitives :: types :: VersionedExecutionPayloadHeader , pub execution_branch : :: std :: vec :: Vec < :: subxt :: utils :: H256 > , } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct NextSyncCommitteeUpdate { + pub next_sync_committee: + runtime_types::snowbridge_beacon_primitives::types::SyncCommittee, + pub next_sync_committee_branch: ::std::vec::Vec<::subxt::utils::H256>, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Update { pub attested_header : runtime_types :: snowbridge_beacon_primitives :: types :: BeaconHeader , pub sync_aggregate : runtime_types :: snowbridge_beacon_primitives :: types :: SyncAggregate , pub signature_slot : :: core :: primitive :: u64 , pub next_sync_committee_update : :: core :: option :: Option < runtime_types :: snowbridge_beacon_primitives :: updates :: NextSyncCommitteeUpdate > , pub finalized_header : runtime_types :: snowbridge_beacon_primitives :: types :: BeaconHeader , pub finality_branch : :: std :: vec :: Vec < :: subxt :: utils :: H256 > , pub block_roots_root : :: subxt :: utils :: H256 , pub block_roots_branch : :: std :: vec :: Vec < :: subxt :: utils :: H256 > , } } } - pub mod sp_consensus_slots { - use super::runtime_types; - #[derive( - :: codec :: 
Decode, - :: codec :: Encode, - :: subxt :: ext :: codec :: CompactAs, - Clone, - Debug, - PartialEq, - )] - pub struct Slot(pub ::core::primitive::u64); - #[derive( - :: codec :: Decode, - :: codec :: Encode, - :: subxt :: ext :: codec :: CompactAs, - Clone, - Debug, - PartialEq, - )] - pub struct SlotDuration(pub ::core::primitive::u64); - } - pub mod sp_core { + pub mod snowbridge_core { use super::runtime_types; - pub mod crypto { + pub mod inbound { use super::runtime_types; #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct KeyTypeId(pub [::core::primitive::u8; 4usize]); + pub struct Log { + pub address: ::subxt::utils::H160, + pub topics: ::std::vec::Vec<::subxt::utils::H256>, + pub data: ::std::vec::Vec<::core::primitive::u8>, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Message { + pub event_log: runtime_types::snowbridge_core::inbound::Log, + pub proof: runtime_types::snowbridge_core::inbound::Proof, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Proof { + pub block_hash: ::subxt::utils::H256, + pub tx_index: ::core::primitive::u32, + pub data: ( + ::std::vec::Vec<::std::vec::Vec<::core::primitive::u8>>, + ::std::vec::Vec<::std::vec::Vec<::core::primitive::u8>>, + ), + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum VerificationError { + #[codec(index = 0)] + HeaderNotFound, + #[codec(index = 1)] + LogNotFound, + #[codec(index = 2)] + InvalidLog, + #[codec(index = 3)] + InvalidProof, + } } - pub mod ecdsa { + pub mod operating_mode { use super::runtime_types; #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct Signature(pub [::core::primitive::u8; 65usize]); + pub enum BasicOperatingMode { + #[codec(index = 0)] + Normal, + #[codec(index = 1)] + Halted, + } } - pub mod ed25519 { + pub mod outbound { use super::runtime_types; + pub mod v1 { + use 
super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum AgentExecuteCommand { + #[codec(index = 0)] + TransferToken { + token: ::subxt::utils::H160, + recipient: ::subxt::utils::H160, + amount: ::core::primitive::u128, + }, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Command { + #[codec(index = 0)] + AgentExecute { + agent_id: ::subxt::utils::H256, + command: + runtime_types::snowbridge_core::outbound::v1::AgentExecuteCommand, + }, + #[codec(index = 1)] + Upgrade { + impl_address: ::subxt::utils::H160, + impl_code_hash: ::subxt::utils::H256, + initializer: ::core::option::Option< + runtime_types::snowbridge_core::outbound::v1::Initializer, + >, + }, + #[codec(index = 2)] + CreateAgent { agent_id: ::subxt::utils::H256 }, + #[codec(index = 3)] + CreateChannel { + channel_id: runtime_types::snowbridge_core::ChannelId, + agent_id: ::subxt::utils::H256, + mode: runtime_types::snowbridge_core::outbound::v1::OperatingMode, + }, + #[codec(index = 4)] + UpdateChannel { + channel_id: runtime_types::snowbridge_core::ChannelId, + mode: runtime_types::snowbridge_core::outbound::v1::OperatingMode, + }, + #[codec(index = 5)] + SetOperatingMode { + mode: runtime_types::snowbridge_core::outbound::v1::OperatingMode, + }, + #[codec(index = 6)] + TransferNativeFromAgent { + agent_id: ::subxt::utils::H256, + recipient: ::subxt::utils::H160, + amount: ::core::primitive::u128, + }, + #[codec(index = 7)] + SetTokenTransferFees { + create_asset_xcm: ::core::primitive::u128, + transfer_asset_xcm: ::core::primitive::u128, + register_token: runtime_types::primitive_types::U256, + }, + #[codec(index = 8)] + SetPricingParameters { + exchange_rate: runtime_types::snowbridge_core::pricing::UD60x18, + delivery_cost: ::core::primitive::u128, + multiplier: runtime_types::snowbridge_core::pricing::UD60x18, + }, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub 
struct Initializer { + pub params: ::std::vec::Vec<::core::primitive::u8>, + pub maximum_required_gas: ::core::primitive::u64, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum OperatingMode { + #[codec(index = 0)] + Normal, + #[codec(index = 1)] + RejectingOutboundMessages, + } + } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct Public(pub [::core::primitive::u8; 32usize]); + pub struct Fee<_0> { + pub local: _0, + pub remote: _0, + } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct Signature(pub [::core::primitive::u8; 64usize]); + pub enum SendError { + #[codec(index = 0)] + MessageTooLarge, + #[codec(index = 1)] + Halted, + #[codec(index = 2)] + InvalidChannel, + } } - pub mod sr25519 { + pub mod pricing { use super::runtime_types; #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct Public(pub [::core::primitive::u8; 32usize]); + pub struct PricingParameters<_0> { + pub exchange_rate: runtime_types::sp_arithmetic::fixed_point::FixedU128, + pub rewards: runtime_types::snowbridge_core::pricing::Rewards<_0>, + pub fee_per_gas: runtime_types::primitive_types::U256, + pub multiplier: runtime_types::sp_arithmetic::fixed_point::FixedU128, + } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct Signature(pub [::core::primitive::u8; 64usize]); + pub struct Rewards<_0> { + pub local: _0, + pub remote: runtime_types::primitive_types::U256, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct UD60x18(pub runtime_types::primitive_types::U256); } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct OpaqueMetadata(pub ::std::vec::Vec<::core::primitive::u8>); + pub struct Channel { + pub agent_id: ::subxt::utils::H256, + pub para_id: runtime_types::polkadot_parachain_primitives::primitives::Id, + } #[derive(:: 
codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum Void {} + pub struct ChannelId(pub [::core::primitive::u8; 32usize]); } - pub mod sp_inherents { + pub mod snowbridge_milagro_bls { use super::runtime_types; - #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct CheckInherentsResult { - pub okay: ::core::primitive::bool, - pub fatal_error: ::core::primitive::bool, - pub errors: runtime_types::sp_inherents::InherentData, + pub mod keys { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct PublicKey { + pub point: runtime_types::snowbridge_amcl::bls381::ecp::ECP, + } } + } + pub mod snowbridge_outbound_queue_merkle_tree { + use super::runtime_types; #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct InherentData { - pub data: ::subxt::utils::KeyedVec< - [::core::primitive::u8; 8usize], - ::std::vec::Vec<::core::primitive::u8>, - >, + pub struct MerkleProof { + pub root: ::subxt::utils::H256, + pub proof: ::std::vec::Vec<::subxt::utils::H256>, + pub number_of_leaves: ::core::primitive::u64, + pub leaf_index: ::core::primitive::u64, + pub leaf: ::subxt::utils::H256, } } - pub mod sp_runtime { + pub mod snowbridge_pallet_ethereum_client { use super::runtime_types; - pub mod generic { - use super::runtime_types; - pub mod block { - use super::runtime_types; - #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct Block<_0, _1> { - pub header: _0, - pub extrinsics: ::std::vec::Vec<_1>, - } - } - pub mod digest { - use super::runtime_types; - #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum DigestItem { - #[codec(index = 6)] - PreRuntime( - [::core::primitive::u8; 4usize], - ::std::vec::Vec<::core::primitive::u8>, - ), - #[codec(index = 4)] - Consensus( - [::core::primitive::u8; 4usize], - ::std::vec::Vec<::core::primitive::u8>, - ), - 
#[codec(index = 5)] - Seal( - [::core::primitive::u8; 4usize], - ::std::vec::Vec<::core::primitive::u8>, - ), - #[codec(index = 0)] - Other(::std::vec::Vec<::core::primitive::u8>), - #[codec(index = 8)] - RuntimeEnvironmentUpdated, - } - } - } - pub mod transaction_validity { + pub mod pallet { use super::runtime_types; #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum InvalidTransaction { + pub enum Call { + # [codec (index = 0)] force_checkpoint { update : :: std :: boxed :: Box < runtime_types :: snowbridge_beacon_primitives :: updates :: CheckpointUpdate > , } , # [codec (index = 1)] submit { update : :: std :: boxed :: Box < runtime_types :: snowbridge_beacon_primitives :: updates :: Update > , } , # [codec (index = 2)] submit_execution_header { update : :: std :: boxed :: Box < runtime_types :: snowbridge_beacon_primitives :: updates :: ExecutionHeaderUpdate > , } , # [codec (index = 3)] set_operating_mode { mode : runtime_types :: snowbridge_core :: operating_mode :: BasicOperatingMode , } , } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Error { #[codec(index = 0)] - Call, + SkippedSyncCommitteePeriod, #[codec(index = 1)] - Payment, + IrrelevantUpdate, #[codec(index = 2)] - Future, + NotBootstrapped, #[codec(index = 3)] - Stale, + SyncCommitteeParticipantsNotSupermajority, #[codec(index = 4)] - BadProof, + InvalidHeaderMerkleProof, #[codec(index = 5)] - AncientBirthBlock, + InvalidSyncCommitteeMerkleProof, #[codec(index = 6)] - ExhaustsResources, + InvalidExecutionHeaderProof, #[codec(index = 7)] - Custom(::core::primitive::u8), + InvalidAncestryMerkleProof, #[codec(index = 8)] - BadMandatory, + InvalidBlockRootsRootMerkleProof, #[codec(index = 9)] - MandatoryValidation, + InvalidFinalizedHeaderGap, #[codec(index = 10)] - BadSigner, + HeaderNotFinalized, + #[codec(index = 11)] + BlockBodyHashTreeRootFailed, + #[codec(index = 12)] + HeaderHashTreeRootFailed, + #[codec(index = 13)] 
+ SyncCommitteeHashTreeRootFailed, + #[codec(index = 14)] + SigningRootHashTreeRootFailed, + #[codec(index = 15)] + ForkDataHashTreeRootFailed, + #[codec(index = 16)] + ExpectedFinalizedHeaderNotStored, + #[codec(index = 17)] + BLSPreparePublicKeysFailed, + #[codec(index = 18)] + BLSVerificationFailed( + runtime_types::snowbridge_beacon_primitives::bls::BlsError, + ), + #[codec(index = 19)] + InvalidUpdateSlot, + #[codec(index = 20)] + InvalidSyncCommitteeUpdate, + #[codec(index = 21)] + ExecutionHeaderTooFarBehind, + #[codec(index = 22)] + ExecutionHeaderSkippedBlock, + #[codec(index = 23)] + Halted, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Event { + #[codec(index = 0)] + BeaconHeaderImported { + block_hash: ::subxt::utils::H256, + slot: ::core::primitive::u64, + }, + #[codec(index = 1)] + ExecutionHeaderImported { + block_hash: ::subxt::utils::H256, + block_number: ::core::primitive::u64, + }, + #[codec(index = 2)] + SyncCommitteeUpdated { period: ::core::primitive::u64 }, + #[codec(index = 3)] + OperatingModeChanged { + mode: runtime_types::snowbridge_core::operating_mode::BasicOperatingMode, + }, + } + } + } + pub mod snowbridge_pallet_inbound_queue { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Call { + #[codec(index = 0)] + submit { message: runtime_types::snowbridge_core::inbound::Message }, + #[codec(index = 1)] + set_operating_mode { + mode: runtime_types::snowbridge_core::operating_mode::BasicOperatingMode, + }, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Error { + #[codec(index = 0)] + InvalidGateway, + #[codec(index = 1)] + InvalidEnvelope, + #[codec(index = 2)] + InvalidNonce, + #[codec(index = 3)] + InvalidPayload, + #[codec(index = 4)] + InvalidChannel, + #[codec(index = 5)] + MaxNonceReached, + #[codec(index = 6)] + 
InvalidAccountConversion, + #[codec(index = 7)] + Halted, + #[codec(index = 8)] + Verification(runtime_types::snowbridge_core::inbound::VerificationError), + #[codec(index = 9)] + Send(runtime_types::snowbridge_pallet_inbound_queue::pallet::SendError), + #[codec(index = 10)] + ConvertMessage( + runtime_types::snowbridge_router_primitives::inbound::ConvertMessageError, + ), + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Event { + #[codec(index = 0)] + MessageReceived { + channel_id: runtime_types::snowbridge_core::ChannelId, + nonce: ::core::primitive::u64, + message_id: [::core::primitive::u8; 32usize], + fee_burned: ::core::primitive::u128, + }, + #[codec(index = 1)] + OperatingModeChanged { + mode: runtime_types::snowbridge_core::operating_mode::BasicOperatingMode, + }, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum SendError { + #[codec(index = 0)] + NotApplicable, + #[codec(index = 1)] + NotRoutable, + #[codec(index = 2)] + Transport, + #[codec(index = 3)] + DestinationUnsupported, + #[codec(index = 4)] + ExceedsMaxMessageSize, + #[codec(index = 5)] + MissingArgument, + #[codec(index = 6)] + Fees, + } + } + } + pub mod snowbridge_pallet_outbound_queue { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Call { + #[codec(index = 0)] + set_operating_mode { + mode: runtime_types::snowbridge_core::operating_mode::BasicOperatingMode, + }, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Error { + #[codec(index = 0)] + MessageTooLarge, + #[codec(index = 1)] + Halted, + #[codec(index = 2)] + InvalidChannel, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Event { + #[codec(index = 0)] + MessageQueued { id: ::subxt::utils::H256 }, + #[codec(index = 1)] + MessageAccepted { id: 
::subxt::utils::H256, nonce: ::core::primitive::u64 }, + #[codec(index = 2)] + MessagesCommitted { root: ::subxt::utils::H256, count: ::core::primitive::u64 }, + #[codec(index = 3)] + OperatingModeChanged { + mode: runtime_types::snowbridge_core::operating_mode::BasicOperatingMode, + }, + } + } + pub mod types { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct CommittedMessage { + pub channel_id: runtime_types::snowbridge_core::ChannelId, + #[codec(compact)] + pub nonce: ::core::primitive::u64, + pub command: ::core::primitive::u8, + pub params: ::std::vec::Vec<::core::primitive::u8>, + #[codec(compact)] + pub max_dispatch_gas: ::core::primitive::u64, + #[codec(compact)] + pub max_fee_per_gas: ::core::primitive::u128, + #[codec(compact)] + pub reward: ::core::primitive::u128, + pub id: ::subxt::utils::H256, + } + } + } + pub mod snowbridge_pallet_system { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Call { + #[codec(index = 0)] + upgrade { + impl_address: ::subxt::utils::H160, + impl_code_hash: ::subxt::utils::H256, + initializer: ::core::option::Option< + runtime_types::snowbridge_core::outbound::v1::Initializer, + >, + }, + #[codec(index = 1)] + set_operating_mode { + mode: runtime_types::snowbridge_core::outbound::v1::OperatingMode, + }, + #[codec(index = 2)] + set_pricing_parameters { + params: runtime_types::snowbridge_core::pricing::PricingParameters< + ::core::primitive::u128, + >, + }, + #[codec(index = 3)] + create_agent, + #[codec(index = 4)] + create_channel { + mode: runtime_types::snowbridge_core::outbound::v1::OperatingMode, + }, + #[codec(index = 5)] + update_channel { + mode: runtime_types::snowbridge_core::outbound::v1::OperatingMode, + }, + #[codec(index = 6)] + force_update_channel { + channel_id: runtime_types::snowbridge_core::ChannelId, + mode: 
runtime_types::snowbridge_core::outbound::v1::OperatingMode, + }, + #[codec(index = 7)] + transfer_native_from_agent { + recipient: ::subxt::utils::H160, + amount: ::core::primitive::u128, + }, + #[codec(index = 8)] + force_transfer_native_from_agent { + location: ::std::boxed::Box, + recipient: ::subxt::utils::H160, + amount: ::core::primitive::u128, + }, + #[codec(index = 9)] + set_token_transfer_fees { + create_asset_xcm: ::core::primitive::u128, + transfer_asset_xcm: ::core::primitive::u128, + register_token: runtime_types::primitive_types::U256, + }, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Error { + #[codec(index = 0)] + LocationConversionFailed, + #[codec(index = 1)] + AgentAlreadyCreated, + #[codec(index = 2)] + NoAgent, + #[codec(index = 3)] + ChannelAlreadyCreated, + #[codec(index = 4)] + NoChannel, + #[codec(index = 5)] + UnsupportedLocationVersion, + #[codec(index = 6)] + InvalidLocation, + #[codec(index = 7)] + Send(runtime_types::snowbridge_core::outbound::SendError), + #[codec(index = 8)] + InvalidTokenTransferFees, + #[codec(index = 9)] + InvalidPricingParameters, + #[codec(index = 10)] + InvalidUpgradeParameters, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Event { + #[codec(index = 0)] + Upgrade { + impl_address: ::subxt::utils::H160, + impl_code_hash: ::subxt::utils::H256, + initializer_params_hash: ::core::option::Option<::subxt::utils::H256>, + }, + #[codec(index = 1)] + CreateAgent { + location: + ::std::boxed::Box, + agent_id: ::subxt::utils::H256, + }, + #[codec(index = 2)] + CreateChannel { + channel_id: runtime_types::snowbridge_core::ChannelId, + agent_id: ::subxt::utils::H256, + }, + #[codec(index = 3)] + UpdateChannel { + channel_id: runtime_types::snowbridge_core::ChannelId, + mode: runtime_types::snowbridge_core::outbound::v1::OperatingMode, + }, + #[codec(index = 4)] + SetOperatingMode { + mode: 
runtime_types::snowbridge_core::outbound::v1::OperatingMode, + }, + #[codec(index = 5)] + TransferNativeFromAgent { + agent_id: ::subxt::utils::H256, + recipient: ::subxt::utils::H160, + amount: ::core::primitive::u128, + }, + #[codec(index = 6)] + SetTokenTransferFees { + create_asset_xcm: ::core::primitive::u128, + transfer_asset_xcm: ::core::primitive::u128, + register_token: runtime_types::primitive_types::U256, + }, + #[codec(index = 7)] + PricingParametersChanged { + params: runtime_types::snowbridge_core::pricing::PricingParameters< + ::core::primitive::u128, + >, + }, + } + } + } + pub mod snowbridge_router_primitives { + use super::runtime_types; + pub mod inbound { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum ConvertMessageError { + #[codec(index = 0)] + UnsupportedVersion, + } + } + } + pub mod sp_arithmetic { + use super::runtime_types; + pub mod fixed_point { + use super::runtime_types; + #[derive( + :: codec :: Decode, + :: codec :: Encode, + :: subxt :: ext :: codec :: CompactAs, + Clone, + Debug, + PartialEq, + )] + pub struct FixedU128(pub ::core::primitive::u128); + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum ArithmeticError { + #[codec(index = 0)] + Underflow, + #[codec(index = 1)] + Overflow, + #[codec(index = 2)] + DivisionByZero, + } + } + pub mod sp_consensus_aura { + use super::runtime_types; + pub mod sr25519 { + use super::runtime_types; + pub mod app_sr25519 { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Public(pub runtime_types::sp_core::sr25519::Public); + } + } + } + pub mod sp_consensus_grandpa { + use super::runtime_types; + pub mod app { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Public(pub runtime_types::sp_core::ed25519::Public); + #[derive(:: codec :: Decode, :: codec 
:: Encode, Clone, Debug, PartialEq)] + pub struct Signature(pub runtime_types::sp_core::ed25519::Signature); + } + } + pub mod sp_consensus_slots { + use super::runtime_types; + #[derive( + :: codec :: Decode, + :: codec :: Encode, + :: subxt :: ext :: codec :: CompactAs, + Clone, + Debug, + PartialEq, + )] + pub struct Slot(pub ::core::primitive::u64); + #[derive( + :: codec :: Decode, + :: codec :: Encode, + :: subxt :: ext :: codec :: CompactAs, + Clone, + Debug, + PartialEq, + )] + pub struct SlotDuration(pub ::core::primitive::u64); + } + pub mod sp_core { + use super::runtime_types; + pub mod crypto { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct KeyTypeId(pub [::core::primitive::u8; 4usize]); + } + pub mod ecdsa { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Signature(pub [::core::primitive::u8; 65usize]); + } + pub mod ed25519 { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Public(pub [::core::primitive::u8; 32usize]); + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Signature(pub [::core::primitive::u8; 64usize]); + } + pub mod sr25519 { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Public(pub [::core::primitive::u8; 32usize]); + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Signature(pub [::core::primitive::u8; 64usize]); + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct OpaqueMetadata(pub ::std::vec::Vec<::core::primitive::u8>); + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Void {} + } + pub mod sp_inherents { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, 
PartialEq)] + pub struct CheckInherentsResult { + pub okay: ::core::primitive::bool, + pub fatal_error: ::core::primitive::bool, + pub errors: runtime_types::sp_inherents::InherentData, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct InherentData { + pub data: ::subxt::utils::KeyedVec< + [::core::primitive::u8; 8usize], + ::std::vec::Vec<::core::primitive::u8>, + >, + } + } + pub mod sp_runtime { + use super::runtime_types; + pub mod generic { + use super::runtime_types; + pub mod block { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Block<_0, _1> { + pub header: _0, + pub extrinsics: ::std::vec::Vec<_1>, + } + } + pub mod digest { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum DigestItem { + #[codec(index = 6)] + PreRuntime( + [::core::primitive::u8; 4usize], + ::std::vec::Vec<::core::primitive::u8>, + ), + #[codec(index = 4)] + Consensus( + [::core::primitive::u8; 4usize], + ::std::vec::Vec<::core::primitive::u8>, + ), + #[codec(index = 5)] + Seal( + [::core::primitive::u8; 4usize], + ::std::vec::Vec<::core::primitive::u8>, + ), + #[codec(index = 0)] + Other(::std::vec::Vec<::core::primitive::u8>), + #[codec(index = 8)] + RuntimeEnvironmentUpdated, + } + } + } + pub mod transaction_validity { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum InvalidTransaction { + #[codec(index = 0)] + Call, + #[codec(index = 1)] + Payment, + #[codec(index = 2)] + Future, + #[codec(index = 3)] + Stale, + #[codec(index = 4)] + BadProof, + #[codec(index = 5)] + AncientBirthBlock, + #[codec(index = 6)] + ExhaustsResources, + #[codec(index = 7)] + Custom(::core::primitive::u8), + #[codec(index = 8)] + BadMandatory, + #[codec(index = 9)] + MandatoryValidation, + #[codec(index = 10)] + BadSigner, + } + #[derive(:: codec :: Decode, :: 
codec :: Encode, Clone, Debug, PartialEq)] + pub enum TransactionSource { + #[codec(index = 0)] + InBlock, + #[codec(index = 1)] + Local, + #[codec(index = 2)] + External, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum TransactionValidityError { + #[codec(index = 0)] + Invalid(runtime_types::sp_runtime::transaction_validity::InvalidTransaction), + #[codec(index = 1)] + Unknown(runtime_types::sp_runtime::transaction_validity::UnknownTransaction), + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum UnknownTransaction { + #[codec(index = 0)] + CannotLookup, + #[codec(index = 1)] + NoUnsignedValidator, + #[codec(index = 2)] + Custom(::core::primitive::u8), + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct ValidTransaction { + pub priority: ::core::primitive::u64, + pub requires: ::std::vec::Vec<::std::vec::Vec<::core::primitive::u8>>, + pub provides: ::std::vec::Vec<::std::vec::Vec<::core::primitive::u8>>, + pub longevity: ::core::primitive::u64, + pub propagate: ::core::primitive::bool, + } + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum DispatchError { + #[codec(index = 0)] + Other, + #[codec(index = 1)] + CannotLookup, + #[codec(index = 2)] + BadOrigin, + #[codec(index = 3)] + Module(runtime_types::sp_runtime::ModuleError), + #[codec(index = 4)] + ConsumerRemaining, + #[codec(index = 5)] + NoProviders, + #[codec(index = 6)] + TooManyConsumers, + #[codec(index = 7)] + Token(runtime_types::sp_runtime::TokenError), + #[codec(index = 8)] + Arithmetic(runtime_types::sp_arithmetic::ArithmeticError), + #[codec(index = 9)] + Transactional(runtime_types::sp_runtime::TransactionalError), + #[codec(index = 10)] + Exhausted, + #[codec(index = 11)] + Corruption, + #[codec(index = 12)] + Unavailable, + #[codec(index = 13)] + RootNotAllowed, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, 
PartialEq)] + pub struct ModuleError { + pub index: ::core::primitive::u8, + pub error: [::core::primitive::u8; 4usize], + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum MultiSignature { + #[codec(index = 0)] + Ed25519(runtime_types::sp_core::ed25519::Signature), + #[codec(index = 1)] + Sr25519(runtime_types::sp_core::sr25519::Signature), + #[codec(index = 2)] + Ecdsa(runtime_types::sp_core::ecdsa::Signature), + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum TokenError { + #[codec(index = 0)] + FundsUnavailable, + #[codec(index = 1)] + OnlyProvider, + #[codec(index = 2)] + BelowMinimum, + #[codec(index = 3)] + CannotCreate, + #[codec(index = 4)] + UnknownAsset, + #[codec(index = 5)] + Frozen, + #[codec(index = 6)] + Unsupported, + #[codec(index = 7)] + CannotCreateHold, + #[codec(index = 8)] + NotExpendable, + #[codec(index = 9)] + Blocked, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum TransactionalError { + #[codec(index = 0)] + LimitReached, + #[codec(index = 1)] + NoLayer, + } + } + pub mod sp_trie { + use super::runtime_types; + pub mod storage_proof { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct StorageProof { + pub trie_nodes: ::std::vec::Vec<::std::vec::Vec<::core::primitive::u8>>, + } + } + } + pub mod sp_version { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct RuntimeVersion { + pub spec_name: ::std::string::String, + pub impl_name: ::std::string::String, + pub authoring_version: ::core::primitive::u32, + pub spec_version: ::core::primitive::u32, + pub impl_version: ::core::primitive::u32, + pub apis: + ::std::vec::Vec<([::core::primitive::u8; 8usize], ::core::primitive::u32)>, + pub transaction_version: ::core::primitive::u32, + pub state_version: ::core::primitive::u8, + } + } + pub 
mod sp_weights { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct RuntimeDbWeight { + pub read: ::core::primitive::u64, + pub write: ::core::primitive::u64, + } + } + pub mod staging_parachain_info { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Call {} + } + } + pub mod staging_xcm { + use super::runtime_types; + pub mod v3 { + use super::runtime_types; + pub mod multilocation { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct MultiLocation { + pub parents: ::core::primitive::u8, + pub interior: runtime_types::xcm::v3::junctions::Junctions, + } + } + } + pub mod v4 { + use super::runtime_types; + pub mod asset { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Asset { + pub id: runtime_types::staging_xcm::v4::asset::AssetId, + pub fun: runtime_types::staging_xcm::v4::asset::Fungibility, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum AssetFilter { + #[codec(index = 0)] + Definite(runtime_types::staging_xcm::v4::asset::Assets), + #[codec(index = 1)] + Wild(runtime_types::staging_xcm::v4::asset::WildAsset), + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct AssetId(pub runtime_types::staging_xcm::v4::location::Location); + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum AssetInstance { + #[codec(index = 0)] + Undefined, + #[codec(index = 1)] + Index(#[codec(compact)] ::core::primitive::u128), + #[codec(index = 2)] + Array4([::core::primitive::u8; 4usize]), + #[codec(index = 3)] + Array8([::core::primitive::u8; 8usize]), + #[codec(index = 4)] + Array16([::core::primitive::u8; 16usize]), + #[codec(index = 5)] + 
Array32([::core::primitive::u8; 32usize]), + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Assets( + pub ::std::vec::Vec, + ); + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Fungibility { + #[codec(index = 0)] + Fungible(#[codec(compact)] ::core::primitive::u128), + #[codec(index = 1)] + NonFungible(runtime_types::staging_xcm::v4::asset::AssetInstance), + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum WildAsset { + #[codec(index = 0)] + All, + #[codec(index = 1)] + AllOf { + id: runtime_types::staging_xcm::v4::asset::AssetId, + fun: runtime_types::staging_xcm::v4::asset::WildFungibility, + }, + #[codec(index = 2)] + AllCounted(#[codec(compact)] ::core::primitive::u32), + #[codec(index = 3)] + AllOfCounted { + id: runtime_types::staging_xcm::v4::asset::AssetId, + fun: runtime_types::staging_xcm::v4::asset::WildFungibility, + #[codec(compact)] + count: ::core::primitive::u32, + }, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum WildFungibility { + #[codec(index = 0)] + Fungible, + #[codec(index = 1)] + NonFungible, + } + } + pub mod junction { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Junction { + #[codec(index = 0)] + Parachain(#[codec(compact)] ::core::primitive::u32), + #[codec(index = 1)] + AccountId32 { + network: ::core::option::Option< + runtime_types::staging_xcm::v4::junction::NetworkId, + >, + id: [::core::primitive::u8; 32usize], + }, + #[codec(index = 2)] + AccountIndex64 { + network: ::core::option::Option< + runtime_types::staging_xcm::v4::junction::NetworkId, + >, + #[codec(compact)] + index: ::core::primitive::u64, + }, + #[codec(index = 3)] + AccountKey20 { + network: ::core::option::Option< + runtime_types::staging_xcm::v4::junction::NetworkId, + >, + key: [::core::primitive::u8; 20usize], + }, + 
#[codec(index = 4)] + PalletInstance(::core::primitive::u8), + #[codec(index = 5)] + GeneralIndex(#[codec(compact)] ::core::primitive::u128), + #[codec(index = 6)] + GeneralKey { + length: ::core::primitive::u8, + data: [::core::primitive::u8; 32usize], + }, + #[codec(index = 7)] + OnlyChild, + #[codec(index = 8)] + Plurality { + id: runtime_types::xcm::v3::junction::BodyId, + part: runtime_types::xcm::v3::junction::BodyPart, + }, + #[codec(index = 9)] + GlobalConsensus(runtime_types::staging_xcm::v4::junction::NetworkId), + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum NetworkId { + #[codec(index = 0)] + ByGenesis([::core::primitive::u8; 32usize]), + #[codec(index = 1)] + ByFork { + block_number: ::core::primitive::u64, + block_hash: [::core::primitive::u8; 32usize], + }, + #[codec(index = 2)] + Polkadot, + #[codec(index = 3)] + Kusama, + #[codec(index = 4)] + Westend, + #[codec(index = 5)] + Rococo, + #[codec(index = 6)] + Wococo, + #[codec(index = 7)] + Ethereum { + #[codec(compact)] + chain_id: ::core::primitive::u64, + }, + #[codec(index = 8)] + BitcoinCore, + #[codec(index = 9)] + BitcoinCash, + #[codec(index = 10)] + PolkadotBulletin, + } + } + pub mod junctions { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Junctions { + #[codec(index = 0)] + Here, + #[codec(index = 1)] + X1([runtime_types::staging_xcm::v4::junction::Junction; 1usize]), + #[codec(index = 2)] + X2([runtime_types::staging_xcm::v4::junction::Junction; 2usize]), + #[codec(index = 3)] + X3([runtime_types::staging_xcm::v4::junction::Junction; 3usize]), + #[codec(index = 4)] + X4([runtime_types::staging_xcm::v4::junction::Junction; 4usize]), + #[codec(index = 5)] + X5([runtime_types::staging_xcm::v4::junction::Junction; 5usize]), + #[codec(index = 6)] + X6([runtime_types::staging_xcm::v4::junction::Junction; 6usize]), + #[codec(index = 7)] + 
X7([runtime_types::staging_xcm::v4::junction::Junction; 7usize]), + #[codec(index = 8)] + X8([runtime_types::staging_xcm::v4::junction::Junction; 8usize]), + } + } + pub mod location { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Location { + pub parents: ::core::primitive::u8, + pub interior: runtime_types::staging_xcm::v4::junctions::Junctions, + } + } + pub mod traits { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Outcome { + #[codec(index = 0)] + Complete { used: ::sp_weights::Weight }, + #[codec(index = 1)] + Incomplete { + used: ::sp_weights::Weight, + error: runtime_types::xcm::v3::traits::Error, + }, + #[codec(index = 2)] + Error { error: runtime_types::xcm::v3::traits::Error }, + } + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Instruction { + #[codec(index = 0)] + WithdrawAsset(runtime_types::staging_xcm::v4::asset::Assets), + #[codec(index = 1)] + ReserveAssetDeposited(runtime_types::staging_xcm::v4::asset::Assets), + #[codec(index = 2)] + ReceiveTeleportedAsset(runtime_types::staging_xcm::v4::asset::Assets), + #[codec(index = 3)] + QueryResponse { + #[codec(compact)] + query_id: ::core::primitive::u64, + response: runtime_types::staging_xcm::v4::Response, + max_weight: ::sp_weights::Weight, + querier: ::core::option::Option< + runtime_types::staging_xcm::v4::location::Location, + >, + }, + #[codec(index = 4)] + TransferAsset { + assets: runtime_types::staging_xcm::v4::asset::Assets, + beneficiary: runtime_types::staging_xcm::v4::location::Location, + }, + #[codec(index = 5)] + TransferReserveAsset { + assets: runtime_types::staging_xcm::v4::asset::Assets, + dest: runtime_types::staging_xcm::v4::location::Location, + xcm: runtime_types::staging_xcm::v4::Xcm, + }, + #[codec(index = 6)] + Transact { + origin_kind: runtime_types::xcm::v2::OriginKind, + 
require_weight_at_most: ::sp_weights::Weight, + call: runtime_types::xcm::double_encoded::DoubleEncoded, + }, + #[codec(index = 7)] + HrmpNewChannelOpenRequest { + #[codec(compact)] + sender: ::core::primitive::u32, + #[codec(compact)] + max_message_size: ::core::primitive::u32, + #[codec(compact)] + max_capacity: ::core::primitive::u32, + }, + #[codec(index = 8)] + HrmpChannelAccepted { + #[codec(compact)] + recipient: ::core::primitive::u32, + }, + #[codec(index = 9)] + HrmpChannelClosing { + #[codec(compact)] + initiator: ::core::primitive::u32, + #[codec(compact)] + sender: ::core::primitive::u32, + #[codec(compact)] + recipient: ::core::primitive::u32, + }, + #[codec(index = 10)] + ClearOrigin, + #[codec(index = 11)] + DescendOrigin(runtime_types::staging_xcm::v4::junctions::Junctions), + #[codec(index = 12)] + ReportError(runtime_types::staging_xcm::v4::QueryResponseInfo), + #[codec(index = 13)] + DepositAsset { + assets: runtime_types::staging_xcm::v4::asset::AssetFilter, + beneficiary: runtime_types::staging_xcm::v4::location::Location, + }, + #[codec(index = 14)] + DepositReserveAsset { + assets: runtime_types::staging_xcm::v4::asset::AssetFilter, + dest: runtime_types::staging_xcm::v4::location::Location, + xcm: runtime_types::staging_xcm::v4::Xcm, + }, + #[codec(index = 15)] + ExchangeAsset { + give: runtime_types::staging_xcm::v4::asset::AssetFilter, + want: runtime_types::staging_xcm::v4::asset::Assets, + maximal: ::core::primitive::bool, + }, + #[codec(index = 16)] + InitiateReserveWithdraw { + assets: runtime_types::staging_xcm::v4::asset::AssetFilter, + reserve: runtime_types::staging_xcm::v4::location::Location, + xcm: runtime_types::staging_xcm::v4::Xcm, + }, + #[codec(index = 17)] + InitiateTeleport { + assets: runtime_types::staging_xcm::v4::asset::AssetFilter, + dest: runtime_types::staging_xcm::v4::location::Location, + xcm: runtime_types::staging_xcm::v4::Xcm, + }, + #[codec(index = 18)] + ReportHolding { + response_info: 
runtime_types::staging_xcm::v4::QueryResponseInfo, + assets: runtime_types::staging_xcm::v4::asset::AssetFilter, + }, + #[codec(index = 19)] + BuyExecution { + fees: runtime_types::staging_xcm::v4::asset::Asset, + weight_limit: runtime_types::xcm::v3::WeightLimit, + }, + #[codec(index = 20)] + RefundSurplus, + #[codec(index = 21)] + SetErrorHandler(runtime_types::staging_xcm::v4::Xcm), + #[codec(index = 22)] + SetAppendix(runtime_types::staging_xcm::v4::Xcm), + #[codec(index = 23)] + ClearError, + #[codec(index = 24)] + ClaimAsset { + assets: runtime_types::staging_xcm::v4::asset::Assets, + ticket: runtime_types::staging_xcm::v4::location::Location, + }, + #[codec(index = 25)] + Trap(#[codec(compact)] ::core::primitive::u64), + #[codec(index = 26)] + SubscribeVersion { + #[codec(compact)] + query_id: ::core::primitive::u64, + max_response_weight: ::sp_weights::Weight, + }, + #[codec(index = 27)] + UnsubscribeVersion, + #[codec(index = 28)] + BurnAsset(runtime_types::staging_xcm::v4::asset::Assets), + #[codec(index = 29)] + ExpectAsset(runtime_types::staging_xcm::v4::asset::Assets), + #[codec(index = 30)] + ExpectOrigin( + ::core::option::Option, + ), + #[codec(index = 31)] + ExpectError( + ::core::option::Option<( + ::core::primitive::u32, + runtime_types::xcm::v3::traits::Error, + )>, + ), + #[codec(index = 32)] + ExpectTransactStatus(runtime_types::xcm::v3::MaybeErrorCode), + #[codec(index = 33)] + QueryPallet { + module_name: ::std::vec::Vec<::core::primitive::u8>, + response_info: runtime_types::staging_xcm::v4::QueryResponseInfo, + }, + #[codec(index = 34)] + ExpectPallet { + #[codec(compact)] + index: ::core::primitive::u32, + name: ::std::vec::Vec<::core::primitive::u8>, + module_name: ::std::vec::Vec<::core::primitive::u8>, + #[codec(compact)] + crate_major: ::core::primitive::u32, + #[codec(compact)] + min_crate_minor: ::core::primitive::u32, + }, + #[codec(index = 35)] + ReportTransactStatus(runtime_types::staging_xcm::v4::QueryResponseInfo), + 
#[codec(index = 36)] + ClearTransactStatus, + #[codec(index = 37)] + UniversalOrigin(runtime_types::staging_xcm::v4::junction::Junction), + #[codec(index = 38)] + ExportMessage { + network: runtime_types::staging_xcm::v4::junction::NetworkId, + destination: runtime_types::staging_xcm::v4::junctions::Junctions, + xcm: runtime_types::staging_xcm::v4::Xcm, + }, + #[codec(index = 39)] + LockAsset { + asset: runtime_types::staging_xcm::v4::asset::Asset, + unlocker: runtime_types::staging_xcm::v4::location::Location, + }, + #[codec(index = 40)] + UnlockAsset { + asset: runtime_types::staging_xcm::v4::asset::Asset, + target: runtime_types::staging_xcm::v4::location::Location, + }, + #[codec(index = 41)] + NoteUnlockable { + asset: runtime_types::staging_xcm::v4::asset::Asset, + owner: runtime_types::staging_xcm::v4::location::Location, + }, + #[codec(index = 42)] + RequestUnlock { + asset: runtime_types::staging_xcm::v4::asset::Asset, + locker: runtime_types::staging_xcm::v4::location::Location, + }, + #[codec(index = 43)] + SetFeesMode { jit_withdraw: ::core::primitive::bool }, + #[codec(index = 44)] + SetTopic([::core::primitive::u8; 32usize]), + #[codec(index = 45)] + ClearTopic, + #[codec(index = 46)] + AliasOrigin(runtime_types::staging_xcm::v4::location::Location), + #[codec(index = 47)] + UnpaidExecution { + weight_limit: runtime_types::xcm::v3::WeightLimit, + check_origin: ::core::option::Option< + runtime_types::staging_xcm::v4::location::Location, + >, + }, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Instruction2 { + #[codec(index = 0)] + WithdrawAsset(runtime_types::staging_xcm::v4::asset::Assets), + #[codec(index = 1)] + ReserveAssetDeposited(runtime_types::staging_xcm::v4::asset::Assets), + #[codec(index = 2)] + ReceiveTeleportedAsset(runtime_types::staging_xcm::v4::asset::Assets), + #[codec(index = 3)] + QueryResponse { + #[codec(compact)] + query_id: ::core::primitive::u64, + response: 
runtime_types::staging_xcm::v4::Response, + max_weight: ::sp_weights::Weight, + querier: ::core::option::Option< + runtime_types::staging_xcm::v4::location::Location, + >, + }, + #[codec(index = 4)] + TransferAsset { + assets: runtime_types::staging_xcm::v4::asset::Assets, + beneficiary: runtime_types::staging_xcm::v4::location::Location, + }, + #[codec(index = 5)] + TransferReserveAsset { + assets: runtime_types::staging_xcm::v4::asset::Assets, + dest: runtime_types::staging_xcm::v4::location::Location, + xcm: runtime_types::staging_xcm::v4::Xcm, + }, + #[codec(index = 6)] + Transact { + origin_kind: runtime_types::xcm::v2::OriginKind, + require_weight_at_most: ::sp_weights::Weight, + call: runtime_types::xcm::double_encoded::DoubleEncoded2, + }, + #[codec(index = 7)] + HrmpNewChannelOpenRequest { + #[codec(compact)] + sender: ::core::primitive::u32, + #[codec(compact)] + max_message_size: ::core::primitive::u32, + #[codec(compact)] + max_capacity: ::core::primitive::u32, + }, + #[codec(index = 8)] + HrmpChannelAccepted { + #[codec(compact)] + recipient: ::core::primitive::u32, + }, + #[codec(index = 9)] + HrmpChannelClosing { + #[codec(compact)] + initiator: ::core::primitive::u32, + #[codec(compact)] + sender: ::core::primitive::u32, + #[codec(compact)] + recipient: ::core::primitive::u32, + }, + #[codec(index = 10)] + ClearOrigin, + #[codec(index = 11)] + DescendOrigin(runtime_types::staging_xcm::v4::junctions::Junctions), + #[codec(index = 12)] + ReportError(runtime_types::staging_xcm::v4::QueryResponseInfo), + #[codec(index = 13)] + DepositAsset { + assets: runtime_types::staging_xcm::v4::asset::AssetFilter, + beneficiary: runtime_types::staging_xcm::v4::location::Location, + }, + #[codec(index = 14)] + DepositReserveAsset { + assets: runtime_types::staging_xcm::v4::asset::AssetFilter, + dest: runtime_types::staging_xcm::v4::location::Location, + xcm: runtime_types::staging_xcm::v4::Xcm, + }, + #[codec(index = 15)] + ExchangeAsset { + give: 
runtime_types::staging_xcm::v4::asset::AssetFilter, + want: runtime_types::staging_xcm::v4::asset::Assets, + maximal: ::core::primitive::bool, + }, + #[codec(index = 16)] + InitiateReserveWithdraw { + assets: runtime_types::staging_xcm::v4::asset::AssetFilter, + reserve: runtime_types::staging_xcm::v4::location::Location, + xcm: runtime_types::staging_xcm::v4::Xcm, + }, + #[codec(index = 17)] + InitiateTeleport { + assets: runtime_types::staging_xcm::v4::asset::AssetFilter, + dest: runtime_types::staging_xcm::v4::location::Location, + xcm: runtime_types::staging_xcm::v4::Xcm, + }, + #[codec(index = 18)] + ReportHolding { + response_info: runtime_types::staging_xcm::v4::QueryResponseInfo, + assets: runtime_types::staging_xcm::v4::asset::AssetFilter, + }, + #[codec(index = 19)] + BuyExecution { + fees: runtime_types::staging_xcm::v4::asset::Asset, + weight_limit: runtime_types::xcm::v3::WeightLimit, + }, + #[codec(index = 20)] + RefundSurplus, + #[codec(index = 21)] + SetErrorHandler(runtime_types::staging_xcm::v4::Xcm2), + #[codec(index = 22)] + SetAppendix(runtime_types::staging_xcm::v4::Xcm2), + #[codec(index = 23)] + ClearError, + #[codec(index = 24)] + ClaimAsset { + assets: runtime_types::staging_xcm::v4::asset::Assets, + ticket: runtime_types::staging_xcm::v4::location::Location, + }, + #[codec(index = 25)] + Trap(#[codec(compact)] ::core::primitive::u64), + #[codec(index = 26)] + SubscribeVersion { + #[codec(compact)] + query_id: ::core::primitive::u64, + max_response_weight: ::sp_weights::Weight, + }, + #[codec(index = 27)] + UnsubscribeVersion, + #[codec(index = 28)] + BurnAsset(runtime_types::staging_xcm::v4::asset::Assets), + #[codec(index = 29)] + ExpectAsset(runtime_types::staging_xcm::v4::asset::Assets), + #[codec(index = 30)] + ExpectOrigin( + ::core::option::Option, + ), + #[codec(index = 31)] + ExpectError( + ::core::option::Option<( + ::core::primitive::u32, + runtime_types::xcm::v3::traits::Error, + )>, + ), + #[codec(index = 32)] + 
ExpectTransactStatus(runtime_types::xcm::v3::MaybeErrorCode), + #[codec(index = 33)] + QueryPallet { + module_name: ::std::vec::Vec<::core::primitive::u8>, + response_info: runtime_types::staging_xcm::v4::QueryResponseInfo, + }, + #[codec(index = 34)] + ExpectPallet { + #[codec(compact)] + index: ::core::primitive::u32, + name: ::std::vec::Vec<::core::primitive::u8>, + module_name: ::std::vec::Vec<::core::primitive::u8>, + #[codec(compact)] + crate_major: ::core::primitive::u32, + #[codec(compact)] + min_crate_minor: ::core::primitive::u32, + }, + #[codec(index = 35)] + ReportTransactStatus(runtime_types::staging_xcm::v4::QueryResponseInfo), + #[codec(index = 36)] + ClearTransactStatus, + #[codec(index = 37)] + UniversalOrigin(runtime_types::staging_xcm::v4::junction::Junction), + #[codec(index = 38)] + ExportMessage { + network: runtime_types::staging_xcm::v4::junction::NetworkId, + destination: runtime_types::staging_xcm::v4::junctions::Junctions, + xcm: runtime_types::staging_xcm::v4::Xcm, + }, + #[codec(index = 39)] + LockAsset { + asset: runtime_types::staging_xcm::v4::asset::Asset, + unlocker: runtime_types::staging_xcm::v4::location::Location, + }, + #[codec(index = 40)] + UnlockAsset { + asset: runtime_types::staging_xcm::v4::asset::Asset, + target: runtime_types::staging_xcm::v4::location::Location, + }, + #[codec(index = 41)] + NoteUnlockable { + asset: runtime_types::staging_xcm::v4::asset::Asset, + owner: runtime_types::staging_xcm::v4::location::Location, + }, + #[codec(index = 42)] + RequestUnlock { + asset: runtime_types::staging_xcm::v4::asset::Asset, + locker: runtime_types::staging_xcm::v4::location::Location, + }, + #[codec(index = 43)] + SetFeesMode { jit_withdraw: ::core::primitive::bool }, + #[codec(index = 44)] + SetTopic([::core::primitive::u8; 32usize]), + #[codec(index = 45)] + ClearTopic, + #[codec(index = 46)] + AliasOrigin(runtime_types::staging_xcm::v4::location::Location), + #[codec(index = 47)] + UnpaidExecution { + weight_limit: 
runtime_types::xcm::v3::WeightLimit, + check_origin: ::core::option::Option< + runtime_types::staging_xcm::v4::location::Location, + >, + }, } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum TransactionSource { - #[codec(index = 0)] - InBlock, - #[codec(index = 1)] - Local, - #[codec(index = 2)] - External, + pub struct PalletInfo { + #[codec(compact)] + pub index: ::core::primitive::u32, + pub name: runtime_types::bounded_collections::bounded_vec::BoundedVec< + ::core::primitive::u8, + >, + pub module_name: runtime_types::bounded_collections::bounded_vec::BoundedVec< + ::core::primitive::u8, + >, + #[codec(compact)] + pub major: ::core::primitive::u32, + #[codec(compact)] + pub minor: ::core::primitive::u32, + #[codec(compact)] + pub patch: ::core::primitive::u32, } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum TransactionValidityError { - #[codec(index = 0)] - Invalid(runtime_types::sp_runtime::transaction_validity::InvalidTransaction), - #[codec(index = 1)] - Unknown(runtime_types::sp_runtime::transaction_validity::UnknownTransaction), + pub struct QueryResponseInfo { + pub destination: runtime_types::staging_xcm::v4::location::Location, + #[codec(compact)] + pub query_id: ::core::primitive::u64, + pub max_weight: ::sp_weights::Weight, } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum UnknownTransaction { + pub enum Response { #[codec(index = 0)] - CannotLookup, + Null, #[codec(index = 1)] - NoUnsignedValidator, + Assets(runtime_types::staging_xcm::v4::asset::Assets), #[codec(index = 2)] - Custom(::core::primitive::u8), + ExecutionResult( + ::core::option::Option<( + ::core::primitive::u32, + runtime_types::xcm::v3::traits::Error, + )>, + ), + #[codec(index = 3)] + Version(::core::primitive::u32), + #[codec(index = 4)] + PalletsInfo( + runtime_types::bounded_collections::bounded_vec::BoundedVec< + runtime_types::staging_xcm::v4::PalletInfo, + >, + 
), + #[codec(index = 5)] + DispatchResult(runtime_types::xcm::v3::MaybeErrorCode), } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct ValidTransaction { - pub priority: ::core::primitive::u64, - pub requires: ::std::vec::Vec<::std::vec::Vec<::core::primitive::u8>>, - pub provides: ::std::vec::Vec<::std::vec::Vec<::core::primitive::u8>>, - pub longevity: ::core::primitive::u64, - pub propagate: ::core::primitive::bool, - } - } - #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum DispatchError { - #[codec(index = 0)] - Other, - #[codec(index = 1)] - CannotLookup, - #[codec(index = 2)] - BadOrigin, - #[codec(index = 3)] - Module(runtime_types::sp_runtime::ModuleError), - #[codec(index = 4)] - ConsumerRemaining, - #[codec(index = 5)] - NoProviders, - #[codec(index = 6)] - TooManyConsumers, - #[codec(index = 7)] - Token(runtime_types::sp_runtime::TokenError), - #[codec(index = 8)] - Arithmetic(runtime_types::sp_arithmetic::ArithmeticError), - #[codec(index = 9)] - Transactional(runtime_types::sp_runtime::TransactionalError), - #[codec(index = 10)] - Exhausted, - #[codec(index = 11)] - Corruption, - #[codec(index = 12)] - Unavailable, - #[codec(index = 13)] - RootNotAllowed, - } - #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct ModuleError { - pub index: ::core::primitive::u8, - pub error: [::core::primitive::u8; 4usize], - } - #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum MultiSignature { - #[codec(index = 0)] - Ed25519(runtime_types::sp_core::ed25519::Signature), - #[codec(index = 1)] - Sr25519(runtime_types::sp_core::sr25519::Signature), - #[codec(index = 2)] - Ecdsa(runtime_types::sp_core::ecdsa::Signature), - } - #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum TokenError { - #[codec(index = 0)] - FundsUnavailable, - #[codec(index = 1)] - OnlyProvider, - #[codec(index = 2)] - 
BelowMinimum, - #[codec(index = 3)] - CannotCreate, - #[codec(index = 4)] - UnknownAsset, - #[codec(index = 5)] - Frozen, - #[codec(index = 6)] - Unsupported, - #[codec(index = 7)] - CannotCreateHold, - #[codec(index = 8)] - NotExpendable, - #[codec(index = 9)] - Blocked, - } - #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum TransactionalError { - #[codec(index = 0)] - LimitReached, - #[codec(index = 1)] - NoLayer, - } - } - pub mod sp_trie { - use super::runtime_types; - pub mod storage_proof { - use super::runtime_types; + pub struct Xcm(pub ::std::vec::Vec); #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct StorageProof { - pub trie_nodes: ::std::vec::Vec<::std::vec::Vec<::core::primitive::u8>>, - } - } - } - pub mod sp_version { - use super::runtime_types; - #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct RuntimeVersion { - pub spec_name: ::std::string::String, - pub impl_name: ::std::string::String, - pub authoring_version: ::core::primitive::u32, - pub spec_version: ::core::primitive::u32, - pub impl_version: ::core::primitive::u32, - pub apis: - ::std::vec::Vec<([::core::primitive::u8; 8usize], ::core::primitive::u32)>, - pub transaction_version: ::core::primitive::u32, - pub state_version: ::core::primitive::u8, - } - } - pub mod sp_weights { - use super::runtime_types; - #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct RuntimeDbWeight { - pub read: ::core::primitive::u64, - pub write: ::core::primitive::u64, - } - } - pub mod staging_xcm { - use super::runtime_types; - pub mod v3 { - use super::runtime_types; - pub mod multilocation { - use super::runtime_types; - #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct MultiLocation { - pub parents: ::core::primitive::u8, - pub interior: runtime_types::xcm::v3::junctions::Junctions, - } - } + pub struct Xcm2(pub 
::std::vec::Vec); } } pub mod xcm { @@ -3276,6 +4878,8 @@ pub mod api { BitcoinCore, #[codec(index = 9)] BitcoinCash, + #[codec(index = 10)] + PolkadotBulletin, } } pub mod junctions { @@ -3503,15 +5107,6 @@ pub mod api { #[codec(index = 39)] ExceedsStackLimit, } - #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum Outcome { - #[codec(index = 0)] - Complete(::sp_weights::Weight), - #[codec(index = 1)] - Incomplete(::sp_weights::Weight, runtime_types::xcm::v3::traits::Error), - #[codec(index = 2)] - Error(runtime_types::xcm::v3::traits::Error), - } } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum Instruction { @@ -4012,20 +5607,26 @@ pub mod api { pub enum VersionedAssetId { #[codec(index = 3)] V3(runtime_types::xcm::v3::multiasset::AssetId), + #[codec(index = 4)] + V4(runtime_types::staging_xcm::v4::asset::AssetId), } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum VersionedMultiAssets { + pub enum VersionedAssets { #[codec(index = 1)] V2(runtime_types::xcm::v2::multiasset::MultiAssets), #[codec(index = 3)] V3(runtime_types::xcm::v3::multiasset::MultiAssets), + #[codec(index = 4)] + V4(runtime_types::staging_xcm::v4::asset::Assets), } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum VersionedMultiLocation { + pub enum VersionedLocation { #[codec(index = 1)] V2(runtime_types::xcm::v2::multilocation::MultiLocation), #[codec(index = 3)] V3(runtime_types::staging_xcm::v3::multilocation::MultiLocation), + #[codec(index = 4)] + V4(runtime_types::staging_xcm::v4::location::Location), } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum VersionedResponse { @@ -4033,6 +5634,8 @@ pub mod api { V2(runtime_types::xcm::v2::Response), #[codec(index = 3)] V3(runtime_types::xcm::v3::Response), + #[codec(index = 4)] + V4(runtime_types::staging_xcm::v4::Response), } #[derive(:: codec :: Decode, :: codec :: 
Encode, Clone, Debug, PartialEq)] pub enum VersionedXcm { @@ -4040,6 +5643,8 @@ pub mod api { V2(runtime_types::xcm::v2::Xcm), #[codec(index = 3)] V3(runtime_types::xcm::v3::Xcm), + #[codec(index = 4)] + V4(runtime_types::staging_xcm::v4::Xcm), } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum VersionedXcm2 { @@ -4047,6 +5652,8 @@ pub mod api { V2(runtime_types::xcm::v2::Xcm2), #[codec(index = 3)] V3(runtime_types::xcm::v3::Xcm2), + #[codec(index = 4)] + V4(runtime_types::staging_xcm::v4::Xcm2), } } } diff --git a/relays/client-bridge-hub-kusama/src/lib.rs b/relay-clients/client-bridge-hub-kusama/src/lib.rs similarity index 86% rename from relays/client-bridge-hub-kusama/src/lib.rs rename to relay-clients/client-bridge-hub-kusama/src/lib.rs index 1e67a96cb0e1dba68bb80c12bfa68e20678056f4..ab2e51291cce7628fb730e7045c0093f6d5a317f 100644 --- a/relays/client-bridge-hub-kusama/src/lib.rs +++ b/relay-clients/client-bridge-hub-kusama/src/lib.rs @@ -18,13 +18,14 @@ pub mod codegen_runtime; -use bp_bridge_hub_kusama::{TransactionExtension, AVERAGE_BLOCK_INTERVAL}; -use bp_polkadot::SuffixedCommonTransactionExtensionExt; +use bp_bridge_hub_kusama::{SignedExtension, AVERAGE_BLOCK_INTERVAL}; +use bp_polkadot::SuffixedCommonSignedExtensionExt; use codec::Encode; use relay_substrate_client::{ calls::UtilityCall as MockUtilityCall, Chain, ChainWithBalances, ChainWithMessages, - ChainWithTransactions, ChainWithUtilityPallet, Error as SubstrateError, - MockedRuntimeUtilityPallet, SignParam, UnderlyingChainProvider, UnsignedTransaction, + ChainWithRuntimeVersion, ChainWithTransactions, ChainWithUtilityPallet, + Error as SubstrateError, MockedRuntimeUtilityPallet, SignParam, SimpleRuntimeVersion, + UnderlyingChainProvider, UnsignedTransaction, }; use sp_core::{storage::StorageKey, Pair}; use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount}; @@ -36,8 +37,7 @@ pub type RuntimeCall = runtime_types::bridge_hub_kusama_runtime::RuntimeCall; 
pub type BridgeMessagesCall = runtime_types::pallet_bridge_messages::pallet::Call; pub type BridgeGrandpaCall = runtime_types::pallet_bridge_grandpa::pallet::Call; pub type BridgeParachainCall = runtime_types::pallet_bridge_parachains::pallet::Call; -type UncheckedExtrinsic = - bp_bridge_hub_kusama::UncheckedExtrinsic; +type UncheckedExtrinsic = bp_bridge_hub_kusama::UncheckedExtrinsic; type UtilityCall = runtime_types::pallet_utility::pallet::Call; /// Kusama chain definition @@ -89,7 +89,7 @@ impl ChainWithTransactions for BridgeHubKusama { ) -> Result { let raw_payload = SignedPayload::new( unsigned.call, - TransactionExtension::from_params( + SignedExtension::from_params( param.spec_version, param.transaction_version, unsigned.era, @@ -122,3 +122,8 @@ impl ChainWithMessages for BridgeHubKusama { const FROM_CHAIN_MESSAGE_DETAILS_METHOD: &'static str = bp_bridge_hub_kusama::FROM_BRIDGE_HUB_KUSAMA_MESSAGE_DETAILS_METHOD; } + +impl ChainWithRuntimeVersion for BridgeHubKusama { + const RUNTIME_VERSION: Option = + Some(SimpleRuntimeVersion { spec_version: 1_002_000, transaction_version: 4 }); +} diff --git a/relays/client-bridge-hub-kusama/src/runtime_wrapper.rs b/relay-clients/client-bridge-hub-kusama/src/runtime_wrapper.rs similarity index 100% rename from relays/client-bridge-hub-kusama/src/runtime_wrapper.rs rename to relay-clients/client-bridge-hub-kusama/src/runtime_wrapper.rs diff --git a/relay-clients/client-bridge-hub-polkadot/Cargo.toml b/relay-clients/client-bridge-hub-polkadot/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..14671ce8f36bd6d676cfb33c862b12d34f8a7f6f --- /dev/null +++ b/relay-clients/client-bridge-hub-polkadot/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "relay-bridge-hub-polkadot-client" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +codec = { 
package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +subxt = { version = "0.35.3", default-features = false, features = ["native"] } + +# Bridge dependencies + +bp-bridge-hub-polkadot = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-header-chain = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-messages = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-parachains = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-polkadot = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-polkadot-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-kusama = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bridge-runtime-common = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +relay-substrate-client = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } + +# Substrate Dependencies + +sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +sp-weights = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } diff --git a/relays/client-bridge-hub-polkadot/src/codegen_runtime.rs b/relay-clients/client-bridge-hub-polkadot/src/codegen_runtime.rs similarity index 67% rename from relays/client-bridge-hub-polkadot/src/codegen_runtime.rs rename to relay-clients/client-bridge-hub-polkadot/src/codegen_runtime.rs index 1ce9d0588024a9ab231670ea4391a63dabe87fbc..26dd02291f40561ea756d20f71e263c9307901f3 100644 --- a/relays/client-bridge-hub-polkadot/src/codegen_runtime.rs +++ 
b/relay-clients/client-bridge-hub-polkadot/src/codegen_runtime.rs @@ -17,7 +17,7 @@ //! Autogenerated runtime API //! THIS FILE WAS AUTOGENERATED USING parity-bridges-common::runtime-codegen //! EXECUTED COMMAND: target/debug/runtime-codegen --from-node-url -//! wss://polkadot-bridge-hub-rpc.polkadot.io +//! wss://polkadot-bridge-hub-rpc.polkadot.io/ #[allow(dead_code, unused_imports, non_camel_case_types)] #[allow(clippy::all)] @@ -31,6 +31,11 @@ pub mod api { use super::runtime_types; pub mod bounded_collections { use super::runtime_types; + pub mod bounded_btree_set { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct BoundedBTreeSet<_0>(pub ::std::vec::Vec<_0>); + } pub mod bounded_vec { use super::runtime_types; #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] @@ -244,6 +249,23 @@ pub mod api { #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub struct StrippableError; } + pub mod bridge_hub_common { + use super::runtime_types; + pub mod message_queue { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum AggregateMessageOrigin { + #[codec(index = 0)] + Here, + #[codec(index = 1)] + Parent, + #[codec(index = 2)] + Sibling(runtime_types::polkadot_parachain_primitives::primitives::Id), + #[codec(index = 3)] + Snowbridge(runtime_types::snowbridge_core::ChannelId), + } + } + } pub mod bridge_hub_polkadot_runtime { use super::runtime_types; #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] @@ -273,6 +295,8 @@ pub mod api { ParachainSystem(runtime_types::cumulus_pallet_parachain_system::pallet::Call), #[codec(index = 2)] Timestamp(runtime_types::pallet_timestamp::pallet::Call), + #[codec(index = 3)] + ParachainInfo(runtime_types::staging_parachain_info::pallet::Call), #[codec(index = 10)] Balances(runtime_types::pallet_balances::pallet::Call), #[codec(index = 
21)] @@ -283,6 +307,8 @@ pub mod api { XcmpQueue(runtime_types::cumulus_pallet_xcmp_queue::pallet::Call), #[codec(index = 31)] PolkadotXcm(runtime_types::pallet_xcm::pallet::Call), + #[codec(index = 32)] + CumulusXcm(runtime_types::cumulus_pallet_xcm::pallet::Call), #[codec(index = 33)] DmpQueue(runtime_types::cumulus_pallet_dmp_queue::pallet::Call), #[codec(index = 40)] @@ -297,6 +323,20 @@ pub mod api { BridgeKusamaParachains(runtime_types::pallet_bridge_parachains::pallet::Call), #[codec(index = 53)] BridgeKusamaMessages(runtime_types::pallet_bridge_messages::pallet::Call), + #[codec(index = 80)] + EthereumInboundQueue(runtime_types::snowbridge_pallet_inbound_queue::pallet::Call), + #[codec(index = 81)] + EthereumOutboundQueue( + runtime_types::snowbridge_pallet_outbound_queue::pallet::Call, + ), + #[codec(index = 82)] + EthereumBeaconClient( + runtime_types::snowbridge_pallet_ethereum_client::pallet::Call, + ), + #[codec(index = 83)] + EthereumSystem(runtime_types::snowbridge_pallet_system::pallet::Call), + #[codec(index = 175)] + MessageQueue(runtime_types::pallet_message_queue::pallet::Call), } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum RuntimeError { @@ -314,10 +354,6 @@ pub mod api { XcmpQueue(runtime_types::cumulus_pallet_xcmp_queue::pallet::Error), #[codec(index = 31)] PolkadotXcm(runtime_types::pallet_xcm::pallet::Error), - #[codec(index = 32)] - CumulusXcm(runtime_types::cumulus_pallet_xcm::pallet::Error), - #[codec(index = 33)] - DmpQueue(runtime_types::cumulus_pallet_dmp_queue::pallet::Error), #[codec(index = 40)] Utility(runtime_types::pallet_utility::pallet::Error), #[codec(index = 41)] @@ -330,6 +366,20 @@ pub mod api { BridgeKusamaParachains(runtime_types::pallet_bridge_parachains::pallet::Error), #[codec(index = 53)] BridgeKusamaMessages(runtime_types::pallet_bridge_messages::pallet::Error), + #[codec(index = 80)] + EthereumInboundQueue(runtime_types::snowbridge_pallet_inbound_queue::pallet::Error), + 
#[codec(index = 81)] + EthereumOutboundQueue( + runtime_types::snowbridge_pallet_outbound_queue::pallet::Error, + ), + #[codec(index = 82)] + EthereumBeaconClient( + runtime_types::snowbridge_pallet_ethereum_client::pallet::Error, + ), + #[codec(index = 83)] + EthereumSystem(runtime_types::snowbridge_pallet_system::pallet::Error), + #[codec(index = 175)] + MessageQueue(runtime_types::pallet_message_queue::pallet::Error), } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum RuntimeEvent { @@ -365,6 +415,20 @@ pub mod api { BridgeKusamaParachains(runtime_types::pallet_bridge_parachains::pallet::Event), #[codec(index = 53)] BridgeKusamaMessages(runtime_types::pallet_bridge_messages::pallet::Event), + #[codec(index = 80)] + EthereumInboundQueue(runtime_types::snowbridge_pallet_inbound_queue::pallet::Event), + #[codec(index = 81)] + EthereumOutboundQueue( + runtime_types::snowbridge_pallet_outbound_queue::pallet::Event, + ), + #[codec(index = 82)] + EthereumBeaconClient( + runtime_types::snowbridge_pallet_ethereum_client::pallet::Event, + ), + #[codec(index = 83)] + EthereumSystem(runtime_types::snowbridge_pallet_system::pallet::Event), + #[codec(index = 175)] + MessageQueue(runtime_types::pallet_message_queue::pallet::Event), } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum RuntimeHoldReason {} @@ -392,7 +456,7 @@ pub mod api { #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub struct RefundBridgedParachainMessages; #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct RefundTransactionExtensionAdapter<_0>(pub _0); + pub struct RefundSignedExtensionAdapter<_0>(pub _0); } } pub mod cumulus_pallet_dmp_queue { @@ -400,65 +464,56 @@ pub mod api { pub mod pallet { use super::runtime_types; #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum Call { - #[codec(index = 0)] - service_overweight { - index: 
::core::primitive::u64, - weight_limit: ::sp_weights::Weight, - }, - } + pub enum Call {} #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum Error { + pub enum Event { #[codec(index = 0)] - Unknown, + StartedExport, #[codec(index = 1)] - OverLimit, + Exported { page: ::core::primitive::u32 }, + #[codec(index = 2)] + ExportFailed { page: ::core::primitive::u32 }, + #[codec(index = 3)] + CompletedExport, + #[codec(index = 4)] + StartedOverweightExport, + #[codec(index = 5)] + ExportedOverweight { index: ::core::primitive::u64 }, + #[codec(index = 6)] + ExportOverweightFailed { index: ::core::primitive::u64 }, + #[codec(index = 7)] + CompletedOverweightExport, + #[codec(index = 8)] + StartedCleanup, + #[codec(index = 9)] + CleanedSome { keys_removed: ::core::primitive::u32 }, + #[codec(index = 10)] + Completed { error: ::core::primitive::bool }, } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum Event { + pub enum MigrationState { #[codec(index = 0)] - InvalidFormat { message_hash: [::core::primitive::u8; 32usize] }, + NotStarted, #[codec(index = 1)] - UnsupportedVersion { message_hash: [::core::primitive::u8; 32usize] }, + StartedExport { next_begin_used: ::core::primitive::u32 }, #[codec(index = 2)] - ExecutedDownward { - message_hash: [::core::primitive::u8; 32usize], - message_id: [::core::primitive::u8; 32usize], - outcome: runtime_types::xcm::v3::traits::Outcome, - }, + CompletedExport, #[codec(index = 3)] - WeightExhausted { - message_hash: [::core::primitive::u8; 32usize], - message_id: [::core::primitive::u8; 32usize], - remaining_weight: ::sp_weights::Weight, - required_weight: ::sp_weights::Weight, - }, + StartedOverweightExport { next_overweight_index: ::core::primitive::u64 }, #[codec(index = 4)] - OverweightEnqueued { - message_hash: [::core::primitive::u8; 32usize], - message_id: [::core::primitive::u8; 32usize], - overweight_index: ::core::primitive::u64, - required_weight: 
::sp_weights::Weight, - }, + CompletedOverweightExport, #[codec(index = 5)] - OverweightServiced { - overweight_index: ::core::primitive::u64, - weight_used: ::sp_weights::Weight, + StartedCleanup { + cursor: ::core::option::Option< + runtime_types::bounded_collections::bounded_vec::BoundedVec< + ::core::primitive::u8, + >, + >, }, #[codec(index = 6)] - MaxMessagesExhausted { message_hash: [::core::primitive::u8; 32usize] }, + Completed, } } - #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct ConfigData { - pub max_individual: ::sp_weights::Weight, - } - #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct PageIndexData { - pub begin_used: ::core::primitive::u32, - pub end_used: ::core::primitive::u32, - pub overweight_count: ::core::primitive::u64, - } } pub mod cumulus_pallet_parachain_system { use super::runtime_types; @@ -495,15 +550,13 @@ pub mod api { #[codec(index = 2)] ValidationFunctionDiscarded, #[codec(index = 3)] - UpgradeAuthorized { code_hash: ::subxt::utils::H256 }, - #[codec(index = 4)] DownwardMessagesReceived { count: ::core::primitive::u32 }, - #[codec(index = 5)] + #[codec(index = 4)] DownwardMessagesProcessed { weight_used: ::sp_weights::Weight, dmq_head: ::subxt::utils::H256, }, - #[codec(index = 6)] + #[codec(index = 5)] UpwardMessageSent { message_hash: ::core::option::Option<[::core::primitive::u8; 32usize]>, }, @@ -533,18 +586,13 @@ pub mod api { #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub struct UsedBandwidth { pub ump_msg_count : :: core :: primitive :: u32 , pub ump_total_bytes : :: core :: primitive :: u32 , pub hrmp_outgoing : :: subxt :: utils :: KeyedVec < runtime_types :: polkadot_parachain_primitives :: primitives :: Id , runtime_types :: cumulus_pallet_parachain_system :: unincluded_segment :: HrmpChannelUpdate > , } } - #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct 
CodeUpgradeAuthorization { - pub code_hash: ::subxt::utils::H256, - pub check_version: ::core::primitive::bool, - } } pub mod cumulus_pallet_xcm { use super::runtime_types; pub mod pallet { use super::runtime_types; #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum Error {} + pub enum Call {} #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum Event { #[codec(index = 0)] @@ -554,7 +602,7 @@ pub mod api { #[codec(index = 2)] ExecutedDownward( [::core::primitive::u8; 32usize], - runtime_types::xcm::v3::traits::Outcome, + runtime_types::staging_xcm::v4::traits::Outcome, ), } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] @@ -572,11 +620,6 @@ pub mod api { use super::runtime_types; #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum Call { - #[codec(index = 0)] - service_overweight { - index: ::core::primitive::u64, - weight_limit: ::sp_weights::Weight, - }, #[codec(index = 1)] suspend_xcm_execution, #[codec(index = 2)] @@ -587,75 +630,23 @@ pub mod api { update_drop_threshold { new: ::core::primitive::u32 }, #[codec(index = 5)] update_resume_threshold { new: ::core::primitive::u32 }, - #[codec(index = 6)] - update_threshold_weight { new: ::sp_weights::Weight }, - #[codec(index = 7)] - update_weight_restrict_decay { new: ::sp_weights::Weight }, - #[codec(index = 8)] - update_xcmp_max_individual_weight { new: ::sp_weights::Weight }, } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum Error { #[codec(index = 0)] - FailedToSend, + BadQueueConfig, #[codec(index = 1)] - BadXcmOrigin, + AlreadySuspended, #[codec(index = 2)] - BadXcm, - #[codec(index = 3)] - BadOverweightIndex, - #[codec(index = 4)] - WeightOverLimit, + AlreadyResumed, } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum Event { #[codec(index = 0)] - Success { - message_hash: [::core::primitive::u8; 32usize], - 
message_id: [::core::primitive::u8; 32usize], - weight: ::sp_weights::Weight, - }, - #[codec(index = 1)] - Fail { - message_hash: [::core::primitive::u8; 32usize], - message_id: [::core::primitive::u8; 32usize], - error: runtime_types::xcm::v3::traits::Error, - weight: ::sp_weights::Weight, - }, - #[codec(index = 2)] - BadVersion { message_hash: [::core::primitive::u8; 32usize] }, - #[codec(index = 3)] - BadFormat { message_hash: [::core::primitive::u8; 32usize] }, - #[codec(index = 4)] XcmpMessageSent { message_hash: [::core::primitive::u8; 32usize] }, - #[codec(index = 5)] - OverweightEnqueued { - sender: runtime_types::polkadot_parachain_primitives::primitives::Id, - sent_at: ::core::primitive::u32, - index: ::core::primitive::u64, - required: ::sp_weights::Weight, - }, - #[codec(index = 6)] - OverweightServiced { index: ::core::primitive::u64, used: ::sp_weights::Weight }, } } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct InboundChannelDetails { - pub sender: runtime_types::polkadot_parachain_primitives::primitives::Id, - pub state: runtime_types::cumulus_pallet_xcmp_queue::InboundState, - pub message_metadata: ::std::vec::Vec<( - ::core::primitive::u32, - runtime_types::polkadot_parachain_primitives::primitives::XcmpMessageFormat, - )>, - } - #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum InboundState { - #[codec(index = 0)] - Ok, - #[codec(index = 1)] - Suspended, - } - #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub struct OutboundChannelDetails { pub recipient: runtime_types::polkadot_parachain_primitives::primitives::Id, pub state: runtime_types::cumulus_pallet_xcmp_queue::OutboundState, @@ -675,9 +666,6 @@ pub mod api { pub suspend_threshold: ::core::primitive::u32, pub drop_threshold: ::core::primitive::u32, pub resume_threshold: ::core::primitive::u32, - pub threshold_weight: ::sp_weights::Weight, - pub weight_restrict_decay: 
::sp_weights::Weight, - pub xcmp_max_individual_weight: ::sp_weights::Weight, } } pub mod cumulus_primitives_core { @@ -791,6 +779,22 @@ pub mod api { } pub mod traits { use super::runtime_types; + pub mod messages { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum ProcessMessageError { + #[codec(index = 0)] + BadFormat, + #[codec(index = 1)] + Corrupt, + #[codec(index = 2)] + Unsupported, + #[codec(index = 3)] + Overweight(::sp_weights::Weight), + #[codec(index = 4)] + Yield, + } + } pub mod tokens { use super::runtime_types; pub mod misc { @@ -900,6 +904,12 @@ pub mod api { }, #[codec(index = 7)] remark_with_event { remark: ::std::vec::Vec<::core::primitive::u8> }, + #[codec(index = 9)] + authorize_upgrade { code_hash: ::subxt::utils::H256 }, + #[codec(index = 10)] + authorize_upgrade_without_checks { code_hash: ::subxt::utils::H256 }, + #[codec(index = 11)] + apply_authorized_upgrade { code: ::std::vec::Vec<::core::primitive::u8> }, } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum Error { @@ -915,6 +925,10 @@ pub mod api { NonZeroRefCount, #[codec(index = 5)] CallFiltered, + #[codec(index = 6)] + NothingAuthorized, + #[codec(index = 7)] + Unauthorized, } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum Event { @@ -935,6 +949,11 @@ pub mod api { KilledAccount { account: ::sp_core::crypto::AccountId32 }, #[codec(index = 5)] Remarked { sender: ::sp_core::crypto::AccountId32, hash: ::subxt::utils::H256 }, + #[codec(index = 6)] + UpgradeAuthorized { + code_hash: ::subxt::utils::H256, + check_version: ::core::primitive::bool, + }, } } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] @@ -946,6 +965,11 @@ pub mod api { pub data: _1, } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct CodeUpgradeAuthorization { + pub code_hash: ::subxt::utils::H256, + pub check_version: 
::core::primitive::bool, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub struct EventRecord<_0, _1> { pub phase: runtime_types::frame_system::Phase, pub event: _0, @@ -1010,6 +1034,12 @@ pub mod api { #[codec(compact)] new_free: ::core::primitive::u128, }, + #[codec(index = 9)] + force_adjust_total_issuance { + direction: runtime_types::pallet_balances::types::AdjustmentDirection, + #[codec(compact)] + delta: ::core::primitive::u128, + }, } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum Error { @@ -1033,6 +1063,10 @@ pub mod api { TooManyHolds, #[codec(index = 9)] TooManyFreezes, + #[codec(index = 10)] + IssuanceDeactivated, + #[codec(index = 11)] + DeltaZero, } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum Event { @@ -1115,6 +1149,11 @@ pub mod api { Frozen { who: ::sp_core::crypto::AccountId32, amount: ::core::primitive::u128 }, #[codec(index = 20)] Thawed { who: ::sp_core::crypto::AccountId32, amount: ::core::primitive::u128 }, + #[codec(index = 21)] + TotalIssuanceForced { + old: ::core::primitive::u128, + new: ::core::primitive::u128, + }, } } pub mod types { @@ -1127,6 +1166,13 @@ pub mod api { pub flags: runtime_types::pallet_balances::types::ExtraFlags, } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum AdjustmentDirection { + #[codec(index = 0)] + Increase, + #[codec(index = 1)] + Decrease, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub struct BalanceLock<_0> { pub id: [::core::primitive::u8; 8usize], pub amount: _0, @@ -1271,7 +1317,7 @@ pub mod api { # [codec (index = 0)] set_owner { new_owner : :: core :: option :: Option < :: sp_core :: crypto :: AccountId32 > , } , # [codec (index = 1)] set_operating_mode { operating_mode : runtime_types :: bp_messages :: MessagesOperatingMode , } , # [codec (index = 2)] receive_messages_proof { relayer_id_at_bridged_chain : :: 
sp_core :: crypto :: AccountId32 , proof : :: bridge_runtime_common :: messages :: target :: FromBridgedChainMessagesProof < :: subxt :: utils :: H256 > , messages_count : :: core :: primitive :: u32 , dispatch_weight : :: sp_weights :: Weight , } , # [codec (index = 3)] receive_messages_delivery_proof { proof : :: bridge_runtime_common :: messages :: source :: FromBridgedChainMessagesDeliveryProof < :: subxt :: utils :: H256 > , relayers_state : :: bp_messages :: UnrewardedRelayersState , } , } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum Error { - # [codec (index = 0)] NotOperatingNormally , # [codec (index = 1)] InactiveOutboundLane , # [codec (index = 2)] MessageDispatchInactive , # [codec (index = 3)] MessageRejectedByChainVerifier (runtime_types :: bp_messages :: VerificationError ,) , # [codec (index = 4)] MessageRejectedByLaneVerifier (runtime_types :: bp_messages :: VerificationError ,) , # [codec (index = 5)] MessageRejectedByPallet (runtime_types :: bp_messages :: VerificationError ,) , # [codec (index = 6)] FailedToWithdrawMessageFee , # [codec (index = 7)] TooManyMessagesInTheProof , # [codec (index = 8)] InvalidMessagesProof , # [codec (index = 9)] InvalidMessagesDeliveryProof , # [codec (index = 10)] InvalidUnrewardedRelayersState , # [codec (index = 11)] InsufficientDispatchWeight , # [codec (index = 12)] MessageIsNotYetSent , # [codec (index = 13)] ReceivalConfirmation (runtime_types :: pallet_bridge_messages :: outbound_lane :: ReceivalConfirmationError ,) , # [codec (index = 14)] BridgeModule (runtime_types :: bp_runtime :: OwnedBridgeModuleError ,) , } + # [codec (index = 0)] NotOperatingNormally , # [codec (index = 1)] InactiveOutboundLane , # [codec (index = 2)] MessageDispatchInactive , # [codec (index = 3)] MessageRejectedByChainVerifier (runtime_types :: bp_messages :: VerificationError ,) , # [codec (index = 4)] MessageRejectedByPallet (runtime_types :: bp_messages :: VerificationError ,) , # [codec 
(index = 5)] FailedToWithdrawMessageFee , # [codec (index = 6)] TooManyMessagesInTheProof , # [codec (index = 7)] InvalidMessagesProof , # [codec (index = 8)] InvalidMessagesDeliveryProof , # [codec (index = 9)] InvalidUnrewardedRelayersState , # [codec (index = 10)] InsufficientDispatchWeight , # [codec (index = 11)] MessageIsNotYetSent , # [codec (index = 12)] ReceivalConfirmation (runtime_types :: pallet_bridge_messages :: outbound_lane :: ReceivalConfirmationError ,) , # [codec (index = 13)] BridgeModule (runtime_types :: bp_runtime :: OwnedBridgeModuleError ,) , } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum Event { # [codec (index = 0)] MessageAccepted { lane_id : runtime_types :: bp_messages :: LaneId , nonce : :: core :: primitive :: u64 , } , # [codec (index = 1)] MessagesReceived (:: std :: vec :: Vec < runtime_types :: bp_messages :: ReceivedMessages < runtime_types :: bridge_runtime_common :: messages_xcm_extension :: XcmBlobMessageDispatchResult > > ,) , # [codec (index = 2)] MessagesDelivered { lane_id : runtime_types :: bp_messages :: LaneId , messages : runtime_types :: bp_messages :: DeliveredMessages , } , } @@ -1378,12 +1424,18 @@ pub mod api { #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum Event { #[codec(index = 0)] - RewardPaid { + RewardRegistered { relayer: ::sp_core::crypto::AccountId32, rewards_account_params: runtime_types::bp_relayers::RewardsAccountParams, reward: ::core::primitive::u128, }, #[codec(index = 1)] + RewardPaid { + relayer: ::sp_core::crypto::AccountId32, + rewards_account_params: runtime_types::bp_relayers::RewardsAccountParams, + reward: ::core::primitive::u128, + }, + #[codec(index = 2)] RegistrationUpdated { relayer: ::sp_core::crypto::AccountId32, registration: runtime_types::bp_relayers::registration::Registration< @@ -1391,9 +1443,9 @@ pub mod api { ::core::primitive::u128, >, }, - #[codec(index = 2)] - Deregistered { relayer: 
::sp_core::crypto::AccountId32 }, #[codec(index = 3)] + Deregistered { relayer: ::sp_core::crypto::AccountId32 }, + #[codec(index = 4)] SlashedAndDeregistered { relayer: ::sp_core::crypto::AccountId32, registration: runtime_types::bp_relayers::registration::Registration< @@ -1424,6 +1476,13 @@ pub mod api { add_invulnerable { who: ::sp_core::crypto::AccountId32 }, #[codec(index = 6)] remove_invulnerable { who: ::sp_core::crypto::AccountId32 }, + #[codec(index = 7)] + update_bond { new_deposit: ::core::primitive::u128 }, + #[codec(index = 8)] + take_candidate_slot { + deposit: ::core::primitive::u128, + target: ::sp_core::crypto::AccountId32, + }, } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub struct CandidateInfo<_0, _1> { @@ -1450,6 +1509,22 @@ pub mod api { NoAssociatedValidatorId, #[codec(index = 8)] ValidatorNotRegistered, + #[codec(index = 9)] + InsertToCandidateListFailed, + #[codec(index = 10)] + RemoveFromCandidateListFailed, + #[codec(index = 11)] + DepositTooLow, + #[codec(index = 12)] + UpdateCandidateListFailed, + #[codec(index = 13)] + InsufficientBond, + #[codec(index = 14)] + TargetIsNotCandidate, + #[codec(index = 15)] + IdenticalDeposit, + #[codec(index = 16)] + InvalidUnreserve, } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum Event { @@ -1471,12 +1546,125 @@ pub mod api { deposit: ::core::primitive::u128, }, #[codec(index = 6)] - CandidateRemoved { account_id: ::sp_core::crypto::AccountId32 }, + CandidateBondUpdated { + account_id: ::sp_core::crypto::AccountId32, + deposit: ::core::primitive::u128, + }, #[codec(index = 7)] + CandidateRemoved { account_id: ::sp_core::crypto::AccountId32 }, + #[codec(index = 8)] + CandidateReplaced { + old: ::sp_core::crypto::AccountId32, + new: ::sp_core::crypto::AccountId32, + deposit: ::core::primitive::u128, + }, + #[codec(index = 9)] InvalidInvulnerableSkipped { account_id: ::sp_core::crypto::AccountId32 }, } } } + pub mod 
pallet_message_queue { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Call { + #[codec(index = 0)] + reap_page { + message_origin: + runtime_types::bridge_hub_common::message_queue::AggregateMessageOrigin, + page_index: ::core::primitive::u32, + }, + #[codec(index = 1)] + execute_overweight { + message_origin: + runtime_types::bridge_hub_common::message_queue::AggregateMessageOrigin, + page: ::core::primitive::u32, + index: ::core::primitive::u32, + weight_limit: ::sp_weights::Weight, + }, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Error { + #[codec(index = 0)] + NotReapable, + #[codec(index = 1)] + NoPage, + #[codec(index = 2)] + NoMessage, + #[codec(index = 3)] + AlreadyProcessed, + #[codec(index = 4)] + Queued, + #[codec(index = 5)] + InsufficientWeight, + #[codec(index = 6)] + TemporarilyUnprocessable, + #[codec(index = 7)] + QueuePaused, + #[codec(index = 8)] + RecursiveDisallowed, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Event { + #[codec(index = 0)] + ProcessingFailed { + id: ::subxt::utils::H256, + origin: + runtime_types::bridge_hub_common::message_queue::AggregateMessageOrigin, + error: runtime_types::frame_support::traits::messages::ProcessMessageError, + }, + #[codec(index = 1)] + Processed { + id: ::subxt::utils::H256, + origin: + runtime_types::bridge_hub_common::message_queue::AggregateMessageOrigin, + weight_used: ::sp_weights::Weight, + success: ::core::primitive::bool, + }, + #[codec(index = 2)] + OverweightEnqueued { + id: [::core::primitive::u8; 32usize], + origin: + runtime_types::bridge_hub_common::message_queue::AggregateMessageOrigin, + page_index: ::core::primitive::u32, + message_index: ::core::primitive::u32, + }, + #[codec(index = 3)] + PageReaped { + origin: + 
runtime_types::bridge_hub_common::message_queue::AggregateMessageOrigin, + index: ::core::primitive::u32, + }, + } + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct BookState<_0> { + pub begin: ::core::primitive::u32, + pub end: ::core::primitive::u32, + pub count: ::core::primitive::u32, + pub ready_neighbours: + ::core::option::Option>, + pub message_count: ::core::primitive::u64, + pub size: ::core::primitive::u64, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Neighbours<_0> { + pub prev: _0, + pub next: _0, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Page<_0> { + pub remaining: _0, + pub remaining_size: _0, + pub first_index: _0, + pub first: _0, + pub last: _0, + pub heap: runtime_types::bounded_collections::bounded_vec::BoundedVec< + ::core::primitive::u8, + >, + } + } pub mod pallet_multisig { use super::runtime_types; pub mod pallet { @@ -1779,21 +1967,21 @@ pub mod api { pub enum Call { #[codec(index = 0)] send { - dest: ::std::boxed::Box, + dest: ::std::boxed::Box, message: ::std::boxed::Box, }, #[codec(index = 1)] teleport_assets { - dest: ::std::boxed::Box, - beneficiary: ::std::boxed::Box, - assets: ::std::boxed::Box, + dest: ::std::boxed::Box, + beneficiary: ::std::boxed::Box, + assets: ::std::boxed::Box, fee_asset_item: ::core::primitive::u32, }, #[codec(index = 2)] reserve_transfer_assets { - dest: ::std::boxed::Box, - beneficiary: ::std::boxed::Box, - assets: ::std::boxed::Box, + dest: ::std::boxed::Box, + beneficiary: ::std::boxed::Box, + assets: ::std::boxed::Box, fee_asset_item: ::core::primitive::u32, }, #[codec(index = 3)] @@ -1803,9 +1991,8 @@ pub mod api { }, #[codec(index = 4)] force_xcm_version { - location: ::std::boxed::Box< - runtime_types::staging_xcm::v3::multilocation::MultiLocation, - >, + location: + ::std::boxed::Box, version: ::core::primitive::u32, }, #[codec(index = 5)] @@ -1814,30 
+2001,43 @@ pub mod api { }, #[codec(index = 6)] force_subscribe_version_notify { - location: ::std::boxed::Box, + location: ::std::boxed::Box, }, #[codec(index = 7)] force_unsubscribe_version_notify { - location: ::std::boxed::Box, + location: ::std::boxed::Box, }, #[codec(index = 8)] limited_reserve_transfer_assets { - dest: ::std::boxed::Box, - beneficiary: ::std::boxed::Box, - assets: ::std::boxed::Box, + dest: ::std::boxed::Box, + beneficiary: ::std::boxed::Box, + assets: ::std::boxed::Box, fee_asset_item: ::core::primitive::u32, weight_limit: runtime_types::xcm::v3::WeightLimit, }, #[codec(index = 9)] limited_teleport_assets { - dest: ::std::boxed::Box, - beneficiary: ::std::boxed::Box, - assets: ::std::boxed::Box, + dest: ::std::boxed::Box, + beneficiary: ::std::boxed::Box, + assets: ::std::boxed::Box, fee_asset_item: ::core::primitive::u32, weight_limit: runtime_types::xcm::v3::WeightLimit, }, #[codec(index = 10)] force_suspension { suspended: ::core::primitive::bool }, + #[codec(index = 11)] + transfer_assets { + dest: ::std::boxed::Box, + beneficiary: ::std::boxed::Box, + assets: ::std::boxed::Box, + fee_asset_item: ::core::primitive::u32, + weight_limit: runtime_types::xcm::v3::WeightLimit, + }, + #[codec(index = 12)] + claim_assets { + assets: ::std::boxed::Box, + beneficiary: ::std::boxed::Box, + }, } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum Error { @@ -1868,7 +2068,7 @@ pub mod api { #[codec(index = 12)] AlreadySubscribed, #[codec(index = 13)] - InvalidAsset, + CannotCheckOutTeleport, #[codec(index = 14)] LowBalance, #[codec(index = 15)] @@ -1881,27 +2081,37 @@ pub mod api { LockNotFound, #[codec(index = 19)] InUse, + #[codec(index = 20)] + InvalidAssetNotConcrete, + #[codec(index = 21)] + InvalidAssetUnknownReserve, + #[codec(index = 22)] + InvalidAssetUnsupportedReserve, + #[codec(index = 23)] + TooManyReserves, + #[codec(index = 24)] + LocalExecutionIncomplete, } #[derive(:: codec :: Decode, :: codec :: 
Encode, Clone, Debug, PartialEq)] pub enum Event { #[codec(index = 0)] - Attempted { outcome: runtime_types::xcm::v3::traits::Outcome }, + Attempted { outcome: runtime_types::staging_xcm::v4::traits::Outcome }, #[codec(index = 1)] Sent { - origin: runtime_types::staging_xcm::v3::multilocation::MultiLocation, - destination: runtime_types::staging_xcm::v3::multilocation::MultiLocation, - message: runtime_types::xcm::v3::Xcm, + origin: runtime_types::staging_xcm::v4::location::Location, + destination: runtime_types::staging_xcm::v4::location::Location, + message: runtime_types::staging_xcm::v4::Xcm, message_id: [::core::primitive::u8; 32usize], }, #[codec(index = 2)] UnexpectedResponse { - origin: runtime_types::staging_xcm::v3::multilocation::MultiLocation, + origin: runtime_types::staging_xcm::v4::location::Location, query_id: ::core::primitive::u64, }, #[codec(index = 3)] ResponseReady { query_id: ::core::primitive::u64, - response: runtime_types::xcm::v3::Response, + response: runtime_types::staging_xcm::v4::Response, }, #[codec(index = 4)] Notified { @@ -1931,15 +2141,15 @@ pub mod api { }, #[codec(index = 8)] InvalidResponder { - origin: runtime_types::staging_xcm::v3::multilocation::MultiLocation, + origin: runtime_types::staging_xcm::v4::location::Location, query_id: ::core::primitive::u64, expected_location: ::core::option::Option< - runtime_types::staging_xcm::v3::multilocation::MultiLocation, + runtime_types::staging_xcm::v4::location::Location, >, }, #[codec(index = 9)] InvalidResponderVersion { - origin: runtime_types::staging_xcm::v3::multilocation::MultiLocation, + origin: runtime_types::staging_xcm::v4::location::Location, query_id: ::core::primitive::u64, }, #[codec(index = 10)] @@ -1947,98 +2157,99 @@ pub mod api { #[codec(index = 11)] AssetsTrapped { hash: ::subxt::utils::H256, - origin: runtime_types::staging_xcm::v3::multilocation::MultiLocation, - assets: runtime_types::xcm::VersionedMultiAssets, + origin: 
runtime_types::staging_xcm::v4::location::Location, + assets: runtime_types::xcm::VersionedAssets, }, #[codec(index = 12)] VersionChangeNotified { - destination: runtime_types::staging_xcm::v3::multilocation::MultiLocation, + destination: runtime_types::staging_xcm::v4::location::Location, result: ::core::primitive::u32, - cost: runtime_types::xcm::v3::multiasset::MultiAssets, + cost: runtime_types::staging_xcm::v4::asset::Assets, message_id: [::core::primitive::u8; 32usize], }, #[codec(index = 13)] SupportedVersionChanged { - location: runtime_types::staging_xcm::v3::multilocation::MultiLocation, + location: runtime_types::staging_xcm::v4::location::Location, version: ::core::primitive::u32, }, #[codec(index = 14)] NotifyTargetSendFail { - location: runtime_types::staging_xcm::v3::multilocation::MultiLocation, + location: runtime_types::staging_xcm::v4::location::Location, query_id: ::core::primitive::u64, error: runtime_types::xcm::v3::traits::Error, }, #[codec(index = 15)] NotifyTargetMigrationFail { - location: runtime_types::xcm::VersionedMultiLocation, + location: runtime_types::xcm::VersionedLocation, query_id: ::core::primitive::u64, }, #[codec(index = 16)] InvalidQuerierVersion { - origin: runtime_types::staging_xcm::v3::multilocation::MultiLocation, + origin: runtime_types::staging_xcm::v4::location::Location, query_id: ::core::primitive::u64, }, #[codec(index = 17)] InvalidQuerier { - origin: runtime_types::staging_xcm::v3::multilocation::MultiLocation, + origin: runtime_types::staging_xcm::v4::location::Location, query_id: ::core::primitive::u64, - expected_querier: - runtime_types::staging_xcm::v3::multilocation::MultiLocation, + expected_querier: runtime_types::staging_xcm::v4::location::Location, maybe_actual_querier: ::core::option::Option< - runtime_types::staging_xcm::v3::multilocation::MultiLocation, + runtime_types::staging_xcm::v4::location::Location, >, }, #[codec(index = 18)] VersionNotifyStarted { - destination: 
runtime_types::staging_xcm::v3::multilocation::MultiLocation, - cost: runtime_types::xcm::v3::multiasset::MultiAssets, + destination: runtime_types::staging_xcm::v4::location::Location, + cost: runtime_types::staging_xcm::v4::asset::Assets, message_id: [::core::primitive::u8; 32usize], }, #[codec(index = 19)] VersionNotifyRequested { - destination: runtime_types::staging_xcm::v3::multilocation::MultiLocation, - cost: runtime_types::xcm::v3::multiasset::MultiAssets, + destination: runtime_types::staging_xcm::v4::location::Location, + cost: runtime_types::staging_xcm::v4::asset::Assets, message_id: [::core::primitive::u8; 32usize], }, #[codec(index = 20)] VersionNotifyUnrequested { - destination: runtime_types::staging_xcm::v3::multilocation::MultiLocation, - cost: runtime_types::xcm::v3::multiasset::MultiAssets, + destination: runtime_types::staging_xcm::v4::location::Location, + cost: runtime_types::staging_xcm::v4::asset::Assets, message_id: [::core::primitive::u8; 32usize], }, #[codec(index = 21)] FeesPaid { - paying: runtime_types::staging_xcm::v3::multilocation::MultiLocation, - fees: runtime_types::xcm::v3::multiasset::MultiAssets, + paying: runtime_types::staging_xcm::v4::location::Location, + fees: runtime_types::staging_xcm::v4::asset::Assets, }, #[codec(index = 22)] AssetsClaimed { hash: ::subxt::utils::H256, - origin: runtime_types::staging_xcm::v3::multilocation::MultiLocation, - assets: runtime_types::xcm::VersionedMultiAssets, + origin: runtime_types::staging_xcm::v4::location::Location, + assets: runtime_types::xcm::VersionedAssets, }, + #[codec(index = 23)] + VersionMigrationFinished { version: ::core::primitive::u32 }, } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum Origin { #[codec(index = 0)] - Xcm(runtime_types::staging_xcm::v3::multilocation::MultiLocation), + Xcm(runtime_types::staging_xcm::v4::location::Location), #[codec(index = 1)] - Response(runtime_types::staging_xcm::v3::multilocation::MultiLocation), 
+ Response(runtime_types::staging_xcm::v4::location::Location), } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum QueryStatus<_0> { #[codec(index = 0)] Pending { - responder: runtime_types::xcm::VersionedMultiLocation, + responder: runtime_types::xcm::VersionedLocation, maybe_match_querier: - ::core::option::Option, + ::core::option::Option, maybe_notify: ::core::option::Option<(::core::primitive::u8, ::core::primitive::u8)>, timeout: _0, }, #[codec(index = 1)] VersionNotifier { - origin: runtime_types::xcm::VersionedMultiLocation, + origin: runtime_types::xcm::VersionedLocation, is_active: ::core::primitive::bool, }, #[codec(index = 2)] @@ -2047,8 +2258,8 @@ pub mod api { #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub struct RemoteLockedFungibleRecord<_0> { pub amount: ::core::primitive::u128, - pub owner: runtime_types::xcm::VersionedMultiLocation, - pub locker: runtime_types::xcm::VersionedMultiLocation, + pub owner: runtime_types::xcm::VersionedLocation, + pub locker: runtime_types::xcm::VersionedLocation, pub consumers: runtime_types::bounded_collections::bounded_vec::BoundedVec<( _0, ::core::primitive::u128, @@ -2104,15 +2315,6 @@ pub mod api { pub struct Id(pub ::core::primitive::u32); #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub struct ValidationCode(pub ::std::vec::Vec<::core::primitive::u8>); - #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum XcmpMessageFormat { - #[codec(index = 0)] - ConcatenatedVersionedXcm, - #[codec(index = 1)] - ConcatenatedEncodedBlob, - #[codec(index = 2)] - Signals, - } } } pub mod polkadot_primitives { @@ -2172,339 +2374,1739 @@ pub mod api { } } } - pub mod sp_arithmetic { + pub mod primitive_types { use super::runtime_types; - pub mod fixed_point { - use super::runtime_types; - #[derive( - :: codec :: Decode, - :: codec :: Encode, - :: subxt :: ext :: codec :: CompactAs, - Clone, - Debug, - 
PartialEq, - )] - pub struct FixedU128(pub ::core::primitive::u128); - } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum ArithmeticError { - #[codec(index = 0)] - Underflow, - #[codec(index = 1)] - Overflow, - #[codec(index = 2)] - DivisionByZero, - } + pub struct U256(pub [::core::primitive::u64; 4usize]); } - pub mod sp_consensus_aura { + pub mod snowbridge_amcl { use super::runtime_types; - pub mod sr25519 { + pub mod bls381 { use super::runtime_types; - pub mod app_sr25519 { + pub mod big { use super::runtime_types; #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct Public(pub runtime_types::sp_core::sr25519::Public); + pub struct Big { + pub w: [::core::primitive::i32; 14usize], + } + } + pub mod ecp { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct ECP { + pub x: runtime_types::snowbridge_amcl::bls381::fp::FP, + pub y: runtime_types::snowbridge_amcl::bls381::fp::FP, + pub z: runtime_types::snowbridge_amcl::bls381::fp::FP, + } + } + pub mod fp { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct FP { + pub x: runtime_types::snowbridge_amcl::bls381::big::Big, + pub xes: ::core::primitive::i32, + } } } } - pub mod sp_consensus_grandpa { + pub mod snowbridge_beacon_primitives { use super::runtime_types; - pub mod app { + pub mod bls { use super::runtime_types; #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct Public(pub runtime_types::sp_core::ed25519::Public); + pub enum BlsError { + #[codec(index = 0)] + InvalidSignature, + #[codec(index = 1)] + InvalidPublicKey, + #[codec(index = 2)] + InvalidAggregatePublicKeys, + #[codec(index = 3)] + SignatureVerificationFailed, + } + } + pub mod types { + use super::runtime_types; + pub mod deneb { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, 
Clone, Debug, PartialEq)] + pub struct ExecutionPayloadHeader { + pub parent_hash: ::subxt::utils::H256, + pub fee_recipient: ::subxt::utils::H160, + pub state_root: ::subxt::utils::H256, + pub receipts_root: ::subxt::utils::H256, + pub logs_bloom: ::std::vec::Vec<::core::primitive::u8>, + pub prev_randao: ::subxt::utils::H256, + pub block_number: ::core::primitive::u64, + pub gas_limit: ::core::primitive::u64, + pub gas_used: ::core::primitive::u64, + pub timestamp: ::core::primitive::u64, + pub extra_data: ::std::vec::Vec<::core::primitive::u8>, + pub base_fee_per_gas: runtime_types::primitive_types::U256, + pub block_hash: ::subxt::utils::H256, + pub transactions_root: ::subxt::utils::H256, + pub withdrawals_root: ::subxt::utils::H256, + pub blob_gas_used: ::core::primitive::u64, + pub excess_blob_gas: ::core::primitive::u64, + } + } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct Signature(pub runtime_types::sp_core::ed25519::Signature); + pub struct BeaconHeader { + pub slot: ::core::primitive::u64, + pub proposer_index: ::core::primitive::u64, + pub parent_root: ::subxt::utils::H256, + pub state_root: ::subxt::utils::H256, + pub body_root: ::subxt::utils::H256, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct CompactBeaconState { + #[codec(compact)] + pub slot: ::core::primitive::u64, + pub block_roots_root: ::subxt::utils::H256, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct CompactExecutionHeader { + pub parent_hash: ::subxt::utils::H256, + #[codec(compact)] + pub block_number: ::core::primitive::u64, + pub state_root: ::subxt::utils::H256, + pub receipts_root: ::subxt::utils::H256, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct ExecutionHeaderState { + pub beacon_block_root: ::subxt::utils::H256, + pub beacon_slot: ::core::primitive::u64, + pub block_hash: ::subxt::utils::H256, + 
pub block_number: ::core::primitive::u64, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct ExecutionPayloadHeader { + pub parent_hash: ::subxt::utils::H256, + pub fee_recipient: ::subxt::utils::H160, + pub state_root: ::subxt::utils::H256, + pub receipts_root: ::subxt::utils::H256, + pub logs_bloom: ::std::vec::Vec<::core::primitive::u8>, + pub prev_randao: ::subxt::utils::H256, + pub block_number: ::core::primitive::u64, + pub gas_limit: ::core::primitive::u64, + pub gas_used: ::core::primitive::u64, + pub timestamp: ::core::primitive::u64, + pub extra_data: ::std::vec::Vec<::core::primitive::u8>, + pub base_fee_per_gas: runtime_types::primitive_types::U256, + pub block_hash: ::subxt::utils::H256, + pub transactions_root: ::subxt::utils::H256, + pub withdrawals_root: ::subxt::utils::H256, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Fork { + pub version: [::core::primitive::u8; 4usize], + pub epoch: ::core::primitive::u64, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct ForkVersions { + pub genesis: runtime_types::snowbridge_beacon_primitives::types::Fork, + pub altair: runtime_types::snowbridge_beacon_primitives::types::Fork, + pub bellatrix: runtime_types::snowbridge_beacon_primitives::types::Fork, + pub capella: runtime_types::snowbridge_beacon_primitives::types::Fork, + pub deneb: runtime_types::snowbridge_beacon_primitives::types::Fork, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct PublicKey(pub [::core::primitive::u8; 48usize]); + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Signature(pub [::core::primitive::u8; 96usize]); + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct SyncAggregate { + pub sync_committee_bits: [::core::primitive::u8; 64usize], + pub sync_committee_signature: + 
runtime_types::snowbridge_beacon_primitives::types::Signature, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct SyncCommittee { + pub pubkeys: + [runtime_types::snowbridge_beacon_primitives::types::PublicKey; 512usize], + pub aggregate_pubkey: + runtime_types::snowbridge_beacon_primitives::types::PublicKey, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct SyncCommitteePrepared { + pub root: ::subxt::utils::H256, + pub pubkeys: ::std::boxed::Box< + [runtime_types::snowbridge_milagro_bls::keys::PublicKey; 512usize], + >, + pub aggregate_pubkey: runtime_types::snowbridge_milagro_bls::keys::PublicKey, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum VersionedExecutionPayloadHeader { + # [codec (index = 0)] Capella (runtime_types :: snowbridge_beacon_primitives :: types :: ExecutionPayloadHeader ,) , # [codec (index = 1)] Deneb (runtime_types :: snowbridge_beacon_primitives :: types :: deneb :: ExecutionPayloadHeader ,) , } + } + pub mod updates { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct AncestryProof { + pub header_branch: ::std::vec::Vec<::subxt::utils::H256>, + pub finalized_block_root: ::subxt::utils::H256, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct CheckpointUpdate { + pub header: runtime_types::snowbridge_beacon_primitives::types::BeaconHeader, + pub current_sync_committee: + runtime_types::snowbridge_beacon_primitives::types::SyncCommittee, + pub current_sync_committee_branch: ::std::vec::Vec<::subxt::utils::H256>, + pub validators_root: ::subxt::utils::H256, + pub block_roots_root: ::subxt::utils::H256, + pub block_roots_branch: ::std::vec::Vec<::subxt::utils::H256>, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct ExecutionHeaderUpdate { pub header : 
runtime_types :: snowbridge_beacon_primitives :: types :: BeaconHeader , pub ancestry_proof : :: core :: option :: Option < runtime_types :: snowbridge_beacon_primitives :: updates :: AncestryProof > , pub execution_header : runtime_types :: snowbridge_beacon_primitives :: types :: VersionedExecutionPayloadHeader , pub execution_branch : :: std :: vec :: Vec < :: subxt :: utils :: H256 > , } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct NextSyncCommitteeUpdate { + pub next_sync_committee: + runtime_types::snowbridge_beacon_primitives::types::SyncCommittee, + pub next_sync_committee_branch: ::std::vec::Vec<::subxt::utils::H256>, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Update { pub attested_header : runtime_types :: snowbridge_beacon_primitives :: types :: BeaconHeader , pub sync_aggregate : runtime_types :: snowbridge_beacon_primitives :: types :: SyncAggregate , pub signature_slot : :: core :: primitive :: u64 , pub next_sync_committee_update : :: core :: option :: Option < runtime_types :: snowbridge_beacon_primitives :: updates :: NextSyncCommitteeUpdate > , pub finalized_header : runtime_types :: snowbridge_beacon_primitives :: types :: BeaconHeader , pub finality_branch : :: std :: vec :: Vec < :: subxt :: utils :: H256 > , pub block_roots_root : :: subxt :: utils :: H256 , pub block_roots_branch : :: std :: vec :: Vec < :: subxt :: utils :: H256 > , } } } - pub mod sp_consensus_slots { - use super::runtime_types; - #[derive( - :: codec :: Decode, - :: codec :: Encode, - :: subxt :: ext :: codec :: CompactAs, - Clone, - Debug, - PartialEq, - )] - pub struct Slot(pub ::core::primitive::u64); - #[derive( - :: codec :: Decode, - :: codec :: Encode, - :: subxt :: ext :: codec :: CompactAs, - Clone, - Debug, - PartialEq, - )] - pub struct SlotDuration(pub ::core::primitive::u64); - } - pub mod sp_core { + pub mod snowbridge_core { use super::runtime_types; - pub mod 
crypto { + pub mod inbound { use super::runtime_types; #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct KeyTypeId(pub [::core::primitive::u8; 4usize]); + pub struct Log { + pub address: ::subxt::utils::H160, + pub topics: ::std::vec::Vec<::subxt::utils::H256>, + pub data: ::std::vec::Vec<::core::primitive::u8>, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Message { + pub event_log: runtime_types::snowbridge_core::inbound::Log, + pub proof: runtime_types::snowbridge_core::inbound::Proof, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Proof { + pub block_hash: ::subxt::utils::H256, + pub tx_index: ::core::primitive::u32, + pub data: ( + ::std::vec::Vec<::std::vec::Vec<::core::primitive::u8>>, + ::std::vec::Vec<::std::vec::Vec<::core::primitive::u8>>, + ), + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum VerificationError { + #[codec(index = 0)] + HeaderNotFound, + #[codec(index = 1)] + LogNotFound, + #[codec(index = 2)] + InvalidLog, + #[codec(index = 3)] + InvalidProof, + } } - pub mod ecdsa { + pub mod operating_mode { use super::runtime_types; #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct Signature(pub [::core::primitive::u8; 65usize]); + pub enum BasicOperatingMode { + #[codec(index = 0)] + Normal, + #[codec(index = 1)] + Halted, + } } - pub mod ed25519 { + pub mod outbound { use super::runtime_types; + pub mod v1 { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum AgentExecuteCommand { + #[codec(index = 0)] + TransferToken { + token: ::subxt::utils::H160, + recipient: ::subxt::utils::H160, + amount: ::core::primitive::u128, + }, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Command { + #[codec(index = 0)] + AgentExecute { + agent_id: 
::subxt::utils::H256, + command: + runtime_types::snowbridge_core::outbound::v1::AgentExecuteCommand, + }, + #[codec(index = 1)] + Upgrade { + impl_address: ::subxt::utils::H160, + impl_code_hash: ::subxt::utils::H256, + initializer: ::core::option::Option< + runtime_types::snowbridge_core::outbound::v1::Initializer, + >, + }, + #[codec(index = 2)] + CreateAgent { agent_id: ::subxt::utils::H256 }, + #[codec(index = 3)] + CreateChannel { + channel_id: runtime_types::snowbridge_core::ChannelId, + agent_id: ::subxt::utils::H256, + mode: runtime_types::snowbridge_core::outbound::v1::OperatingMode, + }, + #[codec(index = 4)] + UpdateChannel { + channel_id: runtime_types::snowbridge_core::ChannelId, + mode: runtime_types::snowbridge_core::outbound::v1::OperatingMode, + }, + #[codec(index = 5)] + SetOperatingMode { + mode: runtime_types::snowbridge_core::outbound::v1::OperatingMode, + }, + #[codec(index = 6)] + TransferNativeFromAgent { + agent_id: ::subxt::utils::H256, + recipient: ::subxt::utils::H160, + amount: ::core::primitive::u128, + }, + #[codec(index = 7)] + SetTokenTransferFees { + create_asset_xcm: ::core::primitive::u128, + transfer_asset_xcm: ::core::primitive::u128, + register_token: runtime_types::primitive_types::U256, + }, + #[codec(index = 8)] + SetPricingParameters { + exchange_rate: runtime_types::snowbridge_core::pricing::UD60x18, + delivery_cost: ::core::primitive::u128, + multiplier: runtime_types::snowbridge_core::pricing::UD60x18, + }, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Initializer { + pub params: ::std::vec::Vec<::core::primitive::u8>, + pub maximum_required_gas: ::core::primitive::u64, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum OperatingMode { + #[codec(index = 0)] + Normal, + #[codec(index = 1)] + RejectingOutboundMessages, + } + } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct Public(pub 
[::core::primitive::u8; 32usize]); + pub struct Fee<_0> { + pub local: _0, + pub remote: _0, + } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct Signature(pub [::core::primitive::u8; 64usize]); + pub enum SendError { + #[codec(index = 0)] + MessageTooLarge, + #[codec(index = 1)] + Halted, + #[codec(index = 2)] + InvalidChannel, + } } - pub mod sr25519 { + pub mod pricing { use super::runtime_types; #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct Public(pub [::core::primitive::u8; 32usize]); + pub struct PricingParameters<_0> { + pub exchange_rate: runtime_types::sp_arithmetic::fixed_point::FixedU128, + pub rewards: runtime_types::snowbridge_core::pricing::Rewards<_0>, + pub fee_per_gas: runtime_types::primitive_types::U256, + pub multiplier: runtime_types::sp_arithmetic::fixed_point::FixedU128, + } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct Signature(pub [::core::primitive::u8; 64usize]); + pub struct Rewards<_0> { + pub local: _0, + pub remote: runtime_types::primitive_types::U256, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct UD60x18(pub runtime_types::primitive_types::U256); } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct OpaqueMetadata(pub ::std::vec::Vec<::core::primitive::u8>); + pub struct Channel { + pub agent_id: ::subxt::utils::H256, + pub para_id: runtime_types::polkadot_parachain_primitives::primitives::Id, + } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum Void {} + pub struct ChannelId(pub [::core::primitive::u8; 32usize]); } - pub mod sp_inherents { + pub mod snowbridge_milagro_bls { use super::runtime_types; - #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct CheckInherentsResult { - pub okay: ::core::primitive::bool, - pub fatal_error: ::core::primitive::bool, - 
pub errors: runtime_types::sp_inherents::InherentData, + pub mod keys { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct PublicKey { + pub point: runtime_types::snowbridge_amcl::bls381::ecp::ECP, + } } + } + pub mod snowbridge_outbound_queue_merkle_tree { + use super::runtime_types; #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct InherentData { - pub data: ::subxt::utils::KeyedVec< - [::core::primitive::u8; 8usize], - ::std::vec::Vec<::core::primitive::u8>, - >, + pub struct MerkleProof { + pub root: ::subxt::utils::H256, + pub proof: ::std::vec::Vec<::subxt::utils::H256>, + pub number_of_leaves: ::core::primitive::u64, + pub leaf_index: ::core::primitive::u64, + pub leaf: ::subxt::utils::H256, } } - pub mod sp_runtime { + pub mod snowbridge_pallet_ethereum_client { use super::runtime_types; - pub mod generic { - use super::runtime_types; - pub mod block { - use super::runtime_types; - #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct Block<_0, _1> { - pub header: _0, - pub extrinsics: ::std::vec::Vec<_1>, - } - } - pub mod digest { - use super::runtime_types; - #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum DigestItem { - #[codec(index = 6)] - PreRuntime( - [::core::primitive::u8; 4usize], - ::std::vec::Vec<::core::primitive::u8>, - ), - #[codec(index = 4)] - Consensus( - [::core::primitive::u8; 4usize], - ::std::vec::Vec<::core::primitive::u8>, - ), - #[codec(index = 5)] - Seal( - [::core::primitive::u8; 4usize], - ::std::vec::Vec<::core::primitive::u8>, - ), - #[codec(index = 0)] - Other(::std::vec::Vec<::core::primitive::u8>), - #[codec(index = 8)] - RuntimeEnvironmentUpdated, - } - } - } - pub mod transaction_validity { + pub mod pallet { use super::runtime_types; #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum InvalidTransaction { + pub 
enum Call { + # [codec (index = 0)] force_checkpoint { update : :: std :: boxed :: Box < runtime_types :: snowbridge_beacon_primitives :: updates :: CheckpointUpdate > , } , # [codec (index = 1)] submit { update : :: std :: boxed :: Box < runtime_types :: snowbridge_beacon_primitives :: updates :: Update > , } , # [codec (index = 2)] submit_execution_header { update : :: std :: boxed :: Box < runtime_types :: snowbridge_beacon_primitives :: updates :: ExecutionHeaderUpdate > , } , # [codec (index = 3)] set_operating_mode { mode : runtime_types :: snowbridge_core :: operating_mode :: BasicOperatingMode , } , } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Error { #[codec(index = 0)] - Call, + SkippedSyncCommitteePeriod, #[codec(index = 1)] - Payment, + IrrelevantUpdate, #[codec(index = 2)] - Future, + NotBootstrapped, #[codec(index = 3)] - Stale, + SyncCommitteeParticipantsNotSupermajority, #[codec(index = 4)] - BadProof, + InvalidHeaderMerkleProof, #[codec(index = 5)] - AncientBirthBlock, + InvalidSyncCommitteeMerkleProof, #[codec(index = 6)] - ExhaustsResources, + InvalidExecutionHeaderProof, #[codec(index = 7)] - Custom(::core::primitive::u8), + InvalidAncestryMerkleProof, #[codec(index = 8)] - BadMandatory, + InvalidBlockRootsRootMerkleProof, #[codec(index = 9)] - MandatoryValidation, + InvalidFinalizedHeaderGap, #[codec(index = 10)] - BadSigner, + HeaderNotFinalized, + #[codec(index = 11)] + BlockBodyHashTreeRootFailed, + #[codec(index = 12)] + HeaderHashTreeRootFailed, + #[codec(index = 13)] + SyncCommitteeHashTreeRootFailed, + #[codec(index = 14)] + SigningRootHashTreeRootFailed, + #[codec(index = 15)] + ForkDataHashTreeRootFailed, + #[codec(index = 16)] + ExpectedFinalizedHeaderNotStored, + #[codec(index = 17)] + BLSPreparePublicKeysFailed, + #[codec(index = 18)] + BLSVerificationFailed( + runtime_types::snowbridge_beacon_primitives::bls::BlsError, + ), + #[codec(index = 19)] + InvalidUpdateSlot, + #[codec(index = 
20)] + InvalidSyncCommitteeUpdate, + #[codec(index = 21)] + ExecutionHeaderTooFarBehind, + #[codec(index = 22)] + ExecutionHeaderSkippedBlock, + #[codec(index = 23)] + Halted, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Event { + #[codec(index = 0)] + BeaconHeaderImported { + block_hash: ::subxt::utils::H256, + slot: ::core::primitive::u64, + }, + #[codec(index = 1)] + ExecutionHeaderImported { + block_hash: ::subxt::utils::H256, + block_number: ::core::primitive::u64, + }, + #[codec(index = 2)] + SyncCommitteeUpdated { period: ::core::primitive::u64 }, + #[codec(index = 3)] + OperatingModeChanged { + mode: runtime_types::snowbridge_core::operating_mode::BasicOperatingMode, + }, + } + } + } + pub mod snowbridge_pallet_inbound_queue { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Call { + #[codec(index = 0)] + submit { message: runtime_types::snowbridge_core::inbound::Message }, + #[codec(index = 1)] + set_operating_mode { + mode: runtime_types::snowbridge_core::operating_mode::BasicOperatingMode, + }, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Error { + #[codec(index = 0)] + InvalidGateway, + #[codec(index = 1)] + InvalidEnvelope, + #[codec(index = 2)] + InvalidNonce, + #[codec(index = 3)] + InvalidPayload, + #[codec(index = 4)] + InvalidChannel, + #[codec(index = 5)] + MaxNonceReached, + #[codec(index = 6)] + InvalidAccountConversion, + #[codec(index = 7)] + Halted, + #[codec(index = 8)] + Verification(runtime_types::snowbridge_core::inbound::VerificationError), + #[codec(index = 9)] + Send(runtime_types::snowbridge_pallet_inbound_queue::pallet::SendError), + #[codec(index = 10)] + ConvertMessage( + runtime_types::snowbridge_router_primitives::inbound::ConvertMessageError, + ), + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, 
PartialEq)] + pub enum Event { + #[codec(index = 0)] + MessageReceived { + channel_id: runtime_types::snowbridge_core::ChannelId, + nonce: ::core::primitive::u64, + message_id: [::core::primitive::u8; 32usize], + fee_burned: ::core::primitive::u128, + }, + #[codec(index = 1)] + OperatingModeChanged { + mode: runtime_types::snowbridge_core::operating_mode::BasicOperatingMode, + }, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum SendError { + #[codec(index = 0)] + NotApplicable, + #[codec(index = 1)] + NotRoutable, + #[codec(index = 2)] + Transport, + #[codec(index = 3)] + DestinationUnsupported, + #[codec(index = 4)] + ExceedsMaxMessageSize, + #[codec(index = 5)] + MissingArgument, + #[codec(index = 6)] + Fees, + } + } + } + pub mod snowbridge_pallet_outbound_queue { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Call { + #[codec(index = 0)] + set_operating_mode { + mode: runtime_types::snowbridge_core::operating_mode::BasicOperatingMode, + }, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Error { + #[codec(index = 0)] + MessageTooLarge, + #[codec(index = 1)] + Halted, + #[codec(index = 2)] + InvalidChannel, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Event { + #[codec(index = 0)] + MessageQueued { id: ::subxt::utils::H256 }, + #[codec(index = 1)] + MessageAccepted { id: ::subxt::utils::H256, nonce: ::core::primitive::u64 }, + #[codec(index = 2)] + MessagesCommitted { root: ::subxt::utils::H256, count: ::core::primitive::u64 }, + #[codec(index = 3)] + OperatingModeChanged { + mode: runtime_types::snowbridge_core::operating_mode::BasicOperatingMode, + }, + } + } + pub mod types { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct CommittedMessage { + 
pub channel_id: runtime_types::snowbridge_core::ChannelId, + #[codec(compact)] + pub nonce: ::core::primitive::u64, + pub command: ::core::primitive::u8, + pub params: ::std::vec::Vec<::core::primitive::u8>, + #[codec(compact)] + pub max_dispatch_gas: ::core::primitive::u64, + #[codec(compact)] + pub max_fee_per_gas: ::core::primitive::u128, + #[codec(compact)] + pub reward: ::core::primitive::u128, + pub id: ::subxt::utils::H256, + } + } + } + pub mod snowbridge_pallet_system { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Call { + #[codec(index = 0)] + upgrade { + impl_address: ::subxt::utils::H160, + impl_code_hash: ::subxt::utils::H256, + initializer: ::core::option::Option< + runtime_types::snowbridge_core::outbound::v1::Initializer, + >, + }, + #[codec(index = 1)] + set_operating_mode { + mode: runtime_types::snowbridge_core::outbound::v1::OperatingMode, + }, + #[codec(index = 2)] + set_pricing_parameters { + params: runtime_types::snowbridge_core::pricing::PricingParameters< + ::core::primitive::u128, + >, + }, + #[codec(index = 3)] + create_agent, + #[codec(index = 4)] + create_channel { + mode: runtime_types::snowbridge_core::outbound::v1::OperatingMode, + }, + #[codec(index = 5)] + update_channel { + mode: runtime_types::snowbridge_core::outbound::v1::OperatingMode, + }, + #[codec(index = 6)] + force_update_channel { + channel_id: runtime_types::snowbridge_core::ChannelId, + mode: runtime_types::snowbridge_core::outbound::v1::OperatingMode, + }, + #[codec(index = 7)] + transfer_native_from_agent { + recipient: ::subxt::utils::H160, + amount: ::core::primitive::u128, + }, + #[codec(index = 8)] + force_transfer_native_from_agent { + location: ::std::boxed::Box, + recipient: ::subxt::utils::H160, + amount: ::core::primitive::u128, + }, + #[codec(index = 9)] + set_token_transfer_fees { + create_asset_xcm: ::core::primitive::u128, + 
transfer_asset_xcm: ::core::primitive::u128, + register_token: runtime_types::primitive_types::U256, + }, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Error { + #[codec(index = 0)] + LocationConversionFailed, + #[codec(index = 1)] + AgentAlreadyCreated, + #[codec(index = 2)] + NoAgent, + #[codec(index = 3)] + ChannelAlreadyCreated, + #[codec(index = 4)] + NoChannel, + #[codec(index = 5)] + UnsupportedLocationVersion, + #[codec(index = 6)] + InvalidLocation, + #[codec(index = 7)] + Send(runtime_types::snowbridge_core::outbound::SendError), + #[codec(index = 8)] + InvalidTokenTransferFees, + #[codec(index = 9)] + InvalidPricingParameters, + #[codec(index = 10)] + InvalidUpgradeParameters, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Event { + #[codec(index = 0)] + Upgrade { + impl_address: ::subxt::utils::H160, + impl_code_hash: ::subxt::utils::H256, + initializer_params_hash: ::core::option::Option<::subxt::utils::H256>, + }, + #[codec(index = 1)] + CreateAgent { + location: + ::std::boxed::Box, + agent_id: ::subxt::utils::H256, + }, + #[codec(index = 2)] + CreateChannel { + channel_id: runtime_types::snowbridge_core::ChannelId, + agent_id: ::subxt::utils::H256, + }, + #[codec(index = 3)] + UpdateChannel { + channel_id: runtime_types::snowbridge_core::ChannelId, + mode: runtime_types::snowbridge_core::outbound::v1::OperatingMode, + }, + #[codec(index = 4)] + SetOperatingMode { + mode: runtime_types::snowbridge_core::outbound::v1::OperatingMode, + }, + #[codec(index = 5)] + TransferNativeFromAgent { + agent_id: ::subxt::utils::H256, + recipient: ::subxt::utils::H160, + amount: ::core::primitive::u128, + }, + #[codec(index = 6)] + SetTokenTransferFees { + create_asset_xcm: ::core::primitive::u128, + transfer_asset_xcm: ::core::primitive::u128, + register_token: runtime_types::primitive_types::U256, + }, + #[codec(index = 7)] + PricingParametersChanged { + params: 
runtime_types::snowbridge_core::pricing::PricingParameters< + ::core::primitive::u128, + >, + }, + } + } + } + pub mod snowbridge_router_primitives { + use super::runtime_types; + pub mod inbound { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum ConvertMessageError { + #[codec(index = 0)] + UnsupportedVersion, + } + } + } + pub mod sp_arithmetic { + use super::runtime_types; + pub mod fixed_point { + use super::runtime_types; + #[derive( + :: codec :: Decode, + :: codec :: Encode, + :: subxt :: ext :: codec :: CompactAs, + Clone, + Debug, + PartialEq, + )] + pub struct FixedU128(pub ::core::primitive::u128); + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum ArithmeticError { + #[codec(index = 0)] + Underflow, + #[codec(index = 1)] + Overflow, + #[codec(index = 2)] + DivisionByZero, + } + } + pub mod sp_consensus_aura { + use super::runtime_types; + pub mod sr25519 { + use super::runtime_types; + pub mod app_sr25519 { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Public(pub runtime_types::sp_core::sr25519::Public); + } + } + } + pub mod sp_consensus_grandpa { + use super::runtime_types; + pub mod app { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Public(pub runtime_types::sp_core::ed25519::Public); + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Signature(pub runtime_types::sp_core::ed25519::Signature); + } + } + pub mod sp_consensus_slots { + use super::runtime_types; + #[derive( + :: codec :: Decode, + :: codec :: Encode, + :: subxt :: ext :: codec :: CompactAs, + Clone, + Debug, + PartialEq, + )] + pub struct Slot(pub ::core::primitive::u64); + #[derive( + :: codec :: Decode, + :: codec :: Encode, + :: subxt :: ext :: codec :: CompactAs, + Clone, + Debug, + PartialEq, + 
)] + pub struct SlotDuration(pub ::core::primitive::u64); + } + pub mod sp_core { + use super::runtime_types; + pub mod crypto { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct KeyTypeId(pub [::core::primitive::u8; 4usize]); + } + pub mod ecdsa { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Signature(pub [::core::primitive::u8; 65usize]); + } + pub mod ed25519 { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Public(pub [::core::primitive::u8; 32usize]); + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Signature(pub [::core::primitive::u8; 64usize]); + } + pub mod sr25519 { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Public(pub [::core::primitive::u8; 32usize]); + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Signature(pub [::core::primitive::u8; 64usize]); + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct OpaqueMetadata(pub ::std::vec::Vec<::core::primitive::u8>); + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Void {} + } + pub mod sp_inherents { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct CheckInherentsResult { + pub okay: ::core::primitive::bool, + pub fatal_error: ::core::primitive::bool, + pub errors: runtime_types::sp_inherents::InherentData, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct InherentData { + pub data: ::subxt::utils::KeyedVec< + [::core::primitive::u8; 8usize], + ::std::vec::Vec<::core::primitive::u8>, + >, + } + } + pub mod sp_runtime { + use super::runtime_types; + pub mod 
generic { + use super::runtime_types; + pub mod block { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Block<_0, _1> { + pub header: _0, + pub extrinsics: ::std::vec::Vec<_1>, + } + } + pub mod digest { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum DigestItem { + #[codec(index = 6)] + PreRuntime( + [::core::primitive::u8; 4usize], + ::std::vec::Vec<::core::primitive::u8>, + ), + #[codec(index = 4)] + Consensus( + [::core::primitive::u8; 4usize], + ::std::vec::Vec<::core::primitive::u8>, + ), + #[codec(index = 5)] + Seal( + [::core::primitive::u8; 4usize], + ::std::vec::Vec<::core::primitive::u8>, + ), + #[codec(index = 0)] + Other(::std::vec::Vec<::core::primitive::u8>), + #[codec(index = 8)] + RuntimeEnvironmentUpdated, + } + } + } + pub mod transaction_validity { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum InvalidTransaction { + #[codec(index = 0)] + Call, + #[codec(index = 1)] + Payment, + #[codec(index = 2)] + Future, + #[codec(index = 3)] + Stale, + #[codec(index = 4)] + BadProof, + #[codec(index = 5)] + AncientBirthBlock, + #[codec(index = 6)] + ExhaustsResources, + #[codec(index = 7)] + Custom(::core::primitive::u8), + #[codec(index = 8)] + BadMandatory, + #[codec(index = 9)] + MandatoryValidation, + #[codec(index = 10)] + BadSigner, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum TransactionSource { + #[codec(index = 0)] + InBlock, + #[codec(index = 1)] + Local, + #[codec(index = 2)] + External, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum TransactionValidityError { + #[codec(index = 0)] + Invalid(runtime_types::sp_runtime::transaction_validity::InvalidTransaction), + #[codec(index = 1)] + 
Unknown(runtime_types::sp_runtime::transaction_validity::UnknownTransaction), + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum UnknownTransaction { + #[codec(index = 0)] + CannotLookup, + #[codec(index = 1)] + NoUnsignedValidator, + #[codec(index = 2)] + Custom(::core::primitive::u8), + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct ValidTransaction { + pub priority: ::core::primitive::u64, + pub requires: ::std::vec::Vec<::std::vec::Vec<::core::primitive::u8>>, + pub provides: ::std::vec::Vec<::std::vec::Vec<::core::primitive::u8>>, + pub longevity: ::core::primitive::u64, + pub propagate: ::core::primitive::bool, + } + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum DispatchError { + #[codec(index = 0)] + Other, + #[codec(index = 1)] + CannotLookup, + #[codec(index = 2)] + BadOrigin, + #[codec(index = 3)] + Module(runtime_types::sp_runtime::ModuleError), + #[codec(index = 4)] + ConsumerRemaining, + #[codec(index = 5)] + NoProviders, + #[codec(index = 6)] + TooManyConsumers, + #[codec(index = 7)] + Token(runtime_types::sp_runtime::TokenError), + #[codec(index = 8)] + Arithmetic(runtime_types::sp_arithmetic::ArithmeticError), + #[codec(index = 9)] + Transactional(runtime_types::sp_runtime::TransactionalError), + #[codec(index = 10)] + Exhausted, + #[codec(index = 11)] + Corruption, + #[codec(index = 12)] + Unavailable, + #[codec(index = 13)] + RootNotAllowed, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct ModuleError { + pub index: ::core::primitive::u8, + pub error: [::core::primitive::u8; 4usize], + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum MultiSignature { + #[codec(index = 0)] + Ed25519(runtime_types::sp_core::ed25519::Signature), + #[codec(index = 1)] + Sr25519(runtime_types::sp_core::sr25519::Signature), + #[codec(index = 2)] + 
Ecdsa(runtime_types::sp_core::ecdsa::Signature), + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum TokenError { + #[codec(index = 0)] + FundsUnavailable, + #[codec(index = 1)] + OnlyProvider, + #[codec(index = 2)] + BelowMinimum, + #[codec(index = 3)] + CannotCreate, + #[codec(index = 4)] + UnknownAsset, + #[codec(index = 5)] + Frozen, + #[codec(index = 6)] + Unsupported, + #[codec(index = 7)] + CannotCreateHold, + #[codec(index = 8)] + NotExpendable, + #[codec(index = 9)] + Blocked, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum TransactionalError { + #[codec(index = 0)] + LimitReached, + #[codec(index = 1)] + NoLayer, + } + } + pub mod sp_trie { + use super::runtime_types; + pub mod storage_proof { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct StorageProof { + pub trie_nodes: ::std::vec::Vec<::std::vec::Vec<::core::primitive::u8>>, + } + } + } + pub mod sp_version { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct RuntimeVersion { + pub spec_name: ::std::string::String, + pub impl_name: ::std::string::String, + pub authoring_version: ::core::primitive::u32, + pub spec_version: ::core::primitive::u32, + pub impl_version: ::core::primitive::u32, + pub apis: + ::std::vec::Vec<([::core::primitive::u8; 8usize], ::core::primitive::u32)>, + pub transaction_version: ::core::primitive::u32, + pub state_version: ::core::primitive::u8, + } + } + pub mod sp_weights { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct RuntimeDbWeight { + pub read: ::core::primitive::u64, + pub write: ::core::primitive::u64, + } + } + pub mod staging_parachain_info { + use super::runtime_types; + pub mod pallet { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, 
PartialEq)] + pub enum Call {} + } + } + pub mod staging_xcm { + use super::runtime_types; + pub mod v3 { + use super::runtime_types; + pub mod multilocation { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct MultiLocation { + pub parents: ::core::primitive::u8, + pub interior: runtime_types::xcm::v3::junctions::Junctions, + } + } + } + pub mod v4 { + use super::runtime_types; + pub mod asset { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Asset { + pub id: runtime_types::staging_xcm::v4::asset::AssetId, + pub fun: runtime_types::staging_xcm::v4::asset::Fungibility, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum AssetFilter { + #[codec(index = 0)] + Definite(runtime_types::staging_xcm::v4::asset::Assets), + #[codec(index = 1)] + Wild(runtime_types::staging_xcm::v4::asset::WildAsset), + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct AssetId(pub runtime_types::staging_xcm::v4::location::Location); + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum AssetInstance { + #[codec(index = 0)] + Undefined, + #[codec(index = 1)] + Index(#[codec(compact)] ::core::primitive::u128), + #[codec(index = 2)] + Array4([::core::primitive::u8; 4usize]), + #[codec(index = 3)] + Array8([::core::primitive::u8; 8usize]), + #[codec(index = 4)] + Array16([::core::primitive::u8; 16usize]), + #[codec(index = 5)] + Array32([::core::primitive::u8; 32usize]), + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Assets( + pub ::std::vec::Vec, + ); + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Fungibility { + #[codec(index = 0)] + Fungible(#[codec(compact)] ::core::primitive::u128), + #[codec(index = 1)] + 
NonFungible(runtime_types::staging_xcm::v4::asset::AssetInstance), + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum WildAsset { + #[codec(index = 0)] + All, + #[codec(index = 1)] + AllOf { + id: runtime_types::staging_xcm::v4::asset::AssetId, + fun: runtime_types::staging_xcm::v4::asset::WildFungibility, + }, + #[codec(index = 2)] + AllCounted(#[codec(compact)] ::core::primitive::u32), + #[codec(index = 3)] + AllOfCounted { + id: runtime_types::staging_xcm::v4::asset::AssetId, + fun: runtime_types::staging_xcm::v4::asset::WildFungibility, + #[codec(compact)] + count: ::core::primitive::u32, + }, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum WildFungibility { + #[codec(index = 0)] + Fungible, + #[codec(index = 1)] + NonFungible, + } + } + pub mod junction { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Junction { + #[codec(index = 0)] + Parachain(#[codec(compact)] ::core::primitive::u32), + #[codec(index = 1)] + AccountId32 { + network: ::core::option::Option< + runtime_types::staging_xcm::v4::junction::NetworkId, + >, + id: [::core::primitive::u8; 32usize], + }, + #[codec(index = 2)] + AccountIndex64 { + network: ::core::option::Option< + runtime_types::staging_xcm::v4::junction::NetworkId, + >, + #[codec(compact)] + index: ::core::primitive::u64, + }, + #[codec(index = 3)] + AccountKey20 { + network: ::core::option::Option< + runtime_types::staging_xcm::v4::junction::NetworkId, + >, + key: [::core::primitive::u8; 20usize], + }, + #[codec(index = 4)] + PalletInstance(::core::primitive::u8), + #[codec(index = 5)] + GeneralIndex(#[codec(compact)] ::core::primitive::u128), + #[codec(index = 6)] + GeneralKey { + length: ::core::primitive::u8, + data: [::core::primitive::u8; 32usize], + }, + #[codec(index = 7)] + OnlyChild, + #[codec(index = 8)] + Plurality { + id: runtime_types::xcm::v3::junction::BodyId, + part: 
runtime_types::xcm::v3::junction::BodyPart, + }, + #[codec(index = 9)] + GlobalConsensus(runtime_types::staging_xcm::v4::junction::NetworkId), + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum NetworkId { + #[codec(index = 0)] + ByGenesis([::core::primitive::u8; 32usize]), + #[codec(index = 1)] + ByFork { + block_number: ::core::primitive::u64, + block_hash: [::core::primitive::u8; 32usize], + }, + #[codec(index = 2)] + Polkadot, + #[codec(index = 3)] + Kusama, + #[codec(index = 4)] + Westend, + #[codec(index = 5)] + Rococo, + #[codec(index = 6)] + Wococo, + #[codec(index = 7)] + Ethereum { + #[codec(compact)] + chain_id: ::core::primitive::u64, + }, + #[codec(index = 8)] + BitcoinCore, + #[codec(index = 9)] + BitcoinCash, + #[codec(index = 10)] + PolkadotBulletin, + } + } + pub mod junctions { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Junctions { + #[codec(index = 0)] + Here, + #[codec(index = 1)] + X1([runtime_types::staging_xcm::v4::junction::Junction; 1usize]), + #[codec(index = 2)] + X2([runtime_types::staging_xcm::v4::junction::Junction; 2usize]), + #[codec(index = 3)] + X3([runtime_types::staging_xcm::v4::junction::Junction; 3usize]), + #[codec(index = 4)] + X4([runtime_types::staging_xcm::v4::junction::Junction; 4usize]), + #[codec(index = 5)] + X5([runtime_types::staging_xcm::v4::junction::Junction; 5usize]), + #[codec(index = 6)] + X6([runtime_types::staging_xcm::v4::junction::Junction; 6usize]), + #[codec(index = 7)] + X7([runtime_types::staging_xcm::v4::junction::Junction; 7usize]), + #[codec(index = 8)] + X8([runtime_types::staging_xcm::v4::junction::Junction; 8usize]), + } + } + pub mod location { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub struct Location { + pub parents: ::core::primitive::u8, + pub interior: runtime_types::staging_xcm::v4::junctions::Junctions, + } + } 
+ pub mod traits { + use super::runtime_types; + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Outcome { + #[codec(index = 0)] + Complete { used: ::sp_weights::Weight }, + #[codec(index = 1)] + Incomplete { + used: ::sp_weights::Weight, + error: runtime_types::xcm::v3::traits::Error, + }, + #[codec(index = 2)] + Error { error: runtime_types::xcm::v3::traits::Error }, + } + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Instruction { + #[codec(index = 0)] + WithdrawAsset(runtime_types::staging_xcm::v4::asset::Assets), + #[codec(index = 1)] + ReserveAssetDeposited(runtime_types::staging_xcm::v4::asset::Assets), + #[codec(index = 2)] + ReceiveTeleportedAsset(runtime_types::staging_xcm::v4::asset::Assets), + #[codec(index = 3)] + QueryResponse { + #[codec(compact)] + query_id: ::core::primitive::u64, + response: runtime_types::staging_xcm::v4::Response, + max_weight: ::sp_weights::Weight, + querier: ::core::option::Option< + runtime_types::staging_xcm::v4::location::Location, + >, + }, + #[codec(index = 4)] + TransferAsset { + assets: runtime_types::staging_xcm::v4::asset::Assets, + beneficiary: runtime_types::staging_xcm::v4::location::Location, + }, + #[codec(index = 5)] + TransferReserveAsset { + assets: runtime_types::staging_xcm::v4::asset::Assets, + dest: runtime_types::staging_xcm::v4::location::Location, + xcm: runtime_types::staging_xcm::v4::Xcm, + }, + #[codec(index = 6)] + Transact { + origin_kind: runtime_types::xcm::v2::OriginKind, + require_weight_at_most: ::sp_weights::Weight, + call: runtime_types::xcm::double_encoded::DoubleEncoded, + }, + #[codec(index = 7)] + HrmpNewChannelOpenRequest { + #[codec(compact)] + sender: ::core::primitive::u32, + #[codec(compact)] + max_message_size: ::core::primitive::u32, + #[codec(compact)] + max_capacity: ::core::primitive::u32, + }, + #[codec(index = 8)] + HrmpChannelAccepted { + #[codec(compact)] + recipient: ::core::primitive::u32, + 
}, + #[codec(index = 9)] + HrmpChannelClosing { + #[codec(compact)] + initiator: ::core::primitive::u32, + #[codec(compact)] + sender: ::core::primitive::u32, + #[codec(compact)] + recipient: ::core::primitive::u32, + }, + #[codec(index = 10)] + ClearOrigin, + #[codec(index = 11)] + DescendOrigin(runtime_types::staging_xcm::v4::junctions::Junctions), + #[codec(index = 12)] + ReportError(runtime_types::staging_xcm::v4::QueryResponseInfo), + #[codec(index = 13)] + DepositAsset { + assets: runtime_types::staging_xcm::v4::asset::AssetFilter, + beneficiary: runtime_types::staging_xcm::v4::location::Location, + }, + #[codec(index = 14)] + DepositReserveAsset { + assets: runtime_types::staging_xcm::v4::asset::AssetFilter, + dest: runtime_types::staging_xcm::v4::location::Location, + xcm: runtime_types::staging_xcm::v4::Xcm, + }, + #[codec(index = 15)] + ExchangeAsset { + give: runtime_types::staging_xcm::v4::asset::AssetFilter, + want: runtime_types::staging_xcm::v4::asset::Assets, + maximal: ::core::primitive::bool, + }, + #[codec(index = 16)] + InitiateReserveWithdraw { + assets: runtime_types::staging_xcm::v4::asset::AssetFilter, + reserve: runtime_types::staging_xcm::v4::location::Location, + xcm: runtime_types::staging_xcm::v4::Xcm, + }, + #[codec(index = 17)] + InitiateTeleport { + assets: runtime_types::staging_xcm::v4::asset::AssetFilter, + dest: runtime_types::staging_xcm::v4::location::Location, + xcm: runtime_types::staging_xcm::v4::Xcm, + }, + #[codec(index = 18)] + ReportHolding { + response_info: runtime_types::staging_xcm::v4::QueryResponseInfo, + assets: runtime_types::staging_xcm::v4::asset::AssetFilter, + }, + #[codec(index = 19)] + BuyExecution { + fees: runtime_types::staging_xcm::v4::asset::Asset, + weight_limit: runtime_types::xcm::v3::WeightLimit, + }, + #[codec(index = 20)] + RefundSurplus, + #[codec(index = 21)] + SetErrorHandler(runtime_types::staging_xcm::v4::Xcm), + #[codec(index = 22)] + SetAppendix(runtime_types::staging_xcm::v4::Xcm), + 
#[codec(index = 23)] + ClearError, + #[codec(index = 24)] + ClaimAsset { + assets: runtime_types::staging_xcm::v4::asset::Assets, + ticket: runtime_types::staging_xcm::v4::location::Location, + }, + #[codec(index = 25)] + Trap(#[codec(compact)] ::core::primitive::u64), + #[codec(index = 26)] + SubscribeVersion { + #[codec(compact)] + query_id: ::core::primitive::u64, + max_response_weight: ::sp_weights::Weight, + }, + #[codec(index = 27)] + UnsubscribeVersion, + #[codec(index = 28)] + BurnAsset(runtime_types::staging_xcm::v4::asset::Assets), + #[codec(index = 29)] + ExpectAsset(runtime_types::staging_xcm::v4::asset::Assets), + #[codec(index = 30)] + ExpectOrigin( + ::core::option::Option, + ), + #[codec(index = 31)] + ExpectError( + ::core::option::Option<( + ::core::primitive::u32, + runtime_types::xcm::v3::traits::Error, + )>, + ), + #[codec(index = 32)] + ExpectTransactStatus(runtime_types::xcm::v3::MaybeErrorCode), + #[codec(index = 33)] + QueryPallet { + module_name: ::std::vec::Vec<::core::primitive::u8>, + response_info: runtime_types::staging_xcm::v4::QueryResponseInfo, + }, + #[codec(index = 34)] + ExpectPallet { + #[codec(compact)] + index: ::core::primitive::u32, + name: ::std::vec::Vec<::core::primitive::u8>, + module_name: ::std::vec::Vec<::core::primitive::u8>, + #[codec(compact)] + crate_major: ::core::primitive::u32, + #[codec(compact)] + min_crate_minor: ::core::primitive::u32, + }, + #[codec(index = 35)] + ReportTransactStatus(runtime_types::staging_xcm::v4::QueryResponseInfo), + #[codec(index = 36)] + ClearTransactStatus, + #[codec(index = 37)] + UniversalOrigin(runtime_types::staging_xcm::v4::junction::Junction), + #[codec(index = 38)] + ExportMessage { + network: runtime_types::staging_xcm::v4::junction::NetworkId, + destination: runtime_types::staging_xcm::v4::junctions::Junctions, + xcm: runtime_types::staging_xcm::v4::Xcm, + }, + #[codec(index = 39)] + LockAsset { + asset: runtime_types::staging_xcm::v4::asset::Asset, + unlocker: 
runtime_types::staging_xcm::v4::location::Location, + }, + #[codec(index = 40)] + UnlockAsset { + asset: runtime_types::staging_xcm::v4::asset::Asset, + target: runtime_types::staging_xcm::v4::location::Location, + }, + #[codec(index = 41)] + NoteUnlockable { + asset: runtime_types::staging_xcm::v4::asset::Asset, + owner: runtime_types::staging_xcm::v4::location::Location, + }, + #[codec(index = 42)] + RequestUnlock { + asset: runtime_types::staging_xcm::v4::asset::Asset, + locker: runtime_types::staging_xcm::v4::location::Location, + }, + #[codec(index = 43)] + SetFeesMode { jit_withdraw: ::core::primitive::bool }, + #[codec(index = 44)] + SetTopic([::core::primitive::u8; 32usize]), + #[codec(index = 45)] + ClearTopic, + #[codec(index = 46)] + AliasOrigin(runtime_types::staging_xcm::v4::location::Location), + #[codec(index = 47)] + UnpaidExecution { + weight_limit: runtime_types::xcm::v3::WeightLimit, + check_origin: ::core::option::Option< + runtime_types::staging_xcm::v4::location::Location, + >, + }, + } + #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] + pub enum Instruction2 { + #[codec(index = 0)] + WithdrawAsset(runtime_types::staging_xcm::v4::asset::Assets), + #[codec(index = 1)] + ReserveAssetDeposited(runtime_types::staging_xcm::v4::asset::Assets), + #[codec(index = 2)] + ReceiveTeleportedAsset(runtime_types::staging_xcm::v4::asset::Assets), + #[codec(index = 3)] + QueryResponse { + #[codec(compact)] + query_id: ::core::primitive::u64, + response: runtime_types::staging_xcm::v4::Response, + max_weight: ::sp_weights::Weight, + querier: ::core::option::Option< + runtime_types::staging_xcm::v4::location::Location, + >, + }, + #[codec(index = 4)] + TransferAsset { + assets: runtime_types::staging_xcm::v4::asset::Assets, + beneficiary: runtime_types::staging_xcm::v4::location::Location, + }, + #[codec(index = 5)] + TransferReserveAsset { + assets: runtime_types::staging_xcm::v4::asset::Assets, + dest: 
runtime_types::staging_xcm::v4::location::Location, + xcm: runtime_types::staging_xcm::v4::Xcm, + }, + #[codec(index = 6)] + Transact { + origin_kind: runtime_types::xcm::v2::OriginKind, + require_weight_at_most: ::sp_weights::Weight, + call: runtime_types::xcm::double_encoded::DoubleEncoded2, + }, + #[codec(index = 7)] + HrmpNewChannelOpenRequest { + #[codec(compact)] + sender: ::core::primitive::u32, + #[codec(compact)] + max_message_size: ::core::primitive::u32, + #[codec(compact)] + max_capacity: ::core::primitive::u32, + }, + #[codec(index = 8)] + HrmpChannelAccepted { + #[codec(compact)] + recipient: ::core::primitive::u32, + }, + #[codec(index = 9)] + HrmpChannelClosing { + #[codec(compact)] + initiator: ::core::primitive::u32, + #[codec(compact)] + sender: ::core::primitive::u32, + #[codec(compact)] + recipient: ::core::primitive::u32, + }, + #[codec(index = 10)] + ClearOrigin, + #[codec(index = 11)] + DescendOrigin(runtime_types::staging_xcm::v4::junctions::Junctions), + #[codec(index = 12)] + ReportError(runtime_types::staging_xcm::v4::QueryResponseInfo), + #[codec(index = 13)] + DepositAsset { + assets: runtime_types::staging_xcm::v4::asset::AssetFilter, + beneficiary: runtime_types::staging_xcm::v4::location::Location, + }, + #[codec(index = 14)] + DepositReserveAsset { + assets: runtime_types::staging_xcm::v4::asset::AssetFilter, + dest: runtime_types::staging_xcm::v4::location::Location, + xcm: runtime_types::staging_xcm::v4::Xcm, + }, + #[codec(index = 15)] + ExchangeAsset { + give: runtime_types::staging_xcm::v4::asset::AssetFilter, + want: runtime_types::staging_xcm::v4::asset::Assets, + maximal: ::core::primitive::bool, + }, + #[codec(index = 16)] + InitiateReserveWithdraw { + assets: runtime_types::staging_xcm::v4::asset::AssetFilter, + reserve: runtime_types::staging_xcm::v4::location::Location, + xcm: runtime_types::staging_xcm::v4::Xcm, + }, + #[codec(index = 17)] + InitiateTeleport { + assets: 
runtime_types::staging_xcm::v4::asset::AssetFilter, + dest: runtime_types::staging_xcm::v4::location::Location, + xcm: runtime_types::staging_xcm::v4::Xcm, + }, + #[codec(index = 18)] + ReportHolding { + response_info: runtime_types::staging_xcm::v4::QueryResponseInfo, + assets: runtime_types::staging_xcm::v4::asset::AssetFilter, + }, + #[codec(index = 19)] + BuyExecution { + fees: runtime_types::staging_xcm::v4::asset::Asset, + weight_limit: runtime_types::xcm::v3::WeightLimit, + }, + #[codec(index = 20)] + RefundSurplus, + #[codec(index = 21)] + SetErrorHandler(runtime_types::staging_xcm::v4::Xcm2), + #[codec(index = 22)] + SetAppendix(runtime_types::staging_xcm::v4::Xcm2), + #[codec(index = 23)] + ClearError, + #[codec(index = 24)] + ClaimAsset { + assets: runtime_types::staging_xcm::v4::asset::Assets, + ticket: runtime_types::staging_xcm::v4::location::Location, + }, + #[codec(index = 25)] + Trap(#[codec(compact)] ::core::primitive::u64), + #[codec(index = 26)] + SubscribeVersion { + #[codec(compact)] + query_id: ::core::primitive::u64, + max_response_weight: ::sp_weights::Weight, + }, + #[codec(index = 27)] + UnsubscribeVersion, + #[codec(index = 28)] + BurnAsset(runtime_types::staging_xcm::v4::asset::Assets), + #[codec(index = 29)] + ExpectAsset(runtime_types::staging_xcm::v4::asset::Assets), + #[codec(index = 30)] + ExpectOrigin( + ::core::option::Option, + ), + #[codec(index = 31)] + ExpectError( + ::core::option::Option<( + ::core::primitive::u32, + runtime_types::xcm::v3::traits::Error, + )>, + ), + #[codec(index = 32)] + ExpectTransactStatus(runtime_types::xcm::v3::MaybeErrorCode), + #[codec(index = 33)] + QueryPallet { + module_name: ::std::vec::Vec<::core::primitive::u8>, + response_info: runtime_types::staging_xcm::v4::QueryResponseInfo, + }, + #[codec(index = 34)] + ExpectPallet { + #[codec(compact)] + index: ::core::primitive::u32, + name: ::std::vec::Vec<::core::primitive::u8>, + module_name: ::std::vec::Vec<::core::primitive::u8>, + 
#[codec(compact)] + crate_major: ::core::primitive::u32, + #[codec(compact)] + min_crate_minor: ::core::primitive::u32, + }, + #[codec(index = 35)] + ReportTransactStatus(runtime_types::staging_xcm::v4::QueryResponseInfo), + #[codec(index = 36)] + ClearTransactStatus, + #[codec(index = 37)] + UniversalOrigin(runtime_types::staging_xcm::v4::junction::Junction), + #[codec(index = 38)] + ExportMessage { + network: runtime_types::staging_xcm::v4::junction::NetworkId, + destination: runtime_types::staging_xcm::v4::junctions::Junctions, + xcm: runtime_types::staging_xcm::v4::Xcm, + }, + #[codec(index = 39)] + LockAsset { + asset: runtime_types::staging_xcm::v4::asset::Asset, + unlocker: runtime_types::staging_xcm::v4::location::Location, + }, + #[codec(index = 40)] + UnlockAsset { + asset: runtime_types::staging_xcm::v4::asset::Asset, + target: runtime_types::staging_xcm::v4::location::Location, + }, + #[codec(index = 41)] + NoteUnlockable { + asset: runtime_types::staging_xcm::v4::asset::Asset, + owner: runtime_types::staging_xcm::v4::location::Location, + }, + #[codec(index = 42)] + RequestUnlock { + asset: runtime_types::staging_xcm::v4::asset::Asset, + locker: runtime_types::staging_xcm::v4::location::Location, + }, + #[codec(index = 43)] + SetFeesMode { jit_withdraw: ::core::primitive::bool }, + #[codec(index = 44)] + SetTopic([::core::primitive::u8; 32usize]), + #[codec(index = 45)] + ClearTopic, + #[codec(index = 46)] + AliasOrigin(runtime_types::staging_xcm::v4::location::Location), + #[codec(index = 47)] + UnpaidExecution { + weight_limit: runtime_types::xcm::v3::WeightLimit, + check_origin: ::core::option::Option< + runtime_types::staging_xcm::v4::location::Location, + >, + }, } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum TransactionSource { - #[codec(index = 0)] - InBlock, - #[codec(index = 1)] - Local, - #[codec(index = 2)] - External, + pub struct PalletInfo { + #[codec(compact)] + pub index: ::core::primitive::u32, 
+ pub name: runtime_types::bounded_collections::bounded_vec::BoundedVec< + ::core::primitive::u8, + >, + pub module_name: runtime_types::bounded_collections::bounded_vec::BoundedVec< + ::core::primitive::u8, + >, + #[codec(compact)] + pub major: ::core::primitive::u32, + #[codec(compact)] + pub minor: ::core::primitive::u32, + #[codec(compact)] + pub patch: ::core::primitive::u32, } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum TransactionValidityError { - #[codec(index = 0)] - Invalid(runtime_types::sp_runtime::transaction_validity::InvalidTransaction), - #[codec(index = 1)] - Unknown(runtime_types::sp_runtime::transaction_validity::UnknownTransaction), + pub struct QueryResponseInfo { + pub destination: runtime_types::staging_xcm::v4::location::Location, + #[codec(compact)] + pub query_id: ::core::primitive::u64, + pub max_weight: ::sp_weights::Weight, } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum UnknownTransaction { + pub enum Response { #[codec(index = 0)] - CannotLookup, + Null, #[codec(index = 1)] - NoUnsignedValidator, + Assets(runtime_types::staging_xcm::v4::asset::Assets), #[codec(index = 2)] - Custom(::core::primitive::u8), + ExecutionResult( + ::core::option::Option<( + ::core::primitive::u32, + runtime_types::xcm::v3::traits::Error, + )>, + ), + #[codec(index = 3)] + Version(::core::primitive::u32), + #[codec(index = 4)] + PalletsInfo( + runtime_types::bounded_collections::bounded_vec::BoundedVec< + runtime_types::staging_xcm::v4::PalletInfo, + >, + ), + #[codec(index = 5)] + DispatchResult(runtime_types::xcm::v3::MaybeErrorCode), } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct ValidTransaction { - pub priority: ::core::primitive::u64, - pub requires: ::std::vec::Vec<::std::vec::Vec<::core::primitive::u8>>, - pub provides: ::std::vec::Vec<::std::vec::Vec<::core::primitive::u8>>, - pub longevity: ::core::primitive::u64, - pub 
propagate: ::core::primitive::bool, - } - } - #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum DispatchError { - #[codec(index = 0)] - Other, - #[codec(index = 1)] - CannotLookup, - #[codec(index = 2)] - BadOrigin, - #[codec(index = 3)] - Module(runtime_types::sp_runtime::ModuleError), - #[codec(index = 4)] - ConsumerRemaining, - #[codec(index = 5)] - NoProviders, - #[codec(index = 6)] - TooManyConsumers, - #[codec(index = 7)] - Token(runtime_types::sp_runtime::TokenError), - #[codec(index = 8)] - Arithmetic(runtime_types::sp_arithmetic::ArithmeticError), - #[codec(index = 9)] - Transactional(runtime_types::sp_runtime::TransactionalError), - #[codec(index = 10)] - Exhausted, - #[codec(index = 11)] - Corruption, - #[codec(index = 12)] - Unavailable, - #[codec(index = 13)] - RootNotAllowed, - } - #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct ModuleError { - pub index: ::core::primitive::u8, - pub error: [::core::primitive::u8; 4usize], - } - #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum MultiSignature { - #[codec(index = 0)] - Ed25519(runtime_types::sp_core::ed25519::Signature), - #[codec(index = 1)] - Sr25519(runtime_types::sp_core::sr25519::Signature), - #[codec(index = 2)] - Ecdsa(runtime_types::sp_core::ecdsa::Signature), - } - #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum TokenError { - #[codec(index = 0)] - FundsUnavailable, - #[codec(index = 1)] - OnlyProvider, - #[codec(index = 2)] - BelowMinimum, - #[codec(index = 3)] - CannotCreate, - #[codec(index = 4)] - UnknownAsset, - #[codec(index = 5)] - Frozen, - #[codec(index = 6)] - Unsupported, - #[codec(index = 7)] - CannotCreateHold, - #[codec(index = 8)] - NotExpendable, - #[codec(index = 9)] - Blocked, - } - #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum TransactionalError { - #[codec(index = 0)] - LimitReached, - 
#[codec(index = 1)] - NoLayer, - } - } - pub mod sp_trie { - use super::runtime_types; - pub mod storage_proof { - use super::runtime_types; + pub struct Xcm(pub ::std::vec::Vec); #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct StorageProof { - pub trie_nodes: ::std::vec::Vec<::std::vec::Vec<::core::primitive::u8>>, - } - } - } - pub mod sp_version { - use super::runtime_types; - #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct RuntimeVersion { - pub spec_name: ::std::string::String, - pub impl_name: ::std::string::String, - pub authoring_version: ::core::primitive::u32, - pub spec_version: ::core::primitive::u32, - pub impl_version: ::core::primitive::u32, - pub apis: - ::std::vec::Vec<([::core::primitive::u8; 8usize], ::core::primitive::u32)>, - pub transaction_version: ::core::primitive::u32, - pub state_version: ::core::primitive::u8, - } - } - pub mod sp_weights { - use super::runtime_types; - #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct RuntimeDbWeight { - pub read: ::core::primitive::u64, - pub write: ::core::primitive::u64, - } - } - pub mod staging_xcm { - use super::runtime_types; - pub mod v3 { - use super::runtime_types; - pub mod multilocation { - use super::runtime_types; - #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub struct MultiLocation { - pub parents: ::core::primitive::u8, - pub interior: runtime_types::xcm::v3::junctions::Junctions, - } - } + pub struct Xcm2(pub ::std::vec::Vec); } } pub mod xcm { @@ -3279,6 +4881,8 @@ pub mod api { BitcoinCore, #[codec(index = 9)] BitcoinCash, + #[codec(index = 10)] + PolkadotBulletin, } } pub mod junctions { @@ -3506,15 +5110,6 @@ pub mod api { #[codec(index = 39)] ExceedsStackLimit, } - #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum Outcome { - #[codec(index = 0)] - Complete(::sp_weights::Weight), - #[codec(index = 
1)] - Incomplete(::sp_weights::Weight, runtime_types::xcm::v3::traits::Error), - #[codec(index = 2)] - Error(runtime_types::xcm::v3::traits::Error), - } } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum Instruction { @@ -4015,20 +5610,26 @@ pub mod api { pub enum VersionedAssetId { #[codec(index = 3)] V3(runtime_types::xcm::v3::multiasset::AssetId), + #[codec(index = 4)] + V4(runtime_types::staging_xcm::v4::asset::AssetId), } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum VersionedMultiAssets { + pub enum VersionedAssets { #[codec(index = 1)] V2(runtime_types::xcm::v2::multiasset::MultiAssets), #[codec(index = 3)] V3(runtime_types::xcm::v3::multiasset::MultiAssets), + #[codec(index = 4)] + V4(runtime_types::staging_xcm::v4::asset::Assets), } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] - pub enum VersionedMultiLocation { + pub enum VersionedLocation { #[codec(index = 1)] V2(runtime_types::xcm::v2::multilocation::MultiLocation), #[codec(index = 3)] V3(runtime_types::staging_xcm::v3::multilocation::MultiLocation), + #[codec(index = 4)] + V4(runtime_types::staging_xcm::v4::location::Location), } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum VersionedResponse { @@ -4036,6 +5637,8 @@ pub mod api { V2(runtime_types::xcm::v2::Response), #[codec(index = 3)] V3(runtime_types::xcm::v3::Response), + #[codec(index = 4)] + V4(runtime_types::staging_xcm::v4::Response), } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum VersionedXcm { @@ -4043,6 +5646,8 @@ pub mod api { V2(runtime_types::xcm::v2::Xcm), #[codec(index = 3)] V3(runtime_types::xcm::v3::Xcm), + #[codec(index = 4)] + V4(runtime_types::staging_xcm::v4::Xcm), } #[derive(:: codec :: Decode, :: codec :: Encode, Clone, Debug, PartialEq)] pub enum VersionedXcm2 { @@ -4050,6 +5655,8 @@ pub mod api { V2(runtime_types::xcm::v2::Xcm2), #[codec(index = 
3)] V3(runtime_types::xcm::v3::Xcm2), + #[codec(index = 4)] + V4(runtime_types::staging_xcm::v4::Xcm2), } } } diff --git a/relays/client-bridge-hub-polkadot/src/lib.rs b/relay-clients/client-bridge-hub-polkadot/src/lib.rs similarity index 87% rename from relays/client-bridge-hub-polkadot/src/lib.rs rename to relay-clients/client-bridge-hub-polkadot/src/lib.rs index c46516ba7eb20dfb96a724353d02195d526b7d61..60dfde371340640ae2c80d4e4f6357bfc0aae447 100644 --- a/relays/client-bridge-hub-polkadot/src/lib.rs +++ b/relay-clients/client-bridge-hub-polkadot/src/lib.rs @@ -18,13 +18,14 @@ pub mod codegen_runtime; -use bp_bridge_hub_polkadot::{TransactionExtension, AVERAGE_BLOCK_INTERVAL}; -use bp_polkadot_core::SuffixedCommonTransactionExtensionExt; +use bp_bridge_hub_polkadot::{SignedExtension, AVERAGE_BLOCK_INTERVAL}; +use bp_polkadot_core::SuffixedCommonSignedExtensionExt; use codec::Encode; use relay_substrate_client::{ calls::UtilityCall as MockUtilityCall, Chain, ChainWithBalances, ChainWithMessages, - ChainWithTransactions, ChainWithUtilityPallet, Error as SubstrateError, - MockedRuntimeUtilityPallet, SignParam, UnderlyingChainProvider, UnsignedTransaction, + ChainWithRuntimeVersion, ChainWithTransactions, ChainWithUtilityPallet, + Error as SubstrateError, MockedRuntimeUtilityPallet, SignParam, SimpleRuntimeVersion, + UnderlyingChainProvider, UnsignedTransaction, }; use sp_core::{storage::StorageKey, Pair}; use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount}; @@ -40,8 +41,7 @@ pub type BridgeKusamaMessagesCall = runtime_types::pallet_bridge_messages::palle pub type BridgePolkadotBulletinGrandpaCall = runtime_types::pallet_bridge_grandpa::pallet::Call; pub type BridgeKusamaGrandpaCall = runtime_types::pallet_bridge_grandpa::pallet::Call; pub type BridgeParachainCall = runtime_types::pallet_bridge_parachains::pallet::Call; -type UncheckedExtrinsic = - bp_bridge_hub_polkadot::UncheckedExtrinsic; +type UncheckedExtrinsic = 
bp_bridge_hub_polkadot::UncheckedExtrinsic; type UtilityCall = runtime_types::pallet_utility::pallet::Call; /// Polkadot chain definition @@ -93,7 +93,7 @@ impl ChainWithTransactions for BridgeHubPolkadot { ) -> Result { let raw_payload = SignedPayload::new( unsigned.call, - TransactionExtension::from_params( + SignedExtension::from_params( param.spec_version, param.transaction_version, unsigned.era, @@ -126,3 +126,8 @@ impl ChainWithMessages for BridgeHubPolkadot { const FROM_CHAIN_MESSAGE_DETAILS_METHOD: &'static str = bp_bridge_hub_polkadot::FROM_BRIDGE_HUB_POLKADOT_MESSAGE_DETAILS_METHOD; } + +impl ChainWithRuntimeVersion for BridgeHubPolkadot { + const RUNTIME_VERSION: Option = + Some(SimpleRuntimeVersion { spec_version: 1_002_000, transaction_version: 3 }); +} diff --git a/relays/client-bridge-hub-polkadot/src/runtime_wrapper.rs b/relay-clients/client-bridge-hub-polkadot/src/runtime_wrapper.rs similarity index 100% rename from relays/client-bridge-hub-polkadot/src/runtime_wrapper.rs rename to relay-clients/client-bridge-hub-polkadot/src/runtime_wrapper.rs diff --git a/relay-clients/client-bridge-hub-rococo/Cargo.toml b/relay-clients/client-bridge-hub-rococo/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..a872cea3f5e95ab7ef6236abb814ec7d44432e19 --- /dev/null +++ b/relay-clients/client-bridge-hub-rococo/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "relay-bridge-hub-rococo-client" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +subxt = { version = "0.35.3", default-features = false, features = ["native"] } + +# Bridge dependencies + +bp-bridge-hub-rococo = { git = 
"https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-header-chain = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-messages = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-parachains = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-polkadot-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } + +bridge-runtime-common = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +relay-substrate-client = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } + +# Substrate Dependencies + +sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +sp-weights = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } diff --git a/relays/client-bridge-hub-rococo/src/codegen_runtime.rs b/relay-clients/client-bridge-hub-rococo/src/codegen_runtime.rs similarity index 100% rename from relays/client-bridge-hub-rococo/src/codegen_runtime.rs rename to relay-clients/client-bridge-hub-rococo/src/codegen_runtime.rs diff --git a/relays/client-bridge-hub-rococo/src/lib.rs b/relay-clients/client-bridge-hub-rococo/src/lib.rs similarity index 86% rename from relays/client-bridge-hub-rococo/src/lib.rs rename to relay-clients/client-bridge-hub-rococo/src/lib.rs index 7b11836d9c461fe7f5558fb336d2964efdff3076..ed354fade40623d3a10f02725981c4b2b3de4614 100644 --- a/relays/client-bridge-hub-rococo/src/lib.rs +++ b/relay-clients/client-bridge-hub-rococo/src/lib.rs @@ -18,13 +18,14 @@ pub mod codegen_runtime; -use bp_bridge_hub_rococo::{TransactionExtension, AVERAGE_BLOCK_INTERVAL}; -use bp_polkadot_core::SuffixedCommonTransactionExtensionExt; +use bp_bridge_hub_rococo::{SignedExtension, 
AVERAGE_BLOCK_INTERVAL}; +use bp_polkadot_core::SuffixedCommonSignedExtensionExt; use codec::Encode; use relay_substrate_client::{ calls::UtilityCall as MockUtilityCall, Chain, ChainWithBalances, ChainWithMessages, - ChainWithTransactions, ChainWithUtilityPallet, Error as SubstrateError, - MockedRuntimeUtilityPallet, SignParam, UnderlyingChainProvider, UnsignedTransaction, + ChainWithRuntimeVersion, ChainWithTransactions, ChainWithUtilityPallet, + Error as SubstrateError, MockedRuntimeUtilityPallet, SignParam, SimpleRuntimeVersion, + UnderlyingChainProvider, UnsignedTransaction, }; use sp_core::{storage::StorageKey, Pair}; use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount}; @@ -38,8 +39,7 @@ pub type BridgeBulletinMessagesCall = runtime_types::pallet_bridge_messages::pal pub type BridgeGrandpaCall = runtime_types::pallet_bridge_grandpa::pallet::Call; pub type BridgeBulletinGrandpaCall = runtime_types::pallet_bridge_grandpa::pallet::Call2; pub type BridgeParachainCall = runtime_types::pallet_bridge_parachains::pallet::Call; -type UncheckedExtrinsic = - bp_bridge_hub_rococo::UncheckedExtrinsic; +type UncheckedExtrinsic = bp_bridge_hub_rococo::UncheckedExtrinsic; type UtilityCall = runtime_types::pallet_utility::pallet::Call; /// Rococo chain definition @@ -91,7 +91,7 @@ impl ChainWithTransactions for BridgeHubRococo { ) -> Result { let raw_payload = SignedPayload::new( unsigned.call, - TransactionExtension::from_params( + SignedExtension::from_params( param.spec_version, param.transaction_version, unsigned.era, @@ -124,3 +124,8 @@ impl ChainWithMessages for BridgeHubRococo { const FROM_CHAIN_MESSAGE_DETAILS_METHOD: &'static str = bp_bridge_hub_rococo::FROM_BRIDGE_HUB_ROCOCO_MESSAGE_DETAILS_METHOD; } + +impl ChainWithRuntimeVersion for BridgeHubRococo { + const RUNTIME_VERSION: Option = + Some(SimpleRuntimeVersion { spec_version: 1_009_000, transaction_version: 4 }); +} diff --git a/relay-clients/client-bridge-hub-westend/Cargo.toml 
b/relay-clients/client-bridge-hub-westend/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..2e1e21c26263e31c8cf02109931c6ede955e34a3 --- /dev/null +++ b/relay-clients/client-bridge-hub-westend/Cargo.toml @@ -0,0 +1,37 @@ +[package] +name = "relay-bridge-hub-westend-client" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +subxt = { version = "0.35.3", default-features = false, features = ["native"] } + +# Bridge dependencies + +bp-bridge-hub-westend = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-header-chain = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-messages = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-parachains = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-polkadot-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-rococo = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } + +bridge-runtime-common = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +relay-substrate-client = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } + +# Substrate Dependencies + +sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +sp-weights = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } + +[dev-dependencies] +bp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +sp-consensus-grandpa = { git = 
"https://github.com/paritytech/polkadot-sdk", branch = "master" } diff --git a/relays/client-bridge-hub-westend/src/codegen_runtime.rs b/relay-clients/client-bridge-hub-westend/src/codegen_runtime.rs similarity index 100% rename from relays/client-bridge-hub-westend/src/codegen_runtime.rs rename to relay-clients/client-bridge-hub-westend/src/codegen_runtime.rs diff --git a/relays/client-bridge-hub-westend/src/lib.rs b/relay-clients/client-bridge-hub-westend/src/lib.rs similarity index 86% rename from relays/client-bridge-hub-westend/src/lib.rs rename to relay-clients/client-bridge-hub-westend/src/lib.rs index 53ef8b52de458c871c7eba2d4550c662aee1053b..3b89324656b7e20c5908a96e674cf89ca5edba6b 100644 --- a/relays/client-bridge-hub-westend/src/lib.rs +++ b/relay-clients/client-bridge-hub-westend/src/lib.rs @@ -18,13 +18,14 @@ pub mod codegen_runtime; -use bp_bridge_hub_westend::{TransactionExtension, AVERAGE_BLOCK_INTERVAL}; -use bp_polkadot_core::SuffixedCommonTransactionExtensionExt; +use bp_bridge_hub_westend::{SignedExtension, AVERAGE_BLOCK_INTERVAL}; +use bp_polkadot_core::SuffixedCommonSignedExtensionExt; use codec::Encode; use relay_substrate_client::{ calls::UtilityCall as MockUtilityCall, Chain, ChainWithBalances, ChainWithMessages, - ChainWithTransactions, ChainWithUtilityPallet, Error as SubstrateError, - MockedRuntimeUtilityPallet, SignParam, UnderlyingChainProvider, UnsignedTransaction, + ChainWithRuntimeVersion, ChainWithTransactions, ChainWithUtilityPallet, + Error as SubstrateError, MockedRuntimeUtilityPallet, SignParam, SimpleRuntimeVersion, + UnderlyingChainProvider, UnsignedTransaction, }; use sp_core::{storage::StorageKey, Pair}; use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount}; @@ -36,8 +37,7 @@ pub type RuntimeCall = runtime_types::bridge_hub_westend_runtime::RuntimeCall; pub type BridgeMessagesCall = runtime_types::pallet_bridge_messages::pallet::Call; pub type BridgeGrandpaCall = 
runtime_types::pallet_bridge_grandpa::pallet::Call; pub type BridgeParachainCall = runtime_types::pallet_bridge_parachains::pallet::Call; -type UncheckedExtrinsic = - bp_bridge_hub_westend::UncheckedExtrinsic; +type UncheckedExtrinsic = bp_bridge_hub_westend::UncheckedExtrinsic; type UtilityCall = runtime_types::pallet_utility::pallet::Call; /// Westend chain definition @@ -89,7 +89,7 @@ impl ChainWithTransactions for BridgeHubWestend { ) -> Result { let raw_payload = SignedPayload::new( unsigned.call, - TransactionExtension::from_params( + SignedExtension::from_params( param.spec_version, param.transaction_version, unsigned.era, @@ -122,3 +122,8 @@ impl ChainWithMessages for BridgeHubWestend { const FROM_CHAIN_MESSAGE_DETAILS_METHOD: &'static str = bp_bridge_hub_westend::FROM_BRIDGE_HUB_WESTEND_MESSAGE_DETAILS_METHOD; } + +impl ChainWithRuntimeVersion for BridgeHubWestend { + const RUNTIME_VERSION: Option = + Some(SimpleRuntimeVersion { spec_version: 1_009_000, transaction_version: 4 }); +} diff --git a/relay-clients/client-kusama/Cargo.toml b/relay-clients/client-kusama/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..6872d811727f60c18e9f0581012d82143e921fbc --- /dev/null +++ b/relay-clients/client-kusama/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "relay-kusama-client" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +subxt = { version = "0.35.3", default-features = false, features = ["native"] } + +# Bridge dependencies + +bp-kusama = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-polkadot-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } 
+bp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } + +relay-substrate-client = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +relay-utils = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } + +# Substrate Dependencies + +sp-consensus-grandpa = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +sp-session = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +sp-weights = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } diff --git a/relays/client-kusama/src/codegen_runtime.rs b/relay-clients/client-kusama/src/codegen_runtime.rs similarity index 100% rename from relays/client-kusama/src/codegen_runtime.rs rename to relay-clients/client-kusama/src/codegen_runtime.rs diff --git a/relays/client-kusama/src/lib.rs b/relay-clients/client-kusama/src/lib.rs similarity index 86% rename from relays/client-kusama/src/lib.rs rename to relay-clients/client-kusama/src/lib.rs index 900e32f3dbb397269fe28e9db52cfe46d97b041d..9645b52fe288fd3d27553347d932ef6c99991b20 100644 --- a/relays/client-kusama/src/lib.rs +++ b/relay-clients/client-kusama/src/lib.rs @@ -19,11 +19,12 @@ pub mod codegen_runtime; use bp_kusama::{AccountInfoStorageMapKeyProvider, KUSAMA_SYNCED_HEADERS_GRANDPA_INFO_METHOD}; -use bp_polkadot_core::SuffixedCommonTransactionExtensionExt; +use bp_polkadot_core::SuffixedCommonSignedExtensionExt; use codec::Encode; use relay_substrate_client::{ - Chain, ChainWithBalances, ChainWithGrandpa, ChainWithTransactions, Error as SubstrateError, - RelayChain, SignParam, UnderlyingChainProvider, UnsignedTransaction, + Chain, ChainWithBalances, ChainWithGrandpa, ChainWithRuntimeVersion, ChainWithTransactions, + Error as SubstrateError, RelayChain, SignParam, 
SimpleRuntimeVersion, UnderlyingChainProvider, + UnsignedTransaction, }; use sp_core::{storage::StorageKey, Pair}; use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount, MultiAddress}; @@ -87,7 +88,7 @@ impl RelayChain for Kusama { impl ChainWithTransactions for Kusama { type AccountKeyPair = sp_core::sr25519::Pair; type SignedTransaction = - bp_polkadot_core::UncheckedExtrinsic; + bp_polkadot_core::UncheckedExtrinsic; fn sign_transaction( param: SignParam, @@ -95,7 +96,7 @@ impl ChainWithTransactions for Kusama { ) -> Result { let raw_payload = SignedPayload::new( unsigned.call, - bp_kusama::TransactionExtension::from_params( + bp_kusama::SignedExtension::from_params( param.spec_version, param.transaction_version, unsigned.era, @@ -118,3 +119,8 @@ impl ChainWithTransactions for Kusama { )) } } + +impl ChainWithRuntimeVersion for Kusama { + const RUNTIME_VERSION: Option = + Some(SimpleRuntimeVersion { spec_version: 1_001_002, transaction_version: 25 }); +} diff --git a/relay-clients/client-polkadot-bulletin/Cargo.toml b/relay-clients/client-polkadot-bulletin/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..0113daff9e08283aaabd1fceb3bffac71f473290 --- /dev/null +++ b/relay-clients/client-polkadot-bulletin/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "relay-polkadot-bulletin-client" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +subxt = { version = "0.35.3", default-features = false, features = ["native"] } + +# Bridge dependencies + +bp-header-chain = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-messages = { git = "https://github.com/paritytech/polkadot-sdk", 
branch = "master" } +bp-polkadot-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-polkadot-bulletin = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bridge-runtime-common = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +relay-substrate-client = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +relay-utils = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } + +# Substrate Dependencies + +sp-consensus-grandpa = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +sp-session = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +sp-weights = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } diff --git a/relays/client-polkadot-bulletin/src/codegen_runtime.rs b/relay-clients/client-polkadot-bulletin/src/codegen_runtime.rs similarity index 100% rename from relays/client-polkadot-bulletin/src/codegen_runtime.rs rename to relay-clients/client-polkadot-bulletin/src/codegen_runtime.rs diff --git a/relays/client-polkadot-bulletin/src/lib.rs b/relay-clients/client-polkadot-bulletin/src/lib.rs similarity index 90% rename from relays/client-polkadot-bulletin/src/lib.rs rename to relay-clients/client-polkadot-bulletin/src/lib.rs index b2c9eab704864419bc293365cb6fe3b64289c5b7..8da2e55f5213878fcea3244076c633b834f494f5 100644 --- a/relays/client-polkadot-bulletin/src/lib.rs +++ b/relay-clients/client-polkadot-bulletin/src/lib.rs @@ -21,8 +21,9 @@ mod codegen_runtime; use bp_polkadot_bulletin::POLKADOT_BULLETIN_SYNCED_HEADERS_GRANDPA_INFO_METHOD; use codec::Encode; use relay_substrate_client::{ - Chain, ChainWithBalances, 
ChainWithGrandpa, ChainWithMessages, ChainWithTransactions, - Error as SubstrateError, SignParam, UnderlyingChainProvider, UnsignedTransaction, + Chain, ChainWithBalances, ChainWithGrandpa, ChainWithMessages, ChainWithRuntimeVersion, + ChainWithTransactions, Error as SubstrateError, SignParam, SimpleRuntimeVersion, + UnderlyingChainProvider, UnsignedTransaction, }; use sp_core::{storage::StorageKey, Pair}; use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount, MultiAddress}; @@ -101,10 +102,8 @@ impl ChainWithBalances for PolkadotBulletin { impl ChainWithTransactions for PolkadotBulletin { type AccountKeyPair = sp_core::sr25519::Pair; - type SignedTransaction = bp_polkadot_bulletin::UncheckedExtrinsic< - Self::Call, - bp_polkadot_bulletin::TransactionExtension, - >; + type SignedTransaction = + bp_polkadot_bulletin::UncheckedExtrinsic; fn sign_transaction( param: SignParam, @@ -112,7 +111,7 @@ impl ChainWithTransactions for PolkadotBulletin { ) -> Result { let raw_payload = SignedPayload::new( unsigned.call, - bp_polkadot_bulletin::TransactionExtension::from_params( + bp_polkadot_bulletin::SignedExtension::from_params( param.spec_version, param.transaction_version, unsigned.era, @@ -133,3 +132,8 @@ impl ChainWithTransactions for PolkadotBulletin { )) } } + +impl ChainWithRuntimeVersion for PolkadotBulletin { + const RUNTIME_VERSION: Option = + Some(SimpleRuntimeVersion { spec_version: 100, transaction_version: 1 }); +} diff --git a/relay-clients/client-polkadot/Cargo.toml b/relay-clients/client-polkadot/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..66d8cf3b7983b7c987ef2a306b5954b1abfd7dc9 --- /dev/null +++ b/relay-clients/client-polkadot/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "relay-polkadot-client" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +codec = { 
package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +subxt = { version = "0.35.3", default-features = false, features = ["native"] } + +# Bridge dependencies + +bp-polkadot = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-polkadot-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } + +relay-substrate-client = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +relay-utils = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } + +# Substrate Dependencies + +sp-consensus-grandpa = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +sp-session = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +sp-weights = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } diff --git a/relays/client-polkadot/src/codegen_runtime.rs b/relay-clients/client-polkadot/src/codegen_runtime.rs similarity index 100% rename from relays/client-polkadot/src/codegen_runtime.rs rename to relay-clients/client-polkadot/src/codegen_runtime.rs diff --git a/relays/client-polkadot/src/lib.rs b/relay-clients/client-polkadot/src/lib.rs similarity index 87% rename from relays/client-polkadot/src/lib.rs rename to relay-clients/client-polkadot/src/lib.rs index 9a6c0a113068b45f558e8d0b3e438d05629960df..40127889b88e91344a0cfd3dcc6176928010479d 100644 --- a/relays/client-polkadot/src/lib.rs +++ b/relay-clients/client-polkadot/src/lib.rs @@ -19,11 +19,12 @@ mod codegen_runtime; use bp_polkadot::{AccountInfoStorageMapKeyProvider, POLKADOT_SYNCED_HEADERS_GRANDPA_INFO_METHOD}; -use 
bp_polkadot_core::SuffixedCommonTransactionExtensionExt; +use bp_polkadot_core::SuffixedCommonSignedExtensionExt; use codec::Encode; use relay_substrate_client::{ - Chain, ChainWithBalances, ChainWithGrandpa, ChainWithTransactions, Error as SubstrateError, - RelayChain, SignParam, UnderlyingChainProvider, UnsignedTransaction, + Chain, ChainWithBalances, ChainWithGrandpa, ChainWithRuntimeVersion, ChainWithTransactions, + Error as SubstrateError, RelayChain, SignParam, SimpleRuntimeVersion, UnderlyingChainProvider, + UnsignedTransaction, }; use sp_core::{storage::StorageKey, Pair}; use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount, MultiAddress}; @@ -87,7 +88,7 @@ impl RelayChain for Polkadot { impl ChainWithTransactions for Polkadot { type AccountKeyPair = sp_core::sr25519::Pair; type SignedTransaction = - bp_polkadot_core::UncheckedExtrinsic; + bp_polkadot_core::UncheckedExtrinsic; fn sign_transaction( param: SignParam, @@ -95,7 +96,7 @@ impl ChainWithTransactions for Polkadot { ) -> Result { let raw_payload = SignedPayload::new( unsigned.call, - bp_polkadot::TransactionExtension::from_params( + bp_polkadot::SignedExtension::from_params( param.spec_version, param.transaction_version, unsigned.era, @@ -118,3 +119,8 @@ impl ChainWithTransactions for Polkadot { )) } } + +impl ChainWithRuntimeVersion for Polkadot { + const RUNTIME_VERSION: Option = + Some(SimpleRuntimeVersion { spec_version: 1_001_002, transaction_version: 25 }); +} diff --git a/relays/client-rococo/Cargo.toml b/relay-clients/client-rococo/Cargo.toml similarity index 52% rename from relays/client-rococo/Cargo.toml rename to relay-clients/client-rococo/Cargo.toml index ba546396fac78fdabc3c7f995193c8a0e74696f1..9953f9006ef9ebef10c5dea793037e064ad011b2 100644 --- a/relays/client-rococo/Cargo.toml +++ b/relay-clients/client-rococo/Cargo.toml @@ -1,25 +1,26 @@ [package] name = "relay-rococo-client" version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" +authors.workspace = 
true +edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true [lints] workspace = true [dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -subxt = { version = "0.32.1", default-features = false, features = ["native"] } +codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +subxt = { version = "0.35.3", default-features = false, features = ["native"] } # Bridge dependencies -bp-polkadot-core = { path = "../../primitives/polkadot-core" } -bp-rococo = { path = "../../primitives/chain-rococo" } +bp-polkadot-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-rococo = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -relay-substrate-client = { path = "../client-substrate" } -relay-utils = { path = "../utils" } +relay-substrate-client = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +relay-utils = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } # Substrate Dependencies diff --git a/relays/client-rococo/src/codegen_runtime.rs b/relay-clients/client-rococo/src/codegen_runtime.rs similarity index 100% rename from relays/client-rococo/src/codegen_runtime.rs rename to relay-clients/client-rococo/src/codegen_runtime.rs diff --git a/relays/client-rococo/src/lib.rs b/relay-clients/client-rococo/src/lib.rs similarity index 86% rename from relays/client-rococo/src/lib.rs rename to relay-clients/client-rococo/src/lib.rs index 88ecf8223209ec195c18edb53a2471625f9008ca..e1a39ea731904e3bb46ffd141bca040596ab18c0 100644 --- a/relays/client-rococo/src/lib.rs +++ b/relay-clients/client-rococo/src/lib.rs @@ -18,12 +18,13 @@ pub mod codegen_runtime; -use 
bp_polkadot_core::SuffixedCommonTransactionExtensionExt; +use bp_polkadot_core::SuffixedCommonSignedExtensionExt; use bp_rococo::ROCOCO_SYNCED_HEADERS_GRANDPA_INFO_METHOD; use codec::Encode; use relay_substrate_client::{ - Chain, ChainWithBalances, ChainWithGrandpa, ChainWithTransactions, Error as SubstrateError, - RelayChain, SignParam, UnderlyingChainProvider, UnsignedTransaction, + Chain, ChainWithBalances, ChainWithGrandpa, ChainWithRuntimeVersion, ChainWithTransactions, + Error as SubstrateError, RelayChain, SignParam, SimpleRuntimeVersion, UnderlyingChainProvider, + UnsignedTransaction, }; use sp_core::{storage::StorageKey, Pair}; use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount, MultiAddress}; @@ -87,7 +88,7 @@ impl RelayChain for Rococo { impl ChainWithTransactions for Rococo { type AccountKeyPair = sp_core::sr25519::Pair; type SignedTransaction = - bp_polkadot_core::UncheckedExtrinsic; + bp_polkadot_core::UncheckedExtrinsic; fn sign_transaction( param: SignParam, @@ -95,7 +96,7 @@ impl ChainWithTransactions for Rococo { ) -> Result { let raw_payload = SignedPayload::new( unsigned.call, - bp_rococo::TransactionExtension::from_params( + bp_rococo::SignedExtension::from_params( param.spec_version, param.transaction_version, unsigned.era, @@ -118,3 +119,8 @@ impl ChainWithTransactions for Rococo { )) } } + +impl ChainWithRuntimeVersion for Rococo { + const RUNTIME_VERSION: Option = + Some(SimpleRuntimeVersion { spec_version: 1_009_000, transaction_version: 24 }); +} diff --git a/relay-clients/client-westend/Cargo.toml b/relay-clients/client-westend/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..1660035173e41e80881803d4c4beaf898cf8a875 --- /dev/null +++ b/relay-clients/client-westend/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "relay-westend-client" +version = "0.1.0" +authors.workspace = true +edition.workspace = true +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true + 
+[lints] +workspace = true + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.6.1", features = ["derive"] } +scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } +subxt = { version = "0.35.3", default-features = false, features = ["native"] } + +# Bridge dependencies + +bp-polkadot-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-westend = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } + +relay-substrate-client = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +relay-utils = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } + +# Substrate Dependencies + +sp-consensus-grandpa = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +sp-session = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +sp-weights = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } diff --git a/relays/client-westend/src/codegen_runtime.rs b/relay-clients/client-westend/src/codegen_runtime.rs similarity index 100% rename from relays/client-westend/src/codegen_runtime.rs rename to relay-clients/client-westend/src/codegen_runtime.rs diff --git a/relays/client-westend/src/lib.rs b/relay-clients/client-westend/src/lib.rs similarity index 86% rename from relays/client-westend/src/lib.rs rename to relay-clients/client-westend/src/lib.rs index eb1d478fa36457734f2dc3e49d2cbe85201eeeda..7909f687e4837d12e03c56fe189413a54ac8853f 100644 --- a/relays/client-westend/src/lib.rs +++ b/relay-clients/client-westend/src/lib.rs @@ -18,12 +18,13 @@ pub mod codegen_runtime; -use 
bp_polkadot_core::SuffixedCommonTransactionExtensionExt; +use bp_polkadot_core::SuffixedCommonSignedExtensionExt; use bp_westend::WESTEND_SYNCED_HEADERS_GRANDPA_INFO_METHOD; use codec::Encode; use relay_substrate_client::{ - Chain, ChainWithBalances, ChainWithGrandpa, ChainWithTransactions, Error as SubstrateError, - RelayChain, SignParam, UnderlyingChainProvider, UnsignedTransaction, + Chain, ChainWithBalances, ChainWithGrandpa, ChainWithRuntimeVersion, ChainWithTransactions, + Error as SubstrateError, RelayChain, SignParam, SimpleRuntimeVersion, UnderlyingChainProvider, + UnsignedTransaction, }; use sp_core::{storage::StorageKey, Pair}; use sp_runtime::{generic::SignedPayload, traits::IdentifyAccount, MultiAddress}; @@ -87,7 +88,7 @@ impl ChainWithBalances for Westend { impl ChainWithTransactions for Westend { type AccountKeyPair = sp_core::sr25519::Pair; type SignedTransaction = - bp_polkadot_core::UncheckedExtrinsic; + bp_polkadot_core::UncheckedExtrinsic; fn sign_transaction( param: SignParam, @@ -95,7 +96,7 @@ impl ChainWithTransactions for Westend { ) -> Result { let raw_payload = SignedPayload::new( unsigned.call, - bp_westend::TransactionExtension::from_params( + bp_westend::SignedExtension::from_params( param.spec_version, param.transaction_version, unsigned.era, @@ -118,3 +119,8 @@ impl ChainWithTransactions for Westend { )) } } + +impl ChainWithRuntimeVersion for Westend { + const RUNTIME_VERSION: Option = + Some(SimpleRuntimeVersion { spec_version: 1_009_000, transaction_version: 24 }); +} diff --git a/relays/bin-substrate/Cargo.toml b/relays/bin-substrate/Cargo.toml deleted file mode 100644 index d5873752e22f852e8708f4802cf6336836d1f213..0000000000000000000000000000000000000000 --- a/relays/bin-substrate/Cargo.toml +++ /dev/null @@ -1,66 +0,0 @@ -[package] -name = "substrate-relay" -version = "1.2.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - 
-[dependencies] -anyhow = "1.0" -async-std = "1.9.0" -async-trait = "0.1" -codec = { package = "parity-scale-codec", version = "3.1.5" } -env_logger = "0.11" -futures = "0.3.30" -hex = "0.4" -log = { workspace = true } -num-format = "0.4" -num-traits = "0.2" -rbtag = "0.3" -structopt = "0.3" -signal-hook = "0.3.15" -signal-hook-async-std = "0.2.2" -strum = { version = "0.26.2", features = ["derive"] } - -# Bridge dependencies -bp-bridge-hub-polkadot = { path = "../../primitives/chain-bridge-hub-polkadot" } -bp-bridge-hub-rococo = { path = "../../primitives/chain-bridge-hub-rococo" } -bp-header-chain = { path = "../../primitives/header-chain" } -bp-messages = { path = "../../primitives/messages" } -bp-parachains = { path = "../../primitives/parachains" } -bp-polkadot-bulletin = { path = "../../primitives/chain-polkadot-bulletin" } -bp-polkadot = { path = "../../primitives/chain-polkadot" } -bp-polkadot-core = { path = "../../primitives/polkadot-core" } -bp-rococo = { path = "../../primitives/chain-rococo" } -bp-runtime = { path = "../../primitives/runtime" } -bridge-runtime-common = { path = "../../bin/runtime-common" } -pallet-bridge-parachains = { path = "../../modules/parachains" } -parachains-relay = { path = "../parachains" } -relay-bridge-hub-kusama-client = { path = "../client-bridge-hub-kusama" } -relay-bridge-hub-polkadot-client = { path = "../client-bridge-hub-polkadot" } -relay-bridge-hub-rococo-client = { path = "../client-bridge-hub-rococo" } -relay-bridge-hub-westend-client = { path = "../client-bridge-hub-westend" } -relay-kusama-client = { path = "../client-kusama" } -relay-polkadot-client = { path = "../client-polkadot" } -relay-polkadot-bulletin-client = { path = "../client-polkadot-bulletin" } -relay-rococo-client = { path = "../client-rococo" } -relay-substrate-client = { path = "../client-substrate" } -relay-utils = { path = "../utils" } -relay-westend-client = { path = "../client-westend" } -substrate-relay-helper = { path = 
"../lib-substrate-relay" } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } - -[dev-dependencies] -bp-test-utils = { path = "../../primitives/test-utils" } -hex-literal = "0.4" -sp-keyring = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -tempfile = "3.10" -finality-grandpa = { version = "0.16.2" } diff --git a/relays/bin-substrate/src/chains/kusama.rs b/relays/bin-substrate/src/chains/kusama.rs deleted file mode 100644 index 80ffdfed0037acec1c8b74f56186d70f8355dfa6..0000000000000000000000000000000000000000 --- a/relays/bin-substrate/src/chains/kusama.rs +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2022 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Kusama + Kusama parachains specification for CLI. 
- -use crate::cli::CliChain; -use relay_bridge_hub_kusama_client::BridgeHubKusama; -use relay_kusama_client::Kusama; -use relay_substrate_client::SimpleRuntimeVersion; - -impl CliChain for Kusama { - const RUNTIME_VERSION: Option = - Some(SimpleRuntimeVersion { spec_version: 1_001_002, transaction_version: 25 }); -} - -impl CliChain for BridgeHubKusama { - const RUNTIME_VERSION: Option = - Some(SimpleRuntimeVersion { spec_version: 1_001_000, transaction_version: 4 }); -} diff --git a/relays/bin-substrate/src/chains/mod.rs b/relays/bin-substrate/src/chains/mod.rs deleted file mode 100644 index ab15a9e679cd59aad2480232f107781e7f374d48..0000000000000000000000000000000000000000 --- a/relays/bin-substrate/src/chains/mod.rs +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Chain-specific relayer configuration. - -mod kusama; -mod polkadot; -mod polkadot_bulletin; -mod rococo; -mod westend; diff --git a/relays/bin-substrate/src/chains/polkadot.rs b/relays/bin-substrate/src/chains/polkadot.rs deleted file mode 100644 index 1f1c607916a197ea04c855e9d6d744cf885a0ffd..0000000000000000000000000000000000000000 --- a/relays/bin-substrate/src/chains/polkadot.rs +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2022 Parity Technologies (UK) Ltd. 
-// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Polkadot + Polkadot parachains specification for CLI. - -use crate::cli::CliChain; -use relay_bridge_hub_polkadot_client::BridgeHubPolkadot; -use relay_polkadot_client::Polkadot; -use relay_substrate_client::SimpleRuntimeVersion; - -impl CliChain for Polkadot { - const RUNTIME_VERSION: Option = - Some(SimpleRuntimeVersion { spec_version: 1_001_002, transaction_version: 25 }); -} - -impl CliChain for BridgeHubPolkadot { - const RUNTIME_VERSION: Option = - Some(SimpleRuntimeVersion { spec_version: 1_001_000, transaction_version: 3 }); -} diff --git a/relays/bin-substrate/src/chains/polkadot_bulletin.rs b/relays/bin-substrate/src/chains/polkadot_bulletin.rs deleted file mode 100644 index ee7edbd9f423129d74efc4eac6058537f9f140d4..0000000000000000000000000000000000000000 --- a/relays/bin-substrate/src/chains/polkadot_bulletin.rs +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2022 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Polkadot + Polkadot parachains specification for CLI. - -use crate::cli::CliChain; -use relay_polkadot_bulletin_client::PolkadotBulletin; -use relay_substrate_client::SimpleRuntimeVersion; - -impl CliChain for PolkadotBulletin { - const RUNTIME_VERSION: Option = - Some(SimpleRuntimeVersion { spec_version: 100, transaction_version: 1 }); -} diff --git a/relays/bin-substrate/src/chains/rococo.rs b/relays/bin-substrate/src/chains/rococo.rs deleted file mode 100644 index 0640447fdc515ff0d99cd4fb9aa8148ee1d69e7f..0000000000000000000000000000000000000000 --- a/relays/bin-substrate/src/chains/rococo.rs +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2022 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Rococo + Rococo parachains specification for CLI. 
- -use crate::cli::CliChain; -use relay_bridge_hub_rococo_client::BridgeHubRococo; -use relay_rococo_client::Rococo; -use relay_substrate_client::SimpleRuntimeVersion; - -impl CliChain for Rococo { - const RUNTIME_VERSION: Option = - Some(SimpleRuntimeVersion { spec_version: 1_008_000, transaction_version: 24 }); -} - -impl CliChain for BridgeHubRococo { - const RUNTIME_VERSION: Option = - Some(SimpleRuntimeVersion { spec_version: 1_008_000, transaction_version: 4 }); -} diff --git a/relays/bin-substrate/src/chains/westend.rs b/relays/bin-substrate/src/chains/westend.rs deleted file mode 100644 index 41f5fc4e96c8fcc1ff8d9ab5d9183ba40f354f74..0000000000000000000000000000000000000000 --- a/relays/bin-substrate/src/chains/westend.rs +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Westend chain specification for CLI. 
- -use crate::cli::CliChain; -use relay_bridge_hub_westend_client::BridgeHubWestend; -use relay_substrate_client::SimpleRuntimeVersion; -use relay_westend_client::Westend; - -impl CliChain for Westend { - const RUNTIME_VERSION: Option = - Some(SimpleRuntimeVersion { spec_version: 1_008_000, transaction_version: 24 }); -} - -impl CliChain for BridgeHubWestend { - const RUNTIME_VERSION: Option = - Some(SimpleRuntimeVersion { spec_version: 1_008_000, transaction_version: 4 }); -} diff --git a/relays/bin-substrate/src/cli/bridge.rs b/relays/bin-substrate/src/cli/bridge.rs deleted file mode 100644 index 9457dfa5c0680f7f58e29cc68ff3ed1ad88ae6ec..0000000000000000000000000000000000000000 --- a/relays/bin-substrate/src/cli/bridge.rs +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -use crate::cli::CliChain; -use pallet_bridge_parachains::{RelayBlockHash, RelayBlockHasher, RelayBlockNumber}; -use relay_substrate_client::{Chain, ChainWithTransactions, Parachain, RelayChain}; -use strum::{EnumString, VariantNames}; -use substrate_relay_helper::{ - equivocation::SubstrateEquivocationDetectionPipeline, - finality::SubstrateFinalitySyncPipeline, - messages_lane::{MessagesRelayLimits, SubstrateMessageLane}, - parachains::SubstrateParachainsPipeline, -}; - -#[derive(Debug, PartialEq, Eq, EnumString, VariantNames)] -#[strum(serialize_all = "kebab_case")] -/// Supported full bridges (headers + messages). -pub enum FullBridge { - BridgeHubRococoToBridgeHubWestend, - BridgeHubWestendToBridgeHubRococo, - BridgeHubKusamaToBridgeHubPolkadot, - BridgeHubPolkadotToBridgeHubKusama, - PolkadotBulletinToBridgeHubPolkadot, - BridgeHubPolkadotToPolkadotBulletin, - RococoBulletinToBridgeHubRococo, - BridgeHubRococoToRococoBulletin, -} - -/// Minimal bridge representation that can be used from the CLI. -/// It connects a source chain to a target chain. -pub trait CliBridgeBase: Sized { - /// The source chain. - type Source: Chain + CliChain; - /// The target chain. - type Target: ChainWithTransactions + CliChain; -} - -/// Bridge representation that can be used from the CLI for relaying headers -/// from a relay chain to a relay chain. -pub trait RelayToRelayHeadersCliBridge: CliBridgeBase { - /// Finality proofs synchronization pipeline. - type Finality: SubstrateFinalitySyncPipeline< - SourceChain = Self::Source, - TargetChain = Self::Target, - >; -} - -/// Convenience trait that adds bounds to `CliBridgeBase`. 
-pub trait RelayToRelayEquivocationDetectionCliBridgeBase: CliBridgeBase { - type BoundedSource: ChainWithTransactions; -} - -impl RelayToRelayEquivocationDetectionCliBridgeBase for T -where - T: CliBridgeBase, - T::Source: ChainWithTransactions, -{ - type BoundedSource = T::Source; -} - -/// Bridge representation that can be used from the CLI for detecting equivocations -/// in the headers synchronized from a relay chain to a relay chain. -pub trait RelayToRelayEquivocationDetectionCliBridge: - RelayToRelayEquivocationDetectionCliBridgeBase -{ - /// Equivocation detection pipeline. - type Equivocation: SubstrateEquivocationDetectionPipeline< - SourceChain = Self::Source, - TargetChain = Self::Target, - >; -} - -/// Bridge representation that can be used from the CLI for relaying headers -/// from a parachain to a relay chain. -pub trait ParachainToRelayHeadersCliBridge: CliBridgeBase -where - Self::Source: Parachain, -{ - // The `CliBridgeBase` type represents the parachain in this situation. - // We need to add an extra type for the relay chain. - type SourceRelay: Chain - + CliChain - + RelayChain; - /// Finality proofs synchronization pipeline (source parachain -> target). - type ParachainFinality: SubstrateParachainsPipeline< - SourceRelayChain = Self::SourceRelay, - SourceParachain = Self::Source, - TargetChain = Self::Target, - >; - /// Finality proofs synchronization pipeline (source relay chain -> target). - type RelayFinality: SubstrateFinalitySyncPipeline< - SourceChain = Self::SourceRelay, - TargetChain = Self::Target, - >; -} - -/// Bridge representation that can be used from the CLI for relaying messages. -pub trait MessagesCliBridge: CliBridgeBase { - /// The Source -> Destination messages synchronization pipeline. - type MessagesLane: SubstrateMessageLane; - - /// Optional messages delivery transaction limits that the messages relay is going - /// to use. 
If it returns `None`, limits are estimated using `TransactionPayment` API - /// at the target chain. - fn maybe_messages_limits() -> Option { - None - } -} diff --git a/relays/bin-substrate/src/cli/chain_schema.rs b/relays/bin-substrate/src/cli/chain_schema.rs deleted file mode 100644 index 65559397ac2a7923c1bc34fc87938fe66da36221..0000000000000000000000000000000000000000 --- a/relays/bin-substrate/src/cli/chain_schema.rs +++ /dev/null @@ -1,346 +0,0 @@ -// Copyright 2019-2022 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License - -// along with Parity Bridges Common. If not, see . - -use relay_substrate_client::{AccountKeyPairOf, ChainWithTransactions}; -use structopt::StructOpt; -use strum::{EnumString, VariantNames}; - -use crate::cli::CliChain; -pub use relay_substrate_client::{ChainRuntimeVersion, SimpleRuntimeVersion}; -use substrate_relay_helper::TransactionParams; - -#[doc = "Runtime version params."] -#[derive(StructOpt, Debug, PartialEq, Eq, Clone, Copy, EnumString, VariantNames)] -pub enum RuntimeVersionType { - /// Auto query version from chain - Auto, - /// Custom `spec_version` and `transaction_version` - Custom, - /// Read version from bundle dependencies directly. - Bundle, -} - -/// Create chain-specific set of runtime version parameters. -#[macro_export] -macro_rules! 
declare_chain_runtime_version_params_cli_schema { - ($chain:ident, $chain_prefix:ident) => { - bp_runtime::paste::item! { - #[doc = $chain " runtime version params."] - #[derive(StructOpt, Debug, PartialEq, Eq, Clone, Copy)] - pub struct [<$chain RuntimeVersionParams>] { - #[doc = "The type of runtime version for chain " $chain] - #[structopt(long, default_value = "Bundle")] - pub [<$chain_prefix _version_mode>]: RuntimeVersionType, - #[doc = "The custom sepc_version for chain " $chain] - #[structopt(long)] - pub [<$chain_prefix _spec_version>]: Option, - #[doc = "The custom transaction_version for chain " $chain] - #[structopt(long)] - pub [<$chain_prefix _transaction_version>]: Option, - } - - impl [<$chain RuntimeVersionParams>] { - /// Converts self into `ChainRuntimeVersion`. - pub fn into_runtime_version( - self, - bundle_runtime_version: Option, - ) -> anyhow::Result { - Ok(match self.[<$chain_prefix _version_mode>] { - RuntimeVersionType::Auto => ChainRuntimeVersion::Auto, - RuntimeVersionType::Custom => { - let custom_spec_version = self.[<$chain_prefix _spec_version>] - .ok_or_else(|| anyhow::Error::msg(format!("The {}-spec-version is required when choose custom mode", stringify!($chain_prefix))))?; - let custom_transaction_version = self.[<$chain_prefix _transaction_version>] - .ok_or_else(|| anyhow::Error::msg(format!("The {}-transaction-version is required when choose custom mode", stringify!($chain_prefix))))?; - ChainRuntimeVersion::Custom( - SimpleRuntimeVersion { - spec_version: custom_spec_version, - transaction_version: custom_transaction_version - } - ) - }, - RuntimeVersionType::Bundle => match bundle_runtime_version { - Some(runtime_version) => ChainRuntimeVersion::Custom(runtime_version), - None => { - return Err(anyhow::format_err!("Cannot use bundled runtime version of {}: it is not known to the relay", stringify!($chain_prefix))); - } - }, - }) - } - } - } - }; -} - -/// Create chain-specific set of runtime version parameters. 
-#[macro_export] -macro_rules! declare_chain_connection_params_cli_schema { - ($chain:ident, $chain_prefix:ident) => { - bp_runtime::paste::item! { - #[doc = $chain " connection params."] - #[derive(StructOpt, Debug, PartialEq, Eq, Clone)] - pub struct [<$chain ConnectionParams>] { - #[doc = "Connect to " $chain " node at given host."] - #[structopt(long, default_value = "127.0.0.1")] - pub [<$chain_prefix _host>]: String, - #[doc = "Connect to " $chain " node websocket server at given port."] - #[structopt(long, default_value = "9944")] - pub [<$chain_prefix _port>]: u16, - #[doc = "Use secure websocket connection."] - #[structopt(long)] - pub [<$chain_prefix _secure>]: bool, - #[doc = "Custom runtime version"] - #[structopt(flatten)] - pub [<$chain_prefix _runtime_version>]: [<$chain RuntimeVersionParams>], - } - - impl [<$chain ConnectionParams>] { - /// Convert connection params into Substrate client. - #[allow(dead_code)] - pub async fn into_client( - self, - ) -> anyhow::Result> { - let chain_runtime_version = self - .[<$chain_prefix _runtime_version>] - .into_runtime_version(Chain::RUNTIME_VERSION)?; - Ok(relay_substrate_client::Client::new(relay_substrate_client::ConnectionParams { - host: self.[<$chain_prefix _host>], - port: self.[<$chain_prefix _port>], - secure: self.[<$chain_prefix _secure>], - chain_runtime_version, - }) - .await - ) - } - } - } - }; -} - -/// Create chain-specific set of signing parameters. -#[macro_export] -macro_rules! declare_chain_signing_params_cli_schema { - ($chain:ident, $chain_prefix:ident) => { - bp_runtime::paste::item! 
{ - #[doc = $chain " signing params."] - #[derive(StructOpt, Debug, PartialEq, Eq, Clone)] - pub struct [<$chain SigningParams>] { - #[doc = "The SURI of secret key to use when transactions are submitted to the " $chain " node."] - #[structopt(long)] - pub [<$chain_prefix _signer>]: Option, - #[doc = "The password for the SURI of secret key to use when transactions are submitted to the " $chain " node."] - #[structopt(long)] - pub [<$chain_prefix _signer_password>]: Option, - - #[doc = "Path to the file, that contains SURI of secret key to use when transactions are submitted to the " $chain " node. Can be overridden with " $chain_prefix "_signer option."] - #[structopt(long)] - pub [<$chain_prefix _signer_file>]: Option, - #[doc = "Path to the file, that password for the SURI of secret key to use when transactions are submitted to the " $chain " node. Can be overridden with " $chain_prefix "_signer_password option."] - #[structopt(long)] - pub [<$chain_prefix _signer_password_file>]: Option, - - #[doc = "Transactions mortality period, in blocks. MUST be a power of two in [4; 65536] range. MAY NOT be larger than `BlockHashCount` parameter of the chain system module."] - #[structopt(long)] - pub [<$chain_prefix _transactions_mortality>]: Option, - } - - impl [<$chain SigningParams>] { - /// Return transactions mortality. - #[allow(dead_code)] - pub fn transactions_mortality(&self) -> anyhow::Result> { - self.[<$chain_prefix _transactions_mortality>] - .map(|transactions_mortality| { - if !(4..=65536).contains(&transactions_mortality) - || !transactions_mortality.is_power_of_two() - { - Err(anyhow::format_err!( - "Transactions mortality {} is not a power of two in a [4; 65536] range", - transactions_mortality, - )) - } else { - Ok(transactions_mortality) - } - }) - .transpose() - } - - /// Parse signing params into chain-specific KeyPair. 
- #[allow(dead_code)] - pub fn to_keypair(&self) -> anyhow::Result> { - let suri = match (self.[<$chain_prefix _signer>].as_ref(), self.[<$chain_prefix _signer_file>].as_ref()) { - (Some(suri), _) => suri.to_owned(), - (None, Some(suri_file)) => std::fs::read_to_string(suri_file) - .map_err(|err| anyhow::format_err!( - "Failed to read SURI from file {:?}: {}", - suri_file, - err, - ))?, - (None, None) => return Err(anyhow::format_err!( - "One of options must be specified: '{}' or '{}'", - stringify!([<$chain_prefix _signer>]), - stringify!([<$chain_prefix _signer_file>]), - )), - }; - - let suri_password = match ( - self.[<$chain_prefix _signer_password>].as_ref(), - self.[<$chain_prefix _signer_password_file>].as_ref(), - ) { - (Some(suri_password), _) => Some(suri_password.to_owned()), - (None, Some(suri_password_file)) => std::fs::read_to_string(suri_password_file) - .map(Some) - .map_err(|err| anyhow::format_err!( - "Failed to read SURI password from file {:?}: {}", - suri_password_file, - err, - ))?, - _ => None, - }; - - use sp_core::crypto::Pair; - - AccountKeyPairOf::::from_string( - &suri, - suri_password.as_deref() - ).map_err(|e| anyhow::format_err!("{:?}", e)) - } - - /// Return transaction parameters. - #[allow(dead_code)] - pub fn transaction_params( - &self, - ) -> anyhow::Result>> { - Ok(TransactionParams { - mortality: self.transactions_mortality()?, - signer: self.to_keypair::()?, - }) - } - } - } - }; -} - -/// Create chain-specific set of configuration objects: connection parameters, -/// signing parameters and bridge initialization parameters. -#[macro_export] -macro_rules! 
declare_chain_cli_schema { - ($chain:ident, $chain_prefix:ident) => { - $crate::declare_chain_runtime_version_params_cli_schema!($chain, $chain_prefix); - $crate::declare_chain_connection_params_cli_schema!($chain, $chain_prefix); - $crate::declare_chain_signing_params_cli_schema!($chain, $chain_prefix); - }; -} - -declare_chain_cli_schema!(Source, source); -declare_chain_cli_schema!(Target, target); -declare_chain_cli_schema!(Relaychain, relaychain); -declare_chain_cli_schema!(Parachain, parachain); - -#[cfg(test)] -mod tests { - use super::*; - use sp_core::Pair; - - #[test] - fn reads_suri_from_file() { - const ALICE: &str = "//Alice"; - const BOB: &str = "//Bob"; - const ALICE_PASSWORD: &str = "alice_password"; - const BOB_PASSWORD: &str = "bob_password"; - - let alice: sp_core::sr25519::Pair = Pair::from_string(ALICE, Some(ALICE_PASSWORD)).unwrap(); - let bob: sp_core::sr25519::Pair = Pair::from_string(BOB, Some(BOB_PASSWORD)).unwrap(); - let bob_with_alice_password = - sp_core::sr25519::Pair::from_string(BOB, Some(ALICE_PASSWORD)).unwrap(); - - let temp_dir = tempfile::tempdir().unwrap(); - let mut suri_file_path = temp_dir.path().to_path_buf(); - let mut password_file_path = temp_dir.path().to_path_buf(); - suri_file_path.push("suri"); - password_file_path.push("password"); - std::fs::write(&suri_file_path, BOB.as_bytes()).unwrap(); - std::fs::write(&password_file_path, BOB_PASSWORD.as_bytes()).unwrap(); - - // when both seed and password are read from file - assert_eq!( - TargetSigningParams { - target_signer: Some(ALICE.into()), - target_signer_password: Some(ALICE_PASSWORD.into()), - - target_signer_file: None, - target_signer_password_file: None, - - target_transactions_mortality: None, - } - .to_keypair::() - .map(|p| p.public()) - .map_err(drop), - Ok(alice.public()), - ); - - // when both seed and password are read from file - assert_eq!( - TargetSigningParams { - target_signer: None, - target_signer_password: None, - - target_signer_file: 
Some(suri_file_path.clone()), - target_signer_password_file: Some(password_file_path.clone()), - - target_transactions_mortality: None, - } - .to_keypair::() - .map(|p| p.public()) - .map_err(drop), - Ok(bob.public()), - ); - - // when password are is overriden by cli option - assert_eq!( - TargetSigningParams { - target_signer: None, - target_signer_password: Some(ALICE_PASSWORD.into()), - - target_signer_file: Some(suri_file_path.clone()), - target_signer_password_file: Some(password_file_path.clone()), - - target_transactions_mortality: None, - } - .to_keypair::() - .map(|p| p.public()) - .map_err(drop), - Ok(bob_with_alice_password.public()), - ); - - // when both seed and password are overriden by cli options - assert_eq!( - TargetSigningParams { - target_signer: Some(ALICE.into()), - target_signer_password: Some(ALICE_PASSWORD.into()), - - target_signer_file: Some(suri_file_path), - target_signer_password_file: Some(password_file_path), - - target_transactions_mortality: None, - } - .to_keypair::() - .map(|p| p.public()) - .map_err(drop), - Ok(alice.public()), - ); - } -} diff --git a/relays/bin-substrate/src/cli/mod.rs b/relays/bin-substrate/src/cli/mod.rs deleted file mode 100644 index 6d799023cdbc0b1826ef6f20745eaf8663a53c97..0000000000000000000000000000000000000000 --- a/relays/bin-substrate/src/cli/mod.rs +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Deal with CLI args of substrate-to-substrate relay. - -use async_std::prelude::*; -use codec::{Decode, Encode}; -use futures::{select, FutureExt}; -use rbtag::BuildInfo; -use signal_hook::consts::*; -use signal_hook_async_std::Signals; -use structopt::{clap::arg_enum, StructOpt}; -use strum::{EnumString, VariantNames}; - -use bp_messages::LaneId; -use relay_substrate_client::SimpleRuntimeVersion; - -pub(crate) mod bridge; - -mod chain_schema; -mod detect_equivocations; -mod init_bridge; -mod relay_headers; -mod relay_headers_and_messages; -mod relay_messages; -mod relay_parachains; - -/// The target that will be used when publishing logs related to this pallet. -pub const LOG_TARGET: &str = "bridge"; - -/// Parse relay CLI args. -pub fn parse_args() -> Command { - Command::from_args() -} - -/// Substrate-to-Substrate bridge utilities. -#[derive(StructOpt)] -#[structopt(about = "Substrate-to-Substrate relay")] -pub enum Command { - /// Start headers relay between two chains. - /// - /// The on-chain bridge component should have been already initialized with - /// `init-bridge` sub-command. - RelayHeaders(relay_headers::RelayHeaders), - /// Start messages relay between two chains. - /// - /// Ties up to `Messages` pallets on both chains and starts relaying messages. - /// Requires the header relay to be already running. - RelayMessages(relay_messages::RelayMessages), - /// Start headers and messages relay between two Substrate chains. - /// - /// This high-level relay internally starts four low-level relays: two `RelayHeaders` - /// and two `RelayMessages` relays. Headers are only relayed when they are required by - /// the message relays - i.e. when there are messages or confirmations that needs to be - /// relayed between chains. - RelayHeadersAndMessages(Box), - /// Initialize on-chain bridge pallet with current header data. 
- /// - /// Sends initialization transaction to bootstrap the bridge with current finalized block data. - InitBridge(init_bridge::InitBridge), - /// Relay parachain heads. - RelayParachains(relay_parachains::RelayParachains), - /// Detect and report equivocations. - /// - /// Parses the source chain headers that were synchronized with the target chain looking for - /// equivocations. If any equivocation is found, it is reported to the source chain. - DetectEquivocations(detect_equivocations::DetectEquivocations), -} - -impl Command { - // Initialize logger depending on the command. - fn init_logger(&self) { - use relay_utils::initialize::{initialize_logger, initialize_relay}; - - match self { - Self::RelayHeaders(_) | - Self::RelayMessages(_) | - Self::RelayHeadersAndMessages(_) | - Self::InitBridge(_) => { - initialize_relay(); - }, - _ => { - initialize_logger(false); - }, - } - } - - /// Run the command. - async fn do_run(self) -> anyhow::Result<()> { - match self { - Self::RelayHeaders(arg) => arg.run().await?, - Self::RelayMessages(arg) => arg.run().await?, - Self::RelayHeadersAndMessages(arg) => arg.run().await?, - Self::InitBridge(arg) => arg.run().await?, - Self::RelayParachains(arg) => arg.run().await?, - Self::DetectEquivocations(arg) => arg.run().await?, - } - Ok(()) - } - - /// Run the command. - pub async fn run(self) { - self.init_logger(); - - let exit_signals = match Signals::new([SIGINT, SIGTERM]) { - Ok(signals) => signals, - Err(e) => { - log::error!(target: LOG_TARGET, "Could not register exit signals: {}", e); - return - }, - }; - let run = self.do_run().fuse(); - futures::pin_mut!(exit_signals, run); - - select! { - signal = exit_signals.next().fuse() => { - log::info!(target: LOG_TARGET, "Received exit signal {:?}", signal); - }, - result = run => { - if let Err(e) = result { - log::error!(target: LOG_TARGET, "substrate-relay: {}", e); - } - }, - } - } -} - -arg_enum! 
{ - #[derive(Debug)] - /// The origin to use when dispatching the message on the target chain. - /// - /// - `Target` uses account existing on the target chain (requires target private key). - /// - `Origin` uses account derived from the source-chain account. - pub enum Origins { - Target, - Source, - } -} - -/// Bridge-supported network definition. -/// -/// Used to abstract away CLI commands. -pub trait CliChain: relay_substrate_client::Chain { - /// Current version of the chain runtime, known to relay. - /// - /// can be `None` if relay is not going to submit transactions to that chain. - const RUNTIME_VERSION: Option; -} - -/// Lane id. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct HexLaneId(pub [u8; 4]); - -impl From for LaneId { - fn from(lane_id: HexLaneId) -> LaneId { - LaneId(lane_id.0) - } -} - -impl std::str::FromStr for HexLaneId { - type Err = hex::FromHexError; - - fn from_str(s: &str) -> Result { - let mut lane_id = [0u8; 4]; - hex::decode_to_slice(s, &mut lane_id)?; - Ok(HexLaneId(lane_id)) - } -} - -/// Nicer formatting for raw bytes vectors. -#[derive(Default, Encode, Decode, PartialEq, Eq)] -pub struct HexBytes(pub Vec); - -impl std::str::FromStr for HexBytes { - type Err = hex::FromHexError; - - fn from_str(s: &str) -> Result { - Ok(Self(hex::decode(s)?)) - } -} - -impl std::fmt::Debug for HexBytes { - fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(fmt, "0x{self}") - } -} - -impl std::fmt::Display for HexBytes { - fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(fmt, "{}", hex::encode(&self.0)) - } -} - -/// Prometheus metrics params. -#[derive(Clone, Debug, PartialEq, StructOpt)] -pub struct PrometheusParams { - /// Do not expose a Prometheus metric endpoint. - #[structopt(long)] - pub no_prometheus: bool, - /// Expose Prometheus endpoint at given interface. 
- #[structopt(long, default_value = "127.0.0.1")] - pub prometheus_host: String, - /// Expose Prometheus endpoint at given port. - #[structopt(long, default_value = "9616")] - pub prometheus_port: u16, -} - -/// Struct to get git commit info and build time. -#[derive(BuildInfo)] -struct SubstrateRelayBuildInfo; - -impl SubstrateRelayBuildInfo { - /// Get git commit in form ``. - pub fn get_git_commit() -> String { - // on gitlab we use images without git installed, so we can't use `rbtag` there - // locally we don't have `CI_*` env variables, so we can't rely on them - // => we are using `CI_*` env variables or else `rbtag` - let maybe_sha_from_ci = option_env!("CI_COMMIT_SHORT_SHA"); - maybe_sha_from_ci - .map(|short_sha| { - // we assume that on CI the copy is always clean - format!("{short_sha}-clean") - }) - .unwrap_or_else(|| SubstrateRelayBuildInfo.get_build_commit().into()) - } -} - -impl PrometheusParams { - /// Tries to convert CLI metrics params into metrics params, used by the relay. - pub fn into_metrics_params(self) -> anyhow::Result { - let metrics_address = if !self.no_prometheus { - Some(relay_utils::metrics::MetricsAddress { - host: self.prometheus_host, - port: self.prometheus_port, - }) - } else { - None - }; - - let relay_version = option_env!("CARGO_PKG_VERSION").unwrap_or("unknown"); - let relay_commit = SubstrateRelayBuildInfo::get_git_commit(); - relay_utils::metrics::MetricsParams::new( - metrics_address, - relay_version.into(), - relay_commit, - ) - .map_err(|e| anyhow::format_err!("{:?}", e)) - } -} - -/// Either explicit or maximal allowed value. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum ExplicitOrMaximal { - /// User has explicitly specified argument value. - Explicit(V), - /// Maximal allowed value for this argument. 
- Maximal, -} - -impl std::str::FromStr for ExplicitOrMaximal -where - V::Err: std::fmt::Debug, -{ - type Err = String; - - fn from_str(s: &str) -> Result { - if s.to_lowercase() == "max" { - return Ok(ExplicitOrMaximal::Maximal) - } - - V::from_str(s) - .map(ExplicitOrMaximal::Explicit) - .map_err(|e| format!("Failed to parse '{e:?}'. Expected 'max' or explicit value")) - } -} - -#[doc = "Runtime version params."] -#[derive(StructOpt, Debug, PartialEq, Eq, Clone, Copy, EnumString, VariantNames)] -pub enum RuntimeVersionType { - /// Auto query version from chain - Auto, - /// Custom `spec_version` and `transaction_version` - Custom, - /// Read version from bundle dependencies directly. - Bundle, -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn hex_bytes_display_matches_from_str_for_clap() { - // given - let hex = HexBytes(vec![1, 2, 3, 4]); - let display = format!("{hex}"); - - // when - let hex2: HexBytes = display.parse().unwrap(); - - // then - assert_eq!(hex.0, hex2.0); - } -} diff --git a/relays/bin-substrate/src/cli/relay_headers_and_messages/parachain_to_parachain.rs b/relays/bin-substrate/src/cli/relay_headers_and_messages/parachain_to_parachain.rs deleted file mode 100644 index da286ce5d58a89e3cfb16797f199ec52bd1e51a3..0000000000000000000000000000000000000000 --- a/relays/bin-substrate/src/cli/relay_headers_and_messages/parachain_to_parachain.rs +++ /dev/null @@ -1,213 +0,0 @@ -// Copyright 2019-2022 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use async_trait::async_trait; -use std::sync::Arc; - -use crate::cli::{ - bridge::{CliBridgeBase, MessagesCliBridge, ParachainToRelayHeadersCliBridge}, - relay_headers_and_messages::{Full2WayBridgeBase, Full2WayBridgeCommonParams}, - CliChain, -}; -use bp_polkadot_core::parachains::ParaHash; -use pallet_bridge_parachains::{RelayBlockHash, RelayBlockHasher, RelayBlockNumber}; -use relay_substrate_client::{ - AccountIdOf, AccountKeyPairOf, Chain, ChainWithTransactions, Client, Parachain, -}; -use sp_core::Pair; -use substrate_relay_helper::{ - finality::SubstrateFinalitySyncPipeline, - on_demand::{ - headers::OnDemandHeadersRelay, parachains::OnDemandParachainsRelay, OnDemandRelay, - }, -}; - -/// A base relay between two parachain from different consensus systems. -/// -/// Such relay starts 2 messages relay. It also starts 2 on-demand header relays and 2 on-demand -/// parachain heads relay. -pub struct ParachainToParachainBridge< - L2R: MessagesCliBridge + ParachainToRelayHeadersCliBridge, - R2L: MessagesCliBridge + ParachainToRelayHeadersCliBridge, -> where - ::Source: Parachain, - ::Source: Parachain, -{ - /// Parameters that are shared by all bridge types. - pub common: - Full2WayBridgeCommonParams<::Target, ::Target>, - /// Client of the left relay chain. - pub left_relay: Client<::SourceRelay>, - /// Client of the right relay chain. - pub right_relay: Client<::SourceRelay>, -} - -macro_rules! declare_parachain_to_parachain_bridge_schema { - // left-parachain, relay-chain-of-left-parachain, right-parachain, relay-chain-of-right-parachain - ($left_parachain:ident, $left_chain:ident, $right_parachain:ident, $right_chain:ident) => { - bp_runtime::paste::item! 
{ - #[doc = $left_parachain ", " $left_chain ", " $right_parachain " and " $right_chain " headers+parachains+messages relay params."] - #[derive(Debug, PartialEq, StructOpt)] - pub struct [<$left_parachain $right_parachain HeadersAndMessages>] { - // shared parameters - #[structopt(flatten)] - shared: HeadersAndMessagesSharedParams, - - #[structopt(flatten)] - left: [<$left_parachain ConnectionParams>], - // default signer, which is always used to sign messages relay transactions on the left chain - #[structopt(flatten)] - left_sign: [<$left_parachain SigningParams>], - - #[structopt(flatten)] - left_relay: [<$left_chain ConnectionParams>], - - #[structopt(flatten)] - right: [<$right_parachain ConnectionParams>], - // default signer, which is always used to sign messages relay transactions on the right chain - #[structopt(flatten)] - right_sign: [<$right_parachain SigningParams>], - - #[structopt(flatten)] - right_relay: [<$right_chain ConnectionParams>], - } - - impl [<$left_parachain $right_parachain HeadersAndMessages>] { - async fn into_bridge< - Left: ChainWithTransactions + CliChain + Parachain, - LeftRelay: CliChain, - Right: ChainWithTransactions + CliChain + Parachain, - RightRelay: CliChain, - L2R: CliBridgeBase - + MessagesCliBridge - + ParachainToRelayHeadersCliBridge, - R2L: CliBridgeBase - + MessagesCliBridge - + ParachainToRelayHeadersCliBridge, - >( - self, - ) -> anyhow::Result> { - Ok(ParachainToParachainBridge { - common: Full2WayBridgeCommonParams::new::( - self.shared, - BridgeEndCommonParams { - client: self.left.into_client::().await?, - tx_params: self.left_sign.transaction_params::()?, - accounts: vec![], - }, - BridgeEndCommonParams { - client: self.right.into_client::().await?, - tx_params: self.right_sign.transaction_params::()?, - accounts: vec![], - }, - )?, - left_relay: self.left_relay.into_client::().await?, - right_relay: self.right_relay.into_client::().await?, - }) - } - } - } - }; -} - -#[async_trait] -impl< - Left: Chain + 
ChainWithTransactions + CliChain + Parachain, - Right: Chain + ChainWithTransactions + CliChain + Parachain, - LeftRelay: Chain - + CliChain, - RightRelay: Chain - + CliChain, - L2R: CliBridgeBase - + MessagesCliBridge - + ParachainToRelayHeadersCliBridge, - R2L: CliBridgeBase - + MessagesCliBridge - + ParachainToRelayHeadersCliBridge, - > Full2WayBridgeBase for ParachainToParachainBridge -where - AccountIdOf: From< as Pair>::Public>, - AccountIdOf: From< as Pair>::Public>, -{ - type Params = ParachainToParachainBridge; - type Left = Left; - type Right = Right; - - fn common(&self) -> &Full2WayBridgeCommonParams { - &self.common - } - - fn mut_common(&mut self) -> &mut Full2WayBridgeCommonParams { - &mut self.common - } - - async fn start_on_demand_headers_relayers( - &mut self, - ) -> anyhow::Result<( - Arc>, - Arc>, - )> { - ::RelayFinality::start_relay_guards( - &self.common.right.client, - self.common.right.client.can_start_version_guard(), - ) - .await?; - ::RelayFinality::start_relay_guards( - &self.common.left.client, - self.common.left.client.can_start_version_guard(), - ) - .await?; - - let left_relay_to_right_on_demand_headers = - OnDemandHeadersRelay::<::RelayFinality>::new( - self.left_relay.clone(), - self.common.right.client.clone(), - self.common.right.tx_params.clone(), - self.common.shared.headers_to_relay(), - Some(self.common.metrics_params.clone()), - ); - let right_relay_to_left_on_demand_headers = - OnDemandHeadersRelay::<::RelayFinality>::new( - self.right_relay.clone(), - self.common.left.client.clone(), - self.common.left.tx_params.clone(), - self.common.shared.headers_to_relay(), - Some(self.common.metrics_params.clone()), - ); - - let left_to_right_on_demand_parachains = OnDemandParachainsRelay::< - ::ParachainFinality, - >::new( - self.left_relay.clone(), - self.common.right.client.clone(), - self.common.right.tx_params.clone(), - Arc::new(left_relay_to_right_on_demand_headers), - ); - let right_to_left_on_demand_parachains = 
OnDemandParachainsRelay::< - ::ParachainFinality, - >::new( - self.right_relay.clone(), - self.common.left.client.clone(), - self.common.left.tx_params.clone(), - Arc::new(right_relay_to_left_on_demand_headers), - ); - - Ok(( - Arc::new(left_to_right_on_demand_parachains), - Arc::new(right_to_left_on_demand_parachains), - )) - } -} diff --git a/relays/bin-substrate/src/cli/relay_headers_and_messages/relay_to_parachain.rs b/relays/bin-substrate/src/cli/relay_headers_and_messages/relay_to_parachain.rs deleted file mode 100644 index f7bf28c72fd666bca091172302369a94a5d9dbb8..0000000000000000000000000000000000000000 --- a/relays/bin-substrate/src/cli/relay_headers_and_messages/relay_to_parachain.rs +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright 2019-2022 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -use async_trait::async_trait; -use std::sync::Arc; - -use crate::cli::{ - bridge::{ - CliBridgeBase, MessagesCliBridge, ParachainToRelayHeadersCliBridge, - RelayToRelayHeadersCliBridge, - }, - relay_headers_and_messages::{Full2WayBridgeBase, Full2WayBridgeCommonParams}, - CliChain, -}; -use bp_polkadot_core::parachains::ParaHash; -use pallet_bridge_parachains::{RelayBlockHash, RelayBlockHasher, RelayBlockNumber}; -use relay_substrate_client::{ - AccountIdOf, AccountKeyPairOf, Chain, ChainWithTransactions, Client, Parachain, -}; -use sp_core::Pair; -use substrate_relay_helper::{ - finality::SubstrateFinalitySyncPipeline, - on_demand::{ - headers::OnDemandHeadersRelay, parachains::OnDemandParachainsRelay, OnDemandRelay, - }, -}; - -/// A base relay between standalone (relay) chain and a parachain from another consensus system. -/// -/// Such relay starts 2 messages relay. It also starts 2 on-demand header relays and 1 on-demand -/// parachain heads relay. -pub struct RelayToParachainBridge< - L2R: MessagesCliBridge + RelayToRelayHeadersCliBridge, - R2L: MessagesCliBridge + ParachainToRelayHeadersCliBridge, -> where - ::Source: Parachain, -{ - /// Parameters that are shared by all bridge types. - pub common: - Full2WayBridgeCommonParams<::Target, ::Target>, - /// Client of the right relay chain. - pub right_relay: Client<::SourceRelay>, -} - -macro_rules! declare_relay_to_parachain_bridge_schema { - // chain, parachain, relay-chain-of-parachain - ($left_chain:ident, $right_parachain:ident, $right_chain:ident) => { - bp_runtime::paste::item! 
{ - #[doc = $left_chain ", " $right_parachain " and " $right_chain " headers+parachains+messages relay params."] - #[derive(Debug, PartialEq, StructOpt)] - pub struct [<$left_chain $right_parachain HeadersAndMessages>] { - // shared parameters - #[structopt(flatten)] - shared: HeadersAndMessagesSharedParams, - - #[structopt(flatten)] - left: [<$left_chain ConnectionParams>], - // default signer, which is always used to sign messages relay transactions on the left chain - #[structopt(flatten)] - left_sign: [<$left_chain SigningParams>], - - #[structopt(flatten)] - right: [<$right_parachain ConnectionParams>], - // default signer, which is always used to sign messages relay transactions on the right chain - #[structopt(flatten)] - right_sign: [<$right_parachain SigningParams>], - - #[structopt(flatten)] - right_relay: [<$right_chain ConnectionParams>], - } - - impl [<$left_chain $right_parachain HeadersAndMessages>] { - async fn into_bridge< - Left: ChainWithTransactions + CliChain, - Right: ChainWithTransactions + CliChain + Parachain, - RightRelay: CliChain, - L2R: CliBridgeBase + MessagesCliBridge + RelayToRelayHeadersCliBridge, - R2L: CliBridgeBase - + MessagesCliBridge - + ParachainToRelayHeadersCliBridge, - >( - self, - ) -> anyhow::Result> { - Ok(RelayToParachainBridge { - common: Full2WayBridgeCommonParams::new::( - self.shared, - BridgeEndCommonParams { - client: self.left.into_client::().await?, - tx_params: self.left_sign.transaction_params::()?, - accounts: vec![], - }, - BridgeEndCommonParams { - client: self.right.into_client::().await?, - tx_params: self.right_sign.transaction_params::()?, - accounts: vec![], - }, - )?, - right_relay: self.right_relay.into_client::().await?, - }) - } - } - } - }; -} - -#[async_trait] -impl< - Left: ChainWithTransactions + CliChain, - Right: Chain + ChainWithTransactions + CliChain + Parachain, - RightRelay: Chain - + CliChain, - L2R: CliBridgeBase - + MessagesCliBridge - + RelayToRelayHeadersCliBridge, - R2L: 
CliBridgeBase - + MessagesCliBridge - + ParachainToRelayHeadersCliBridge, - > Full2WayBridgeBase for RelayToParachainBridge -where - AccountIdOf: From< as Pair>::Public>, - AccountIdOf: From< as Pair>::Public>, -{ - type Params = RelayToParachainBridge; - type Left = Left; - type Right = Right; - - fn common(&self) -> &Full2WayBridgeCommonParams { - &self.common - } - - fn mut_common(&mut self) -> &mut Full2WayBridgeCommonParams { - &mut self.common - } - - async fn start_on_demand_headers_relayers( - &mut self, - ) -> anyhow::Result<( - Arc>, - Arc>, - )> { - ::Finality::start_relay_guards( - &self.common.right.client, - self.common.right.client.can_start_version_guard(), - ) - .await?; - ::RelayFinality::start_relay_guards( - &self.common.left.client, - self.common.left.client.can_start_version_guard(), - ) - .await?; - - let left_to_right_on_demand_headers = - OnDemandHeadersRelay::<::Finality>::new( - self.common.left.client.clone(), - self.common.right.client.clone(), - self.common.right.tx_params.clone(), - self.common.shared.headers_to_relay(), - None, - ); - let right_relay_to_left_on_demand_headers = - OnDemandHeadersRelay::<::RelayFinality>::new( - self.right_relay.clone(), - self.common.left.client.clone(), - self.common.left.tx_params.clone(), - self.common.shared.headers_to_relay(), - Some(self.common.metrics_params.clone()), - ); - let right_to_left_on_demand_parachains = OnDemandParachainsRelay::< - ::ParachainFinality, - >::new( - self.right_relay.clone(), - self.common.left.client.clone(), - self.common.left.tx_params.clone(), - Arc::new(right_relay_to_left_on_demand_headers), - ); - - Ok(( - Arc::new(left_to_right_on_demand_headers), - Arc::new(right_to_left_on_demand_parachains), - )) - } -} diff --git a/relays/bin-substrate/src/cli/relay_headers_and_messages/relay_to_relay.rs b/relays/bin-substrate/src/cli/relay_headers_and_messages/relay_to_relay.rs deleted file mode 100644 index 
1ed2b3d4d9c7a900bd0c1efe52fe7f40a1d1abc3..0000000000000000000000000000000000000000 --- a/relays/bin-substrate/src/cli/relay_headers_and_messages/relay_to_relay.rs +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright 2019-2022 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -// we don't have any relay/standalone <> relay/standalone chain bridges, but we may need it in a -// future -#![allow(unused_macros)] - -use async_trait::async_trait; -use std::sync::Arc; - -use crate::cli::{ - bridge::{CliBridgeBase, MessagesCliBridge, RelayToRelayHeadersCliBridge}, - relay_headers_and_messages::{Full2WayBridgeBase, Full2WayBridgeCommonParams}, - CliChain, -}; -use relay_substrate_client::{AccountIdOf, AccountKeyPairOf, ChainWithTransactions}; -use sp_core::Pair; -use substrate_relay_helper::{ - finality::SubstrateFinalitySyncPipeline, - on_demand::{headers::OnDemandHeadersRelay, OnDemandRelay}, -}; - -/// A base relay between two standalone (relay) chains. -/// -/// Such relay starts 2 messages relay and 2 on-demand header relays. -pub struct RelayToRelayBridge< - L2R: MessagesCliBridge + RelayToRelayHeadersCliBridge, - R2L: MessagesCliBridge + RelayToRelayHeadersCliBridge, -> { - /// Parameters that are shared by all bridge types. - pub common: - Full2WayBridgeCommonParams<::Target, ::Target>, -} - -macro_rules! 
declare_relay_to_relay_bridge_schema { - ($left_chain:ident, $right_chain:ident) => { - bp_runtime::paste::item! { - #[doc = $left_chain " and " $right_chain " headers+messages relay params."] - #[derive(Debug, PartialEq, StructOpt)] - pub struct [<$left_chain $right_chain HeadersAndMessages>] { - #[structopt(flatten)] - shared: HeadersAndMessagesSharedParams, - - #[structopt(flatten)] - left: [<$left_chain ConnectionParams>], - // default signer, which is always used to sign messages relay transactions on the left chain - #[structopt(flatten)] - left_sign: [<$left_chain SigningParams>], - - #[structopt(flatten)] - right: [<$right_chain ConnectionParams>], - #[structopt(flatten)] - // default signer, which is always used to sign messages relay transactions on the right chain - right_sign: [<$right_chain SigningParams>], - } - - impl [<$left_chain $right_chain HeadersAndMessages>] { - async fn into_bridge< - Left: ChainWithTransactions + CliChain, - Right: ChainWithTransactions + CliChain, - L2R: CliBridgeBase + MessagesCliBridge + RelayToRelayHeadersCliBridge, - R2L: CliBridgeBase + MessagesCliBridge + RelayToRelayHeadersCliBridge, - >( - self, - ) -> anyhow::Result> { - Ok(RelayToRelayBridge { - common: Full2WayBridgeCommonParams::new::( - self.shared, - BridgeEndCommonParams { - client: self.left.into_client::().await?, - tx_params: self.left_sign.transaction_params::()?, - accounts: vec![], - }, - BridgeEndCommonParams { - client: self.right.into_client::().await?, - tx_params: self.right_sign.transaction_params::()?, - accounts: vec![], - }, - )?, - right_to_left_transaction_params: self.left_sign.transaction_params::(), - left_to_right_transaction_params: self.right_sign.transaction_params::(), - }) - } - } - } - }; -} - -#[async_trait] -impl< - Left: ChainWithTransactions + CliChain, - Right: ChainWithTransactions + CliChain, - L2R: CliBridgeBase - + MessagesCliBridge - + RelayToRelayHeadersCliBridge, - R2L: CliBridgeBase - + MessagesCliBridge - + 
RelayToRelayHeadersCliBridge, - > Full2WayBridgeBase for RelayToRelayBridge -where - AccountIdOf: From< as Pair>::Public>, - AccountIdOf: From< as Pair>::Public>, -{ - type Params = RelayToRelayBridge; - type Left = Left; - type Right = Right; - - fn common(&self) -> &Full2WayBridgeCommonParams { - &self.common - } - - fn mut_common(&mut self) -> &mut Full2WayBridgeCommonParams { - &mut self.common - } - - async fn start_on_demand_headers_relayers( - &mut self, - ) -> anyhow::Result<( - Arc>, - Arc>, - )> { - ::Finality::start_relay_guards( - &self.common.right.client, - self.common.right.client.can_start_version_guard(), - ) - .await?; - ::Finality::start_relay_guards( - &self.common.left.client, - self.common.left.client.can_start_version_guard(), - ) - .await?; - - let left_to_right_on_demand_headers = - OnDemandHeadersRelay::<::Finality>::new( - self.common.left.client.clone(), - self.common.right.client.clone(), - self.common.right.tx_params.clone(), - self.common.shared.headers_to_relay(), - None, - ); - let right_to_left_on_demand_headers = - OnDemandHeadersRelay::<::Finality>::new( - self.common.right.client.clone(), - self.common.left.client.clone(), - self.common.left.tx_params.clone(), - self.common.shared.headers_to_relay(), - None, - ); - - Ok((Arc::new(left_to_right_on_demand_headers), Arc::new(right_to_left_on_demand_headers))) - } -} diff --git a/relays/client-bridge-hub-kusama/Cargo.toml b/relays/client-bridge-hub-kusama/Cargo.toml deleted file mode 100644 index 30177443c3e50b382bc1222e80697997e1db538c..0000000000000000000000000000000000000000 --- a/relays/client-bridge-hub-kusama/Cargo.toml +++ /dev/null @@ -1,32 +0,0 @@ -[package] -name = "relay-bridge-hub-kusama-client" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", features = ["derive"] } -scale-info = { 
version = "2.10.0", default-features = false, features = ["derive"] } -#relay-substrate-client = { path = "../client-substrate" } -subxt = { version = "0.32.1", default-features = false, features = ["native"] } - -# Bridge dependencies - -bp-bridge-hub-kusama = { path = "../../primitives/chain-bridge-hub-kusama" } -bp-header-chain = { path = "../../primitives/header-chain" } -bp-messages = { path = "../../primitives/messages" } -bp-parachains = { path = "../../primitives/parachains" } -bp-polkadot = { path = "../../primitives/chain-polkadot" } -bp-polkadot-core = { path = "../../primitives/polkadot-core" } -bridge-runtime-common = { path = "../../bin/runtime-common" } -relay-substrate-client = { path = "../client-substrate" } - -# Substrate Dependencies - -sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-weights = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } diff --git a/relays/client-bridge-hub-polkadot/Cargo.toml b/relays/client-bridge-hub-polkadot/Cargo.toml deleted file mode 100644 index a85e2d685c486b00b62dcbdd821b225c7449f983..0000000000000000000000000000000000000000 --- a/relays/client-bridge-hub-polkadot/Cargo.toml +++ /dev/null @@ -1,33 +0,0 @@ -[package] -name = "relay-bridge-hub-polkadot-client" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -subxt = { version = "0.32.1", default-features = false, features = ["native"] } - -# Bridge dependencies - -bp-bridge-hub-polkadot = { path = "../../primitives/chain-bridge-hub-polkadot" } -bp-header-chain = { path = "../../primitives/header-chain" } -bp-messages = 
{ path = "../../primitives/messages" } -bp-parachains = { path = "../../primitives/parachains" } -bp-polkadot = { path = "../../primitives/chain-polkadot" } -bp-polkadot-core = { path = "../../primitives/polkadot-core" } -bp-kusama = { path = "../../primitives/chain-kusama" } -bp-runtime = { path = "../../primitives/runtime" } -bridge-runtime-common = { path = "../../bin/runtime-common" } -relay-substrate-client = { path = "../client-substrate" } - -# Substrate Dependencies - -sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-weights = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } diff --git a/relays/client-bridge-hub-rococo/Cargo.toml b/relays/client-bridge-hub-rococo/Cargo.toml deleted file mode 100644 index ea59240fd92741b76bce7de4cc9cba2bbd1664a6..0000000000000000000000000000000000000000 --- a/relays/client-bridge-hub-rococo/Cargo.toml +++ /dev/null @@ -1,32 +0,0 @@ -[package] -name = "relay-bridge-hub-rococo-client" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -subxt = { version = "0.32.1", default-features = false, features = ["native"] } - -# Bridge dependencies - -bp-bridge-hub-rococo = { path = "../../primitives/chain-bridge-hub-rococo" } -bp-header-chain = { path = "../../primitives/header-chain" } -bp-messages = { path = "../../primitives/messages" } -bp-parachains = { path = "../../primitives/parachains" } -bp-polkadot-core = { path = "../../primitives/polkadot-core" } -bp-runtime = { path = "../../primitives/runtime" } - -bridge-runtime-common = { path = "../../bin/runtime-common" } 
-relay-substrate-client = { path = "../client-substrate" } - -# Substrate Dependencies - -sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-weights = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } diff --git a/relays/client-bridge-hub-westend/Cargo.toml b/relays/client-bridge-hub-westend/Cargo.toml deleted file mode 100644 index 18b4f3230ede25fbd65ca03a402d41d47398120d..0000000000000000000000000000000000000000 --- a/relays/client-bridge-hub-westend/Cargo.toml +++ /dev/null @@ -1,36 +0,0 @@ -[package] -name = "relay-bridge-hub-westend-client" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -subxt = { version = "0.32.1", default-features = false, features = ["native"] } - -# Bridge dependencies - -bp-bridge-hub-westend = { path = "../../primitives/chain-bridge-hub-westend" } -bp-header-chain = { path = "../../primitives/header-chain" } -bp-messages = { path = "../../primitives/messages" } -bp-parachains = { path = "../../primitives/parachains" } -bp-polkadot-core = { path = "../../primitives/polkadot-core" } -bp-rococo = { path = "../../primitives/chain-rococo" } - -bridge-runtime-common = { path = "../../bin/runtime-common" } -relay-substrate-client = { path = "../client-substrate" } - -# Substrate Dependencies - -sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-weights = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } - -[dev-dependencies] -bp-runtime = { path = 
"../../primitives/runtime" } -sp-consensus-grandpa = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } diff --git a/relays/client-kusama/Cargo.toml b/relays/client-kusama/Cargo.toml deleted file mode 100644 index 3fa32aa4844a58f718e8c2c2046f2eb38d391f59..0000000000000000000000000000000000000000 --- a/relays/client-kusama/Cargo.toml +++ /dev/null @@ -1,31 +0,0 @@ -[package] -name = "relay-kusama-client" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -subxt = { version = "0.32.1", default-features = false, features = ["native"] } - -# Bridge dependencies - -bp-kusama = { path = "../../primitives/chain-kusama" } -bp-polkadot-core = { path = "../../primitives/polkadot-core" } -bp-runtime = { path = "../../primitives/runtime" } - -relay-substrate-client = { path = "../client-substrate" } -relay-utils = { path = "../utils" } - -# Substrate Dependencies - -sp-consensus-grandpa = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-session = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-weights = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } diff --git a/relays/client-polkadot-bulletin/Cargo.toml b/relays/client-polkadot-bulletin/Cargo.toml deleted file mode 100644 index b0eafc963389a36ac0f1cd6888d66cf0d0e54355..0000000000000000000000000000000000000000 --- a/relays/client-polkadot-bulletin/Cargo.toml +++ /dev/null @@ -1,33 +0,0 @@ -[package] -name = "relay-polkadot-bulletin-client" -version = 
"0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -subxt = { version = "0.32.1", default-features = false, features = ["native"] } - -# Bridge dependencies - -bp-header-chain = { path = "../../primitives/header-chain" } -bp-messages = { path = "../../primitives/messages" } -bp-polkadot-core = { path = "../../primitives/polkadot-core" } -bp-polkadot-bulletin = { path = "../../primitives/chain-polkadot-bulletin" } -bp-runtime = { path = "../../primitives/runtime" } -bridge-runtime-common = { path = "../../bin/runtime-common" } -relay-substrate-client = { path = "../client-substrate" } -relay-utils = { path = "../utils" } - -# Substrate Dependencies - -sp-consensus-grandpa = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-session = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-weights = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } diff --git a/relays/client-polkadot/Cargo.toml b/relays/client-polkadot/Cargo.toml deleted file mode 100644 index 52c836e14566a8f85a2ad667b781509d8660fd04..0000000000000000000000000000000000000000 --- a/relays/client-polkadot/Cargo.toml +++ /dev/null @@ -1,31 +0,0 @@ -[package] -name = "relay-polkadot-client" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", features = ["derive"] } -scale-info = { version = 
"2.10.0", default-features = false, features = ["derive"] } -subxt = { version = "0.32.1", default-features = false, features = ["native"] } - -# Bridge dependencies - -bp-polkadot = { path = "../../primitives/chain-polkadot" } -bp-polkadot-core = { path = "../../primitives/polkadot-core" } -bp-runtime = { path = "../../primitives/runtime" } - -relay-substrate-client = { path = "../client-substrate" } -relay-utils = { path = "../utils" } - -# Substrate Dependencies - -sp-consensus-grandpa = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-session = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-weights = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } diff --git a/relays/client-substrate/Cargo.toml b/relays/client-substrate/Cargo.toml deleted file mode 100644 index 7ff2e2f45e727c1f4e56bc7f496556bad09d4fbb..0000000000000000000000000000000000000000 --- a/relays/client-substrate/Cargo.toml +++ /dev/null @@ -1,59 +0,0 @@ -[package] -name = "relay-substrate-client" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] -async-std = { version = "1.6.5", features = ["attributes"] } -async-trait = "0.1" -codec = { package = "parity-scale-codec", version = "3.1.5" } -futures = "0.3.30" -jsonrpsee = { version = "0.17", features = ["macros", "ws-client"] } -log = { workspace = true } -num-traits = "0.2" -rand = "0.8" -scale-info = { version = "2.10.0", features = ["derive"] } -tokio = { version = "1.36", features = ["rt-multi-thread"] } -thiserror = { workspace = true } - -# Bridge dependencies - -bp-header-chain = { path = "../../primitives/header-chain" } -bp-messages = { path = 
"../../primitives/messages" } -bp-polkadot-core = { path = "../../primitives/polkadot-core" } -bp-runtime = { path = "../../primitives/runtime" } -pallet-bridge-messages = { path = "../../modules/messages" } -finality-relay = { path = "../finality" } -relay-utils = { path = "../utils" } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -frame-system = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -pallet-balances = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -pallet-transaction-payment = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -pallet-utility = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sc-chain-spec = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sc-rpc-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sc-transaction-pool-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-consensus-grandpa = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-rpc = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-std = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-trie = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-version = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } - -# Polkadot Dependencies - -xcm = { package = "staging-xcm", git = "https://github.com/paritytech/polkadot-sdk", branch = "master", default-features = false } - -[features] -default = [] 
-test-helpers = [] diff --git a/relays/client-substrate/src/calls.rs b/relays/client-substrate/src/calls.rs deleted file mode 100644 index 71b9ec84aca30cc2384b50d4c99466798f46d72f..0000000000000000000000000000000000000000 --- a/relays/client-substrate/src/calls.rs +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2019-2023 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Basic runtime calls. - -use codec::{Decode, Encode}; -use scale_info::TypeInfo; -use sp_std::{boxed::Box, vec::Vec}; - -use xcm::{VersionedLocation, VersionedXcm}; - -/// A minimized version of `frame-system::Call` that can be used without a runtime. -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -#[allow(non_camel_case_types)] -pub enum SystemCall { - /// `frame-system::Call::remark` - #[codec(index = 1)] - remark(Vec), -} - -/// A minimized version of `pallet-utility::Call` that can be used without a runtime. -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -#[allow(non_camel_case_types)] -pub enum UtilityCall { - /// `pallet-utility::Call::batch_all` - #[codec(index = 2)] - batch_all(Vec), -} - -/// A minimized version of `pallet-sudo::Call` that can be used without a runtime. 
-#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -#[allow(non_camel_case_types)] -pub enum SudoCall { - /// `pallet-sudo::Call::sudo` - #[codec(index = 0)] - sudo(Box), -} - -/// A minimized version of `pallet-xcm::Call`, that can be used without a runtime. -#[derive(Encode, Decode, Debug, PartialEq, Eq, Clone, TypeInfo)] -#[allow(non_camel_case_types)] -pub enum XcmCall { - /// `pallet-xcm::Call::send` - #[codec(index = 0)] - send(Box, Box>), -} diff --git a/relays/client-substrate/src/chain.rs b/relays/client-substrate/src/chain.rs deleted file mode 100644 index cd410f0fbdc35cdbd2ea99a7c805c6a986c59fa3..0000000000000000000000000000000000000000 --- a/relays/client-substrate/src/chain.rs +++ /dev/null @@ -1,284 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -use crate::calls::UtilityCall; - -use bp_header_chain::ChainWithGrandpa as ChainWithGrandpaBase; -use bp_messages::ChainWithMessages as ChainWithMessagesBase; -use bp_runtime::{ - Chain as ChainBase, EncodedOrDecodedCall, HashOf, Parachain as ParachainBase, TransactionEra, - TransactionEraOf, UnderlyingChainProvider, -}; -use codec::{Codec, Decode, Encode}; -use jsonrpsee::core::{DeserializeOwned, Serialize}; -use num_traits::Zero; -use sc_transaction_pool_api::TransactionStatus; -use scale_info::TypeInfo; -use sp_core::{storage::StorageKey, Pair}; -use sp_runtime::{ - generic::SignedBlock, - traits::{Block as BlockT, Member}, - ConsensusEngineId, EncodedJustification, -}; -use std::{fmt::Debug, time::Duration}; - -/// Substrate-based chain from minimal relay-client point of view. -pub trait Chain: ChainBase + Clone { - /// Chain name. - const NAME: &'static str; - /// Name of the runtime API method that is returning best known finalized header number - /// and hash (as tuple). - /// - /// Keep in mind that this method is normally provided by the other chain, which is - /// bridged with this chain. - const BEST_FINALIZED_HEADER_ID_METHOD: &'static str; - /// Name of the runtime API method that is returning interval between source chain - /// headers that may be submitted for free to the target chain. - /// - /// Keep in mind that this method is normally provided by the other chain, which is - /// bridged with this chain. - const FREE_HEADERS_INTERVAL_METHOD: &'static str; - - /// Average block interval. - /// - /// How often blocks are produced on that chain. It's suggested to set this value - /// to match the block time of the chain. - const AVERAGE_BLOCK_INTERVAL: Duration; - - /// Block type. - type SignedBlock: Member + Serialize + DeserializeOwned + BlockWithJustification; - /// The aggregated `Call` type. - type Call: Clone + Codec + Debug + Send + Sync; -} - -/// Substrate-based relay chain that supports parachains. 
-/// -/// We assume that the parachains are supported using `runtime_parachains::paras` pallet. -pub trait RelayChain: Chain { - /// Name of the `runtime_parachains::paras` pallet in the runtime of this chain. - const PARAS_PALLET_NAME: &'static str; - /// Name of the `pallet-bridge-parachains`, deployed at the **bridged** chain to sync - /// parachains of **this** chain. - const WITH_CHAIN_BRIDGE_PARACHAINS_PALLET_NAME: &'static str; -} - -/// Substrate-based chain that is using direct GRANDPA finality from minimal relay-client point of -/// view. -/// -/// Keep in mind that parachains are relying on relay chain GRANDPA, so they should not implement -/// this trait. -pub trait ChainWithGrandpa: Chain + ChainWithGrandpaBase { - /// Name of the runtime API method that is returning the GRANDPA info associated with the - /// headers accepted by the `submit_finality_proofs` extrinsic in the queried block. - /// - /// Keep in mind that this method is normally provided by the other chain, which is - /// bridged with this chain. - const SYNCED_HEADERS_GRANDPA_INFO_METHOD: &'static str; - - /// The type of the key owner proof used by the grandpa engine. - type KeyOwnerProof: Decode + TypeInfo + Send; -} - -/// Substrate-based parachain from minimal relay-client point of view. -pub trait Parachain: Chain + ParachainBase {} - -impl Parachain for T where T: UnderlyingChainProvider + Chain + ParachainBase {} - -/// Substrate-based chain with messaging support from minimal relay-client point of view. -pub trait ChainWithMessages: Chain + ChainWithMessagesBase { - // TODO (https://github.com/paritytech/parity-bridges-common/issues/1692): check all the names - // after the issue is fixed - all names must be changed - - /// Name of the bridge relayers pallet (used in `construct_runtime` macro call) that is deployed - /// at some other chain to bridge with this `ChainWithMessages`. 
- /// - /// We assume that all chains that are bridging with this `ChainWithMessages` are using - /// the same name. - const WITH_CHAIN_RELAYERS_PALLET_NAME: Option<&'static str>; - - /// Name of the `ToOutboundLaneApi::message_details` runtime API method. - /// The method is provided by the runtime that is bridged with this `ChainWithMessages`. - const TO_CHAIN_MESSAGE_DETAILS_METHOD: &'static str; - - /// Name of the `FromInboundLaneApi::message_details` runtime API method. - /// The method is provided by the runtime that is bridged with this `ChainWithMessages`. - const FROM_CHAIN_MESSAGE_DETAILS_METHOD: &'static str; -} - -/// Call type used by the chain. -pub type CallOf = ::Call; -/// Transaction status of the chain. -pub type TransactionStatusOf = TransactionStatus, HashOf>; - -/// Substrate-based chain with `AccountData` generic argument of `frame_system::AccountInfo` set to -/// the `pallet_balances::AccountData`. -pub trait ChainWithBalances: Chain { - /// Return runtime storage key for getting `frame_system::AccountInfo` of given account. - fn account_info_storage_key(account_id: &Self::AccountId) -> StorageKey; -} - -/// SCALE-encoded extrinsic. -pub type EncodedExtrinsic = Vec; - -/// Block with justification. -pub trait BlockWithJustification
{ - /// Return block header. - fn header(&self) -> Header; - /// Return encoded block extrinsics. - fn extrinsics(&self) -> Vec; - /// Return block justification, if known. - fn justification(&self, engine_id: ConsensusEngineId) -> Option<&EncodedJustification>; -} - -/// Transaction before it is signed. -#[derive(Clone, Debug, PartialEq)] -pub struct UnsignedTransaction { - /// Runtime call of this transaction. - pub call: EncodedOrDecodedCall, - /// Transaction nonce. - pub nonce: C::Nonce, - /// Tip included into transaction. - pub tip: C::Balance, - /// Transaction era used by the chain. - pub era: TransactionEraOf, -} - -impl UnsignedTransaction { - /// Create new unsigned transaction with given call, nonce, era and zero tip. - pub fn new(call: EncodedOrDecodedCall, nonce: C::Nonce) -> Self { - Self { call, nonce, era: TransactionEra::Immortal, tip: Zero::zero() } - } - - /// Convert to the transaction of the other compatible chain. - pub fn switch_chain(self) -> UnsignedTransaction - where - Other: Chain< - Nonce = C::Nonce, - Balance = C::Balance, - BlockNumber = C::BlockNumber, - Hash = C::Hash, - >, - { - UnsignedTransaction { - call: EncodedOrDecodedCall::Encoded(self.call.into_encoded()), - nonce: self.nonce, - tip: self.tip, - era: self.era, - } - } - - /// Set transaction tip. - #[must_use] - pub fn tip(mut self, tip: C::Balance) -> Self { - self.tip = tip; - self - } - - /// Set transaction era. - #[must_use] - pub fn era(mut self, era: TransactionEraOf) -> Self { - self.era = era; - self - } -} - -/// Account key pair used by transactions signing scheme. -pub type AccountKeyPairOf = ::AccountKeyPair; - -/// Substrate-based chain transactions signing scheme. -pub trait ChainWithTransactions: Chain { - /// Type of key pairs used to sign transactions. - type AccountKeyPair: Pair + Clone + Send + Sync; - /// Signed transaction. 
- type SignedTransaction: Clone + Debug + Codec + Send + 'static; - - /// Create transaction for given runtime call, signed by given account. - fn sign_transaction( - param: SignParam, - unsigned: UnsignedTransaction, - ) -> Result - where - Self: Sized; -} - -/// Sign transaction parameters -pub struct SignParam { - /// Version of the runtime specification. - pub spec_version: u32, - /// Transaction version - pub transaction_version: u32, - /// Hash of the genesis block. - pub genesis_hash: HashOf, - /// Signer account - pub signer: AccountKeyPairOf, -} - -impl BlockWithJustification for SignedBlock { - fn header(&self) -> Block::Header { - self.block.header().clone() - } - - fn extrinsics(&self) -> Vec { - self.block.extrinsics().iter().map(Encode::encode).collect() - } - - fn justification(&self, engine_id: ConsensusEngineId) -> Option<&EncodedJustification> { - self.justifications.as_ref().and_then(|j| j.get(engine_id)) - } -} - -/// Trait that provides functionality defined inside `pallet-utility` -pub trait UtilityPallet { - /// Create batch call from given calls vector. - fn build_batch_call(calls: Vec) -> C::Call; -} - -/// Structure that implements `UtilityPalletProvider` based on a full runtime. -pub struct FullRuntimeUtilityPallet { - _phantom: std::marker::PhantomData, -} - -impl UtilityPallet for FullRuntimeUtilityPallet -where - C: Chain, - R: pallet_utility::Config, - ::RuntimeCall: From>, -{ - fn build_batch_call(calls: Vec) -> C::Call { - pallet_utility::Call::batch_all { calls }.into() - } -} - -/// Structure that implements `UtilityPalletProvider` based on a call conversion. -pub struct MockedRuntimeUtilityPallet { - _phantom: std::marker::PhantomData, -} - -impl UtilityPallet for MockedRuntimeUtilityPallet -where - C: Chain, - C::Call: From>, -{ - fn build_batch_call(calls: Vec) -> C::Call { - UtilityCall::batch_all(calls).into() - } -} - -/// Substrate-based chain that uses `pallet-utility`. 
-pub trait ChainWithUtilityPallet: Chain { - /// The utility pallet provider. - type UtilityPallet: UtilityPallet; -} diff --git a/relays/client-substrate/src/client.rs b/relays/client-substrate/src/client.rs deleted file mode 100644 index 8328e1ce8bec130e1fb3149d84d6e144b729b4ba..0000000000000000000000000000000000000000 --- a/relays/client-substrate/src/client.rs +++ /dev/null @@ -1,851 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate node client. 
- -use crate::{ - chain::{Chain, ChainWithTransactions}, - rpc::{ - SubstrateAuthorClient, SubstrateChainClient, SubstrateFinalityClient, - SubstrateFrameSystemClient, SubstrateStateClient, SubstrateSystemClient, - }, - transaction_stall_timeout, AccountKeyPairOf, ChainWithGrandpa, ConnectionParams, Error, HashOf, - HeaderIdOf, Result, SignParam, TransactionTracker, UnsignedTransaction, -}; - -use async_std::sync::{Arc, Mutex, RwLock}; -use async_trait::async_trait; -use bp_runtime::{HeaderIdProvider, StorageDoubleMapKeyProvider, StorageMapKeyProvider}; -use codec::{Decode, Encode}; -use frame_support::weights::Weight; -use futures::{SinkExt, StreamExt}; -use jsonrpsee::{ - core::DeserializeOwned, - ws_client::{WsClient as RpcClient, WsClientBuilder as RpcClientBuilder}, -}; -use num_traits::{Saturating, Zero}; -use pallet_transaction_payment::RuntimeDispatchInfo; -use relay_utils::{relay_loop::RECONNECT_DELAY, STALL_TIMEOUT}; -use sp_core::{ - storage::{StorageData, StorageKey}, - Bytes, Hasher, Pair, -}; -use sp_runtime::{ - traits::Header as HeaderT, - transaction_validity::{TransactionSource, TransactionValidity}, -}; -use sp_trie::StorageProof; -use sp_version::RuntimeVersion; -use std::future::Future; - -const SUB_API_GRANDPA_AUTHORITIES: &str = "GrandpaApi_grandpa_authorities"; -const SUB_API_GRANDPA_GENERATE_KEY_OWNERSHIP_PROOF: &str = - "GrandpaApi_generate_key_ownership_proof"; -const SUB_API_TXPOOL_VALIDATE_TRANSACTION: &str = "TaggedTransactionQueue_validate_transaction"; -const SUB_API_TX_PAYMENT_QUERY_INFO: &str = "TransactionPaymentApi_query_info"; -const MAX_SUBSCRIPTION_CAPACITY: usize = 4096; - -/// The difference between best block number and number of its ancestor, that is enough -/// for us to consider that ancestor an "ancient" block with dropped state. -/// -/// The relay does not assume that it is connected to the archive node, so it always tries -/// to use the best available chain state. 
But sometimes it still may use state of some -/// old block. If the state of that block is already dropped, relay will see errors when -/// e.g. it tries to prove something. -/// -/// By default Substrate-based nodes are storing state for last 256 blocks. We'll use -/// half of this value. -pub const ANCIENT_BLOCK_THRESHOLD: u32 = 128; - -/// Returns `true` if we think that the state is already discarded for given block. -pub fn is_ancient_block + PartialOrd + Saturating>(block: N, best: N) -> bool { - best.saturating_sub(block) >= N::from(ANCIENT_BLOCK_THRESHOLD) -} - -/// Opaque justifications subscription type. -pub struct Subscription(pub(crate) Mutex>>); - -/// Opaque GRANDPA authorities set. -pub type OpaqueGrandpaAuthoritiesSet = Vec; - -/// A simple runtime version. It only includes the `spec_version` and `transaction_version`. -#[derive(Copy, Clone, Debug)] -pub struct SimpleRuntimeVersion { - /// Version of the runtime specification. - pub spec_version: u32, - /// All existing dispatches are fully compatible when this number doesn't change. - pub transaction_version: u32, -} - -impl SimpleRuntimeVersion { - /// Create a new instance of `SimpleRuntimeVersion` from a `RuntimeVersion`. - pub const fn from_runtime_version(runtime_version: &RuntimeVersion) -> Self { - Self { - spec_version: runtime_version.spec_version, - transaction_version: runtime_version.transaction_version, - } - } -} - -/// Chain runtime version in client -#[derive(Clone, Debug)] -pub enum ChainRuntimeVersion { - /// Auto query from chain. - Auto, - /// Custom runtime version, defined by user. - Custom(SimpleRuntimeVersion), -} - -/// Substrate client type. -/// -/// Cloning `Client` is a cheap operation that only clones internal references. Different -/// clones of the same client are guaranteed to use the same references. -pub struct Client { - // Lock order: `submit_signed_extrinsic_lock`, `data` - /// Client connection params. - params: Arc, - /// Saved chain runtime version. 
- chain_runtime_version: ChainRuntimeVersion, - /// If several tasks are submitting their transactions simultaneously using - /// `submit_signed_extrinsic` method, they may get the same transaction nonce. So one of - /// transactions will be rejected from the pool. This lock is here to prevent situations like - /// that. - submit_signed_extrinsic_lock: Arc>, - /// Genesis block hash. - genesis_hash: HashOf, - /// Shared dynamic data. - data: Arc>, -} - -/// Client data, shared by all `Client` clones. -struct ClientData { - /// Tokio runtime handle. - tokio: Arc, - /// Substrate RPC client. - client: Arc, -} - -/// Already encoded value. -struct PreEncoded(Vec); - -impl Encode for PreEncoded { - fn encode(&self) -> Vec { - self.0.clone() - } -} - -#[async_trait] -impl relay_utils::relay_loop::Client for Client { - type Error = Error; - - async fn reconnect(&mut self) -> Result<()> { - let mut data = self.data.write().await; - let (tokio, client) = Self::build_client(&self.params).await?; - data.tokio = tokio; - data.client = client; - Ok(()) - } -} - -impl Clone for Client { - fn clone(&self) -> Self { - Client { - params: self.params.clone(), - chain_runtime_version: self.chain_runtime_version.clone(), - submit_signed_extrinsic_lock: self.submit_signed_extrinsic_lock.clone(), - genesis_hash: self.genesis_hash, - data: self.data.clone(), - } - } -} - -impl std::fmt::Debug for Client { - fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - fmt.debug_struct("Client").field("genesis_hash", &self.genesis_hash).finish() - } -} - -impl Client { - /// Returns client that is able to call RPCs on Substrate node over websocket connection. - /// - /// This function will keep connecting to given Substrate node until connection is established - /// and is functional. If attempt fail, it will wait for `RECONNECT_DELAY` and retry again. 
- pub async fn new(params: ConnectionParams) -> Self { - let params = Arc::new(params); - loop { - match Self::try_connect(params.clone()).await { - Ok(client) => return client, - Err(error) => log::error!( - target: "bridge", - "Failed to connect to {} node: {:?}. Going to retry in {}s", - C::NAME, - error, - RECONNECT_DELAY.as_secs(), - ), - } - - async_std::task::sleep(RECONNECT_DELAY).await; - } - } - - /// Try to connect to Substrate node over websocket. Returns Substrate RPC client if connection - /// has been established or error otherwise. - pub async fn try_connect(params: Arc) -> Result { - let (tokio, client) = Self::build_client(¶ms).await?; - - let number: C::BlockNumber = Zero::zero(); - let genesis_hash_client = client.clone(); - let genesis_hash = tokio - .spawn(async move { - SubstrateChainClient::::block_hash(&*genesis_hash_client, Some(number)).await - }) - .await??; - - let chain_runtime_version = params.chain_runtime_version.clone(); - Ok(Self { - params, - chain_runtime_version, - submit_signed_extrinsic_lock: Arc::new(Mutex::new(())), - genesis_hash, - data: Arc::new(RwLock::new(ClientData { tokio, client })), - }) - } - - /// Build client to use in connection. - async fn build_client( - params: &ConnectionParams, - ) -> Result<(Arc, Arc)> { - let tokio = tokio::runtime::Runtime::new()?; - let uri = format!( - "{}://{}:{}", - if params.secure { "wss" } else { "ws" }, - params.host, - params.port, - ); - log::info!(target: "bridge", "Connecting to {} node at {}", C::NAME, uri); - - let client = tokio - .spawn(async move { - RpcClientBuilder::default() - .max_buffer_capacity_per_subscription(MAX_SUBSCRIPTION_CAPACITY) - .build(&uri) - .await - }) - .await??; - - Ok((Arc::new(tokio), Arc::new(client))) - } -} - -impl Client { - /// Return simple runtime version, only include `spec_version` and `transaction_version`. 
- pub async fn simple_runtime_version(&self) -> Result { - Ok(match &self.chain_runtime_version { - ChainRuntimeVersion::Auto => { - let runtime_version = self.runtime_version().await?; - SimpleRuntimeVersion::from_runtime_version(&runtime_version) - }, - ChainRuntimeVersion::Custom(version) => *version, - }) - } - - /// Returns true if client is connected to at least one peer and is in synced state. - pub async fn ensure_synced(&self) -> Result<()> { - self.jsonrpsee_execute(|client| async move { - let health = SubstrateSystemClient::::health(&*client).await?; - let is_synced = !health.is_syncing && (!health.should_have_peers || health.peers > 0); - if is_synced { - Ok(()) - } else { - Err(Error::ClientNotSynced(health)) - } - }) - .await - } - - /// Return hash of the genesis block. - pub fn genesis_hash(&self) -> &C::Hash { - &self.genesis_hash - } - - /// Return hash of the best finalized block. - pub async fn best_finalized_header_hash(&self) -> Result { - self.jsonrpsee_execute(|client| async move { - Ok(SubstrateChainClient::::finalized_head(&*client).await?) - }) - .await - .map_err(|e| Error::FailedToReadBestFinalizedHeaderHash { - chain: C::NAME.into(), - error: e.boxed(), - }) - } - - /// Return number of the best finalized block. - pub async fn best_finalized_header_number(&self) -> Result { - Ok(*self.best_finalized_header().await?.number()) - } - - /// Return header of the best finalized block. - pub async fn best_finalized_header(&self) -> Result { - self.header_by_hash(self.best_finalized_header_hash().await?).await - } - - /// Returns the best Substrate header. - pub async fn best_header(&self) -> Result - where - C::Header: DeserializeOwned, - { - self.jsonrpsee_execute(|client| async move { - Ok(SubstrateChainClient::::header(&*client, None).await?) - }) - .await - .map_err(|e| Error::FailedToReadBestHeader { chain: C::NAME.into(), error: e.boxed() }) - } - - /// Get a Substrate block from its hash. 
- pub async fn get_block(&self, block_hash: Option) -> Result { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateChainClient::::block(&*client, block_hash).await?) - }) - .await - } - - /// Get a Substrate header by its hash. - pub async fn header_by_hash(&self, block_hash: C::Hash) -> Result - where - C::Header: DeserializeOwned, - { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateChainClient::::header(&*client, Some(block_hash)).await?) - }) - .await - .map_err(|e| Error::FailedToReadHeaderByHash { - chain: C::NAME.into(), - hash: format!("{block_hash}"), - error: e.boxed(), - }) - } - - /// Get a Substrate block hash by its number. - pub async fn block_hash_by_number(&self, number: C::BlockNumber) -> Result { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateChainClient::::block_hash(&*client, Some(number)).await?) - }) - .await - } - - /// Get a Substrate header by its number. - pub async fn header_by_number(&self, block_number: C::BlockNumber) -> Result - where - C::Header: DeserializeOwned, - { - let block_hash = Self::block_hash_by_number(self, block_number).await?; - let header_by_hash = Self::header_by_hash(self, block_hash).await?; - Ok(header_by_hash) - } - - /// Return runtime version. - pub async fn runtime_version(&self) -> Result { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateStateClient::::runtime_version(&*client).await?) - }) - .await - } - - /// Read value from runtime storage. - pub async fn storage_value( - &self, - storage_key: StorageKey, - block_hash: Option, - ) -> Result> { - self.raw_storage_value(storage_key, block_hash) - .await? - .map(|encoded_value| { - T::decode(&mut &encoded_value.0[..]).map_err(Error::ResponseParseFailed) - }) - .transpose() - } - - /// Read `MapStorage` value from runtime storage. 
- pub async fn storage_map_value( - &self, - pallet_prefix: &str, - key: &T::Key, - block_hash: Option, - ) -> Result> { - let storage_key = T::final_key(pallet_prefix, key); - - self.raw_storage_value(storage_key, block_hash) - .await? - .map(|encoded_value| { - T::Value::decode(&mut &encoded_value.0[..]).map_err(Error::ResponseParseFailed) - }) - .transpose() - } - - /// Read `DoubleMapStorage` value from runtime storage. - pub async fn storage_double_map_value( - &self, - pallet_prefix: &str, - key1: &T::Key1, - key2: &T::Key2, - block_hash: Option, - ) -> Result> { - let storage_key = T::final_key(pallet_prefix, key1, key2); - - self.raw_storage_value(storage_key, block_hash) - .await? - .map(|encoded_value| { - T::Value::decode(&mut &encoded_value.0[..]).map_err(Error::ResponseParseFailed) - }) - .transpose() - } - - /// Read raw value from runtime storage. - pub async fn raw_storage_value( - &self, - storage_key: StorageKey, - block_hash: Option, - ) -> Result> { - let cloned_storage_key = storage_key.clone(); - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateStateClient::::storage(&*client, storage_key.clone(), block_hash) - .await?) - }) - .await - .map_err(|e| Error::FailedToReadRuntimeStorageValue { - chain: C::NAME.into(), - key: cloned_storage_key, - error: e.boxed(), - }) - } - - /// Get the nonce of the given Substrate account. - /// - /// Note: It's the caller's responsibility to make sure `account` is a valid SS58 address. - pub async fn next_account_index(&self, account: C::AccountId) -> Result { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateFrameSystemClient::::account_next_index(&*client, account).await?) - }) - .await - } - - /// Submit unsigned extrinsic for inclusion in a block. - /// - /// Note: The given transaction needs to be SCALE encoded beforehand. - pub async fn submit_unsigned_extrinsic(&self, transaction: Bytes) -> Result { - // one last check that the transaction is valid. 
Most of checks happen in the relay loop and - // it is the "final" check before submission. - let best_header_hash = self.best_header().await?.hash(); - self.validate_transaction(best_header_hash, PreEncoded(transaction.0.clone())) - .await - .map_err(|e| { - log::error!(target: "bridge", "Pre-submit {} transaction validation failed: {:?}", C::NAME, e); - e - })??; - - self.jsonrpsee_execute(move |client| async move { - let tx_hash = SubstrateAuthorClient::::submit_extrinsic(&*client, transaction) - .await - .map_err(|e| { - log::error!(target: "bridge", "Failed to send transaction to {} node: {:?}", C::NAME, e); - e - })?; - log::trace!(target: "bridge", "Sent transaction to {} node: {:?}", C::NAME, tx_hash); - Ok(tx_hash) - }) - .await - } - - async fn build_sign_params(&self, signer: AccountKeyPairOf) -> Result> - where - C: ChainWithTransactions, - { - let runtime_version = self.simple_runtime_version().await?; - Ok(SignParam:: { - spec_version: runtime_version.spec_version, - transaction_version: runtime_version.transaction_version, - genesis_hash: self.genesis_hash, - signer, - }) - } - - /// Submit an extrinsic signed by given account. - /// - /// All calls of this method are synchronized, so there can't be more than one active - /// `submit_signed_extrinsic()` call. This guarantees that no nonces collision may happen - /// if all client instances are clones of the same initial `Client`. - /// - /// Note: The given transaction needs to be SCALE encoded beforehand. 
- pub async fn submit_signed_extrinsic( - &self, - signer: &AccountKeyPairOf, - prepare_extrinsic: impl FnOnce(HeaderIdOf, C::Nonce) -> Result> - + Send - + 'static, - ) -> Result - where - C: ChainWithTransactions, - C::AccountId: From<::Public>, - { - let _guard = self.submit_signed_extrinsic_lock.lock().await; - let transaction_nonce = self.next_account_index(signer.public().into()).await?; - let best_header = self.best_header().await?; - let signing_data = self.build_sign_params(signer.clone()).await?; - - // By using parent of best block here, we are protecing again best-block reorganizations. - // E.g. transaction may have been submitted when the best block was `A[num=100]`. Then it - // has been changed to `B[num=100]`. Hash of `A` has been included into transaction - // signature payload. So when signature will be checked, the check will fail and transaction - // will be dropped from the pool. - let best_header_id = best_header.parent_id().unwrap_or_else(|| best_header.id()); - - let extrinsic = prepare_extrinsic(best_header_id, transaction_nonce)?; - let signed_extrinsic = C::sign_transaction(signing_data, extrinsic)?.encode(); - - // one last check that the transaction is valid. Most of checks happen in the relay loop and - // it is the "final" check before submission. 
- self.validate_transaction(best_header_id.1, PreEncoded(signed_extrinsic.clone())) - .await - .map_err(|e| { - log::error!(target: "bridge", "Pre-submit {} transaction validation failed: {:?}", C::NAME, e); - e - })??; - - self.jsonrpsee_execute(move |client| async move { - let tx_hash = - SubstrateAuthorClient::::submit_extrinsic(&*client, Bytes(signed_extrinsic)) - .await - .map_err(|e| { - log::error!(target: "bridge", "Failed to send transaction to {} node: {:?}", C::NAME, e); - e - })?; - log::trace!(target: "bridge", "Sent transaction to {} node: {:?}", C::NAME, tx_hash); - Ok(tx_hash) - }) - .await - } - - /// Does exactly the same as `submit_signed_extrinsic`, but keeps watching for extrinsic status - /// after submission. - pub async fn submit_and_watch_signed_extrinsic( - &self, - signer: &AccountKeyPairOf, - prepare_extrinsic: impl FnOnce(HeaderIdOf, C::Nonce) -> Result> - + Send - + 'static, - ) -> Result> - where - C: ChainWithTransactions, - C::AccountId: From<::Public>, - { - let self_clone = self.clone(); - let signing_data = self.build_sign_params(signer.clone()).await?; - let _guard = self.submit_signed_extrinsic_lock.lock().await; - let transaction_nonce = self.next_account_index(signer.public().into()).await?; - let best_header = self.best_header().await?; - let best_header_id = best_header.id(); - - let extrinsic = prepare_extrinsic(best_header_id, transaction_nonce)?; - let stall_timeout = transaction_stall_timeout( - extrinsic.era.mortality_period(), - C::AVERAGE_BLOCK_INTERVAL, - STALL_TIMEOUT, - ); - let signed_extrinsic = C::sign_transaction(signing_data, extrinsic)?.encode(); - - // one last check that the transaction is valid. Most of checks happen in the relay loop and - // it is the "final" check before submission. 
- self.validate_transaction(best_header_id.1, PreEncoded(signed_extrinsic.clone())) - .await - .map_err(|e| { - log::error!(target: "bridge", "Pre-submit {} transaction validation failed: {:?}", C::NAME, e); - e - })??; - - let (sender, receiver) = futures::channel::mpsc::channel(MAX_SUBSCRIPTION_CAPACITY); - let (tracker, subscription) = self - .jsonrpsee_execute(move |client| async move { - let tx_hash = C::Hasher::hash(&signed_extrinsic); - let subscription = SubstrateAuthorClient::::submit_and_watch_extrinsic( - &*client, - Bytes(signed_extrinsic), - ) - .await - .map_err(|e| { - log::error!(target: "bridge", "Failed to send transaction to {} node: {:?}", C::NAME, e); - e - })?; - log::trace!(target: "bridge", "Sent transaction to {} node: {:?}", C::NAME, tx_hash); - let tracker = TransactionTracker::new( - self_clone, - stall_timeout, - tx_hash, - Subscription(Mutex::new(receiver)), - ); - Ok((tracker, subscription)) - }) - .await?; - self.data.read().await.tokio.spawn(Subscription::background_worker( - C::NAME.into(), - "extrinsic".into(), - subscription, - sender, - )); - Ok(tracker) - } - - /// Returns pending extrinsics from transaction pool. - pub async fn pending_extrinsics(&self) -> Result> { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateAuthorClient::::pending_extrinsics(&*client).await?) - }) - .await - } - - /// Validate transaction at given block state. 
- pub async fn validate_transaction( - &self, - at_block: C::Hash, - transaction: SignedTransaction, - ) -> Result { - self.jsonrpsee_execute(move |client| async move { - let call = SUB_API_TXPOOL_VALIDATE_TRANSACTION.to_string(); - let data = Bytes((TransactionSource::External, transaction, at_block).encode()); - - let encoded_response = - SubstrateStateClient::::call(&*client, call, data, Some(at_block)).await?; - let validity = TransactionValidity::decode(&mut &encoded_response.0[..]) - .map_err(Error::ResponseParseFailed)?; - - Ok(validity) - }) - .await - } - - /// Returns weight of the given transaction. - pub async fn extimate_extrinsic_weight( - &self, - transaction: SignedTransaction, - ) -> Result { - self.jsonrpsee_execute(move |client| async move { - let transaction_len = transaction.encoded_size() as u32; - - let call = SUB_API_TX_PAYMENT_QUERY_INFO.to_string(); - let data = Bytes((transaction, transaction_len).encode()); - - let encoded_response = - SubstrateStateClient::::call(&*client, call, data, None).await?; - let dispatch_info = - RuntimeDispatchInfo::::decode(&mut &encoded_response.0[..]) - .map_err(Error::ResponseParseFailed)?; - - Ok(dispatch_info.weight) - }) - .await - } - - /// Get the GRANDPA authority set at given block. - pub async fn grandpa_authorities_set( - &self, - block: C::Hash, - ) -> Result { - self.jsonrpsee_execute(move |client| async move { - let call = SUB_API_GRANDPA_AUTHORITIES.to_string(); - let data = Bytes(Vec::new()); - - let encoded_response = - SubstrateStateClient::::call(&*client, call, data, Some(block)).await?; - let authority_list = encoded_response.0; - - Ok(authority_list) - }) - .await - } - - /// Execute runtime call at given block, provided the input and output types. - /// It also performs the input encode and output decode. 
- pub async fn typed_state_call( - &self, - method_name: String, - input: Input, - at_block: Option, - ) -> Result { - let encoded_output = self - .state_call(method_name.clone(), Bytes(input.encode()), at_block) - .await - .map_err(|e| Error::ErrorExecutingRuntimeCall { - chain: C::NAME.into(), - method: method_name, - error: e.boxed(), - })?; - Output::decode(&mut &encoded_output.0[..]).map_err(Error::ResponseParseFailed) - } - - /// Execute runtime call at given block. - pub async fn state_call( - &self, - method: String, - data: Bytes, - at_block: Option, - ) -> Result { - self.jsonrpsee_execute(move |client| async move { - SubstrateStateClient::::call(&*client, method, data, at_block) - .await - .map_err(Into::into) - }) - .await - } - - /// Returns storage proof of given storage keys. - pub async fn prove_storage( - &self, - keys: Vec, - at_block: C::Hash, - ) -> Result { - self.jsonrpsee_execute(move |client| async move { - SubstrateStateClient::::prove_storage(&*client, keys, Some(at_block)) - .await - .map(|proof| { - StorageProof::new(proof.proof.into_iter().map(|b| b.0).collect::>()) - }) - .map_err(Into::into) - }) - .await - } - - /// Return `tokenDecimals` property from the set of chain properties. - pub async fn token_decimals(&self) -> Result> { - self.jsonrpsee_execute(move |client| async move { - let system_properties = SubstrateSystemClient::::properties(&*client).await?; - Ok(system_properties.get("tokenDecimals").and_then(|v| v.as_u64())) - }) - .await - } - - /// Return new finality justifications stream. - pub async fn subscribe_finality_justifications>( - &self, - ) -> Result> { - let subscription = self - .jsonrpsee_execute(move |client| async move { - Ok(FC::subscribe_justifications(&client).await?) 
- }) - .await?; - let (sender, receiver) = futures::channel::mpsc::channel(MAX_SUBSCRIPTION_CAPACITY); - self.data.read().await.tokio.spawn(Subscription::background_worker( - C::NAME.into(), - "justification".into(), - subscription, - sender, - )); - Ok(Subscription(Mutex::new(receiver))) - } - - /// Generates a proof of key ownership for the given authority in the given set. - pub async fn generate_grandpa_key_ownership_proof( - &self, - at: HashOf, - set_id: sp_consensus_grandpa::SetId, - authority_id: sp_consensus_grandpa::AuthorityId, - ) -> Result> - where - C: ChainWithGrandpa, - { - self.typed_state_call( - SUB_API_GRANDPA_GENERATE_KEY_OWNERSHIP_PROOF.into(), - (set_id, authority_id), - Some(at), - ) - .await - } - - /// Execute jsonrpsee future in tokio context. - async fn jsonrpsee_execute(&self, make_jsonrpsee_future: MF) -> Result - where - MF: FnOnce(Arc) -> F + Send + 'static, - F: Future> + Send + 'static, - T: Send + 'static, - { - let data = self.data.read().await; - let client = data.client.clone(); - data.tokio.spawn(make_jsonrpsee_future(client)).await? - } - - /// Returns `true` if version guard can be started. - /// - /// There's no reason to run version guard when version mode is set to `Auto`. It can - /// lead to relay shutdown when chain is upgraded, even though we have explicitly - /// said that we don't want to shutdown. - pub fn can_start_version_guard(&self) -> bool { - !matches!(self.chain_runtime_version, ChainRuntimeVersion::Auto) - } -} - -impl Subscription { - /// Consumes subscription and returns future statuses stream. - pub fn into_stream(self) -> impl futures::Stream { - futures::stream::unfold(self, |this| async { - let item = this.0.lock().await.next().await.unwrap_or(None); - item.map(|i| (i, this)) - }) - } - - /// Return next item from the subscription. 
- pub async fn next(&self) -> Result> { - let mut receiver = self.0.lock().await; - let item = receiver.next().await; - Ok(item.unwrap_or(None)) - } - - /// Background worker that is executed in tokio context as `jsonrpsee` requires. - async fn background_worker( - chain_name: String, - item_type: String, - mut subscription: jsonrpsee::core::client::Subscription, - mut sender: futures::channel::mpsc::Sender>, - ) { - loop { - match subscription.next().await { - Some(Ok(item)) => - if sender.send(Some(item)).await.is_err() { - break - }, - Some(Err(e)) => { - log::trace!( - target: "bridge", - "{} {} subscription stream has returned '{:?}'. Stream needs to be restarted.", - chain_name, - item_type, - e, - ); - let _ = sender.send(None).await; - break - }, - None => { - log::trace!( - target: "bridge", - "{} {} subscription stream has returned None. Stream needs to be restarted.", - chain_name, - item_type, - ); - let _ = sender.send(None).await; - break - }, - } - } - } -} diff --git a/relays/client-substrate/src/error.rs b/relays/client-substrate/src/error.rs deleted file mode 100644 index 40015c122bbe97ae249fea8f605d686b31122471..0000000000000000000000000000000000000000 --- a/relays/client-substrate/src/error.rs +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate node RPC errors. - -use bp_polkadot_core::parachains::ParaId; -use jsonrpsee::core::Error as RpcError; -use relay_utils::MaybeConnectionError; -use sc_rpc_api::system::Health; -use sp_core::storage::StorageKey; -use sp_runtime::transaction_validity::TransactionValidityError; -use thiserror::Error; - -/// Result type used by Substrate client. -pub type Result = std::result::Result; - -/// Errors that can occur only when interacting with -/// a Substrate node through RPC. -#[derive(Error, Debug)] -pub enum Error { - /// IO error. - #[error("IO error: {0}")] - Io(#[from] std::io::Error), - /// An error that can occur when making a request to - /// an JSON-RPC server. - #[error("RPC error: {0}")] - RpcError(#[from] RpcError), - /// The response from the server could not be SCALE decoded. - #[error("Response parse failed: {0}")] - ResponseParseFailed(#[from] codec::Error), - /// Account does not exist on the chain. - #[error("Account does not exist on the chain.")] - AccountDoesNotExist, - /// Runtime storage is missing some mandatory value. - #[error("Mandatory storage value is missing from the runtime storage.")] - MissingMandatoryStorageValue, - /// Required parachain head is not present at the relay chain. - #[error("Parachain {0:?} head {1} is missing from the relay chain storage.")] - MissingRequiredParachainHead(ParaId, u64), - /// Failed to find finality proof for the given header. - #[error("Failed to find finality proof for header {0}.")] - FinalityProofNotFound(u64), - /// The client we're connected to is not synced, so we can't rely on its state. - #[error("Substrate client is not synced {0}.")] - ClientNotSynced(Health), - /// Failed to read best finalized header hash from given chain. 
- #[error("Failed to read best finalized header hash of {chain}: {error:?}.")] - FailedToReadBestFinalizedHeaderHash { - /// Name of the chain where the error has happened. - chain: String, - /// Underlying error. - error: Box, - }, - /// Failed to read best finalized header from given chain. - #[error("Failed to read best header of {chain}: {error:?}.")] - FailedToReadBestHeader { - /// Name of the chain where the error has happened. - chain: String, - /// Underlying error. - error: Box, - }, - /// Failed to read header by hash from given chain. - #[error("Failed to read header {hash} of {chain}: {error:?}.")] - FailedToReadHeaderByHash { - /// Name of the chain where the error has happened. - chain: String, - /// Hash of the header we've tried to read. - hash: String, - /// Underlying error. - error: Box, - }, - /// Failed to execute runtime call at given chain. - #[error("Failed to execute runtime call {method} at {chain}: {error:?}.")] - ErrorExecutingRuntimeCall { - /// Name of the chain where the error has happened. - chain: String, - /// Runtime method name. - method: String, - /// Underlying error. - error: Box, - }, - /// Failed to read sotrage value at given chain. - #[error("Failed to read storage value {key:?} at {chain}: {error:?}.")] - FailedToReadRuntimeStorageValue { - /// Name of the chain where the error has happened. - chain: String, - /// Runtime storage key - key: StorageKey, - /// Underlying error. - error: Box, - }, - /// The bridge pallet is halted and all transactions will be rejected. - #[error("Bridge pallet is halted.")] - BridgePalletIsHalted, - /// The bridge pallet is not yet initialized and all transactions will be rejected. - #[error("Bridge pallet is not initialized.")] - BridgePalletIsNotInitialized, - /// There's no best head of the parachain at the `pallet-bridge-parachains` at the target side. 
- #[error("No head of the ParaId({0}) at the bridge parachains pallet at {1}.")] - NoParachainHeadAtTarget(u32, String), - /// An error has happened when we have tried to parse storage proof. - #[error("Error when parsing storage proof: {0:?}.")] - StorageProofError(bp_runtime::StorageProofError), - /// The Substrate transaction is invalid. - #[error("Substrate transaction is invalid: {0:?}")] - TransactionInvalid(#[from] TransactionValidityError), - /// Custom logic error. - #[error("{0}")] - Custom(String), -} - -impl From for Error { - fn from(error: tokio::task::JoinError) -> Self { - Error::Custom(format!("Failed to wait tokio task: {error}")) - } -} - -impl Error { - /// Box the error. - pub fn boxed(self) -> Box { - Box::new(self) - } -} - -impl MaybeConnectionError for Error { - fn is_connection_error(&self) -> bool { - match *self { - Error::RpcError(RpcError::Transport(_)) | - Error::RpcError(RpcError::RestartNeeded(_)) | - Error::ClientNotSynced(_) => true, - Error::FailedToReadBestFinalizedHeaderHash { ref error, .. } => - error.is_connection_error(), - Error::FailedToReadBestHeader { ref error, .. } => error.is_connection_error(), - Error::FailedToReadHeaderByHash { ref error, .. } => error.is_connection_error(), - Error::ErrorExecutingRuntimeCall { ref error, .. } => error.is_connection_error(), - Error::FailedToReadRuntimeStorageValue { ref error, .. } => error.is_connection_error(), - _ => false, - } - } -} diff --git a/relays/client-substrate/src/guard.rs b/relays/client-substrate/src/guard.rs deleted file mode 100644 index 545396b30b85963fe0cc6afcdc62fe141a1069b8..0000000000000000000000000000000000000000 --- a/relays/client-substrate/src/guard.rs +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Pallet provides a set of guard functions that are running in background threads -//! and are aborting process if some condition fails. - -use crate::{error::Error, Chain, Client}; - -use async_trait::async_trait; -use sp_version::RuntimeVersion; -use std::{ - fmt::Display, - time::{Duration, Instant}, -}; - -/// Guards environment. -#[async_trait] -pub trait Environment: Send + Sync + 'static { - /// Error type. - type Error: Display + Send + Sync + 'static; - - /// Return current runtime version. - async fn runtime_version(&mut self) -> Result; - - /// Return current time. - fn now(&self) -> Instant { - Instant::now() - } - - /// Sleep given amount of time. - async fn sleep(&mut self, duration: Duration) { - async_std::task::sleep(duration).await - } - - /// Abort current process. Called when guard condition check fails. - async fn abort(&mut self) { - std::process::abort(); - } -} - -/// Abort when runtime spec version is different from specified. -pub fn abort_on_spec_version_change( - mut env: impl Environment, - expected_spec_version: u32, -) { - async_std::task::spawn(async move { - log::info!( - target: "bridge-guard", - "Starting spec_version guard for {}. 
Expected spec_version: {}", - C::NAME, - expected_spec_version, - ); - - loop { - let actual_spec_version = env.runtime_version().await; - match actual_spec_version { - Ok(version) if version.spec_version == expected_spec_version => (), - Ok(version) => { - log::error!( - target: "bridge-guard", - "{} runtime spec version has changed from {} to {}. Aborting relay", - C::NAME, - expected_spec_version, - version.spec_version, - ); - - env.abort().await; - }, - Err(error) => log::warn!( - target: "bridge-guard", - "Failed to read {} runtime version: {}. Relay may need to be stopped manually", - C::NAME, - error, - ), - } - - env.sleep(conditions_check_delay::()).await; - } - }); -} - -/// Delay between conditions check. -fn conditions_check_delay() -> Duration { - C::AVERAGE_BLOCK_INTERVAL * (10 + rand::random::() % 10) -} - -#[async_trait] -impl Environment for Client { - type Error = Error; - - async fn runtime_version(&mut self) -> Result { - Client::::runtime_version(self).await - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::test_chain::TestChain; - use futures::{ - channel::mpsc::{unbounded, UnboundedReceiver, UnboundedSender}, - future::FutureExt, - stream::StreamExt, - SinkExt, - }; - - struct TestEnvironment { - runtime_version_rx: UnboundedReceiver, - slept_tx: UnboundedSender<()>, - aborted_tx: UnboundedSender<()>, - } - - #[async_trait] - impl Environment for TestEnvironment { - type Error = Error; - - async fn runtime_version(&mut self) -> Result { - Ok(self.runtime_version_rx.next().await.unwrap_or_default()) - } - - async fn sleep(&mut self, _duration: Duration) { - let _ = self.slept_tx.send(()).await; - } - - async fn abort(&mut self) { - let _ = self.aborted_tx.send(()).await; - // simulate process abort :) - async_std::task::sleep(Duration::from_secs(60)).await; - } - } - - #[test] - fn aborts_when_spec_version_is_changed() { - async_std::task::block_on(async { - let ( - (mut runtime_version_tx, runtime_version_rx), - (slept_tx, mut 
slept_rx), - (aborted_tx, mut aborted_rx), - ) = (unbounded(), unbounded(), unbounded()); - abort_on_spec_version_change( - TestEnvironment { runtime_version_rx, slept_tx, aborted_tx }, - 0, - ); - - // client responds with wrong version - runtime_version_tx - .send(RuntimeVersion { spec_version: 42, ..Default::default() }) - .await - .unwrap(); - - // then the `abort` function is called - aborted_rx.next().await; - // and we do not reach the `sleep` function call - assert!(slept_rx.next().now_or_never().is_none()); - }); - } - - #[test] - fn does_not_aborts_when_spec_version_is_unchanged() { - async_std::task::block_on(async { - let ( - (mut runtime_version_tx, runtime_version_rx), - (slept_tx, mut slept_rx), - (aborted_tx, mut aborted_rx), - ) = (unbounded(), unbounded(), unbounded()); - abort_on_spec_version_change( - TestEnvironment { runtime_version_rx, slept_tx, aborted_tx }, - 42, - ); - - // client responds with the same version - runtime_version_tx - .send(RuntimeVersion { spec_version: 42, ..Default::default() }) - .await - .unwrap(); - - // then the `sleep` function is called - slept_rx.next().await; - // and the `abort` function is not called - assert!(aborted_rx.next().now_or_never().is_none()); - }); - } -} diff --git a/relays/client-substrate/src/lib.rs b/relays/client-substrate/src/lib.rs deleted file mode 100644 index 84c2ad10cf8f6fa2d3211468fdf7442cdff007bc..0000000000000000000000000000000000000000 --- a/relays/client-substrate/src/lib.rs +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Tools to interact with Substrate node using RPC methods. - -#![warn(missing_docs)] - -mod chain; -mod client; -mod error; -mod rpc; -mod sync_header; -mod transaction_tracker; - -pub mod calls; -pub mod guard; -pub mod metrics; -pub mod test_chain; - -use std::time::Duration; - -pub use crate::{ - chain::{ - AccountKeyPairOf, BlockWithJustification, CallOf, Chain, ChainWithBalances, - ChainWithGrandpa, ChainWithMessages, ChainWithTransactions, ChainWithUtilityPallet, - FullRuntimeUtilityPallet, MockedRuntimeUtilityPallet, Parachain, RelayChain, SignParam, - TransactionStatusOf, UnsignedTransaction, UtilityPallet, - }, - client::{ - is_ancient_block, ChainRuntimeVersion, Client, OpaqueGrandpaAuthoritiesSet, - SimpleRuntimeVersion, Subscription, ANCIENT_BLOCK_THRESHOLD, - }, - error::{Error, Result}, - rpc::{SubstrateBeefyFinalityClient, SubstrateFinalityClient, SubstrateGrandpaFinalityClient}, - sync_header::SyncHeader, - transaction_tracker::TransactionTracker, -}; -pub use bp_runtime::{ - AccountIdOf, AccountPublicOf, BalanceOf, BlockNumberOf, Chain as ChainBase, HashOf, HeaderIdOf, - HeaderOf, NonceOf, Parachain as ParachainBase, SignatureOf, TransactionEra, TransactionEraOf, - UnderlyingChainProvider, -}; - -/// Substrate-over-websocket connection params. -#[derive(Debug, Clone)] -pub struct ConnectionParams { - /// Websocket server host name. - pub host: String, - /// Websocket server TCP port. - pub port: u16, - /// Use secure websocket connection. 
- pub secure: bool, - /// Defined chain runtime version - pub chain_runtime_version: ChainRuntimeVersion, -} - -impl Default for ConnectionParams { - fn default() -> Self { - ConnectionParams { - host: "localhost".into(), - port: 9944, - secure: false, - chain_runtime_version: ChainRuntimeVersion::Auto, - } - } -} - -/// Returns stall timeout for relay loop. -/// -/// Relay considers himself stalled if he has submitted transaction to the node, but it has not -/// been mined for this period. -pub fn transaction_stall_timeout( - mortality_period: Option, - average_block_interval: Duration, - default_stall_timeout: Duration, -) -> Duration { - // 1 extra block for transaction to reach the pool && 1 for relayer to awake after it is mined - mortality_period - .map(|mortality_period| average_block_interval.saturating_mul(mortality_period + 1 + 1)) - .unwrap_or(default_stall_timeout) -} diff --git a/relays/client-substrate/src/metrics/float_storage_value.rs b/relays/client-substrate/src/metrics/float_storage_value.rs deleted file mode 100644 index 7bb92693b38d27f42b623d323ba3e7ced8ebbda2..0000000000000000000000000000000000000000 --- a/relays/client-substrate/src/metrics/float_storage_value.rs +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -use crate::{chain::Chain, client::Client, Error as SubstrateError}; - -use async_std::sync::{Arc, RwLock}; -use async_trait::async_trait; -use codec::Decode; -use num_traits::One; -use relay_utils::metrics::{ - metric_name, register, F64SharedRef, Gauge, Metric, PrometheusError, Registry, - StandaloneMetric, F64, -}; -use sp_core::storage::{StorageData, StorageKey}; -use sp_runtime::{traits::UniqueSaturatedInto, FixedPointNumber, FixedU128}; -use std::{marker::PhantomData, time::Duration}; - -/// Storage value update interval (in blocks). -const UPDATE_INTERVAL_IN_BLOCKS: u32 = 5; - -/// Fied-point storage value and the way it is decoded from the raw storage value. -pub trait FloatStorageValue: 'static + Clone + Send + Sync { - /// Type of the value. - type Value: FixedPointNumber; - /// Try to decode value from the raw storage value. - fn decode( - &self, - maybe_raw_value: Option, - ) -> Result, SubstrateError>; -} - -/// Implementation of `FloatStorageValue` that expects encoded `FixedU128` value and returns `1` if -/// value is missing from the storage. -#[derive(Clone, Debug, Default)] -pub struct FixedU128OrOne; - -impl FloatStorageValue for FixedU128OrOne { - type Value = FixedU128; - - fn decode( - &self, - maybe_raw_value: Option, - ) -> Result, SubstrateError> { - maybe_raw_value - .map(|raw_value| { - FixedU128::decode(&mut &raw_value.0[..]) - .map_err(SubstrateError::ResponseParseFailed) - .map(Some) - }) - .unwrap_or_else(|| Ok(Some(FixedU128::one()))) - } -} - -/// Metric that represents fixed-point runtime storage value as float gauge. -#[derive(Clone, Debug)] -pub struct FloatStorageValueMetric { - value_converter: V, - client: Client, - storage_key: StorageKey, - metric: Gauge, - shared_value_ref: F64SharedRef, - _phantom: PhantomData, -} - -impl FloatStorageValueMetric { - /// Create new metric. 
- pub fn new( - value_converter: V, - client: Client, - storage_key: StorageKey, - name: String, - help: String, - ) -> Result { - let shared_value_ref = Arc::new(RwLock::new(None)); - Ok(FloatStorageValueMetric { - value_converter, - client, - storage_key, - metric: Gauge::new(metric_name(None, &name), help)?, - shared_value_ref, - _phantom: Default::default(), - }) - } - - /// Get shared reference to metric value. - pub fn shared_value_ref(&self) -> F64SharedRef { - self.shared_value_ref.clone() - } -} - -impl Metric for FloatStorageValueMetric { - fn register(&self, registry: &Registry) -> Result<(), PrometheusError> { - register(self.metric.clone(), registry).map(drop) - } -} - -#[async_trait] -impl StandaloneMetric for FloatStorageValueMetric { - fn update_interval(&self) -> Duration { - C::AVERAGE_BLOCK_INTERVAL * UPDATE_INTERVAL_IN_BLOCKS - } - - async fn update(&self) { - let value = self - .client - .raw_storage_value(self.storage_key.clone(), None) - .await - .and_then(|maybe_storage_value| { - self.value_converter.decode(maybe_storage_value).map(|maybe_fixed_point_value| { - maybe_fixed_point_value.map(|fixed_point_value| { - fixed_point_value.into_inner().unique_saturated_into() as f64 / - V::Value::DIV.unique_saturated_into() as f64 - }) - }) - }) - .map_err(|e| e.to_string()); - relay_utils::metrics::set_gauge_value(&self.metric, value.clone()); - *self.shared_value_ref.write().await = value.ok().and_then(|x| x); - } -} diff --git a/relays/client-substrate/src/metrics/mod.rs b/relays/client-substrate/src/metrics/mod.rs deleted file mode 100644 index fe200e2d3dca7ea84a41e61f18912acc0d3f4332..0000000000000000000000000000000000000000 --- a/relays/client-substrate/src/metrics/mod.rs +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Contains several Substrate-specific metrics that may be exposed by relay. - -pub use float_storage_value::{FixedU128OrOne, FloatStorageValue, FloatStorageValueMetric}; - -mod float_storage_value; diff --git a/relays/client-substrate/src/rpc.rs b/relays/client-substrate/src/rpc.rs deleted file mode 100644 index 35ab08c0f415161b3026dd9044be9fc5222e5d43..0000000000000000000000000000000000000000 --- a/relays/client-substrate/src/rpc.rs +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! The most generic Substrate node RPC interface. 
- -use async_trait::async_trait; - -use crate::{Chain, ChainWithGrandpa, TransactionStatusOf}; - -use jsonrpsee::{ - core::{client::Subscription, RpcResult}, - proc_macros::rpc, - ws_client::WsClient, -}; -use pallet_transaction_payment_rpc_runtime_api::FeeDetails; -use sc_rpc_api::{state::ReadProof, system::Health}; -use sp_core::{ - storage::{StorageData, StorageKey}, - Bytes, -}; -use sp_rpc::number::NumberOrHex; -use sp_version::RuntimeVersion; - -/// RPC methods of Substrate `system` namespace, that we are using. -#[rpc(client, client_bounds(C: Chain), namespace = "system")] -pub(crate) trait SubstrateSystem { - /// Return node health. - #[method(name = "health")] - async fn health(&self) -> RpcResult; - /// Return system properties. - #[method(name = "properties")] - async fn properties(&self) -> RpcResult; -} - -/// RPC methods of Substrate `chain` namespace, that we are using. -#[rpc(client, client_bounds(C: Chain), namespace = "chain")] -pub(crate) trait SubstrateChain { - /// Get block hash by its number. - #[method(name = "getBlockHash")] - async fn block_hash(&self, block_number: Option) -> RpcResult; - /// Return block header by its hash. - #[method(name = "getHeader")] - async fn header(&self, block_hash: Option) -> RpcResult; - /// Return best finalized block hash. - #[method(name = "getFinalizedHead")] - async fn finalized_head(&self) -> RpcResult; - /// Return signed block (with justifications) by its hash. - #[method(name = "getBlock")] - async fn block(&self, block_hash: Option) -> RpcResult; -} - -/// RPC methods of Substrate `author` namespace, that we are using. -#[rpc(client, client_bounds(C: Chain), namespace = "author")] -pub(crate) trait SubstrateAuthor { - /// Submit extrinsic to the transaction pool. - #[method(name = "submitExtrinsic")] - async fn submit_extrinsic(&self, extrinsic: Bytes) -> RpcResult; - /// Return vector of pending extrinsics from the transaction pool. 
- #[method(name = "pendingExtrinsics")] - async fn pending_extrinsics(&self) -> RpcResult>; - /// Submit and watch for extrinsic state. - #[subscription(name = "submitAndWatchExtrinsic", unsubscribe = "unwatchExtrinsic", item = TransactionStatusOf)] - async fn submit_and_watch_extrinsic(&self, extrinsic: Bytes); -} - -/// RPC methods of Substrate `state` namespace, that we are using. -#[rpc(client, client_bounds(C: Chain), namespace = "state")] -pub(crate) trait SubstrateState { - /// Get current runtime version. - #[method(name = "getRuntimeVersion")] - async fn runtime_version(&self) -> RpcResult; - /// Call given runtime method. - #[method(name = "call")] - async fn call( - &self, - method: String, - data: Bytes, - at_block: Option, - ) -> RpcResult; - /// Get value of the runtime storage. - #[method(name = "getStorage")] - async fn storage( - &self, - key: StorageKey, - at_block: Option, - ) -> RpcResult>; - /// Get proof of the runtime storage value. - #[method(name = "getReadProof")] - async fn prove_storage( - &self, - keys: Vec, - hash: Option, - ) -> RpcResult>; -} - -/// RPC methods that we are using for a certain finality gadget. -#[async_trait] -pub trait SubstrateFinalityClient { - /// Subscribe to finality justifications. - async fn subscribe_justifications(client: &WsClient) -> RpcResult>; -} - -/// RPC methods of Substrate `grandpa` namespace, that we are using. -#[rpc(client, client_bounds(C: ChainWithGrandpa), namespace = "grandpa")] -pub(crate) trait SubstrateGrandpa { - /// Subscribe to GRANDPA justifications. - #[subscription(name = "subscribeJustifications", unsubscribe = "unsubscribeJustifications", item = Bytes)] - async fn subscribe_justifications(&self); -} - -/// RPC finality methods of Substrate `grandpa` namespace, that we are using. 
-pub struct SubstrateGrandpaFinalityClient; -#[async_trait] -impl SubstrateFinalityClient for SubstrateGrandpaFinalityClient { - async fn subscribe_justifications(client: &WsClient) -> RpcResult> { - SubstrateGrandpaClient::::subscribe_justifications(client).await - } -} - -// TODO: Use `ChainWithBeefy` instead of `Chain` after #1606 is merged -/// RPC methods of Substrate `beefy` namespace, that we are using. -#[rpc(client, client_bounds(C: Chain), namespace = "beefy")] -pub(crate) trait SubstrateBeefy { - /// Subscribe to BEEFY justifications. - #[subscription(name = "subscribeJustifications", unsubscribe = "unsubscribeJustifications", item = Bytes)] - async fn subscribe_justifications(&self); -} - -/// RPC finality methods of Substrate `beefy` namespace, that we are using. -pub struct SubstrateBeefyFinalityClient; -// TODO: Use `ChainWithBeefy` instead of `Chain` after #1606 is merged -#[async_trait] -impl SubstrateFinalityClient for SubstrateBeefyFinalityClient { - async fn subscribe_justifications(client: &WsClient) -> RpcResult> { - SubstrateBeefyClient::::subscribe_justifications(client).await - } -} - -/// RPC methods of Substrate `system` frame pallet, that we are using. -#[rpc(client, client_bounds(C: Chain), namespace = "system")] -pub(crate) trait SubstrateFrameSystem { - /// Return index of next account transaction. - #[method(name = "accountNextIndex")] - async fn account_next_index(&self, account_id: C::AccountId) -> RpcResult; -} - -/// RPC methods of Substrate `pallet_transaction_payment` frame pallet, that we are using. -#[rpc(client, client_bounds(C: Chain), namespace = "payment")] -pub(crate) trait SubstrateTransactionPayment { - /// Query transaction fee details. 
- #[method(name = "queryFeeDetails")] - async fn fee_details( - &self, - extrinsic: Bytes, - at_block: Option, - ) -> RpcResult>; -} diff --git a/relays/client-substrate/src/sync_header.rs b/relays/client-substrate/src/sync_header.rs deleted file mode 100644 index fdfd1f22ce9edf9311e2ae827baf0f2a8fffbe20..0000000000000000000000000000000000000000 --- a/relays/client-substrate/src/sync_header.rs +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use bp_header_chain::ConsensusLogReader; -use finality_relay::SourceHeader as FinalitySourceHeader; -use sp_runtime::traits::Header as HeaderT; - -/// Generic wrapper for `sp_runtime::traits::Header` based headers, that -/// implements `finality_relay::SourceHeader` and may be used in headers sync directly. -#[derive(Clone, Debug, PartialEq)] -pub struct SyncHeader
(Header); - -impl
SyncHeader
{ - /// Extracts wrapped header from self. - pub fn into_inner(self) -> Header { - self.0 - } -} - -impl
std::ops::Deref for SyncHeader
{ - type Target = Header; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl
From
for SyncHeader
{ - fn from(header: Header) -> Self { - Self(header) - } -} - -impl FinalitySourceHeader - for SyncHeader
-{ - fn hash(&self) -> Header::Hash { - self.0.hash() - } - - fn number(&self) -> Header::Number { - *self.0.number() - } - - fn is_mandatory(&self) -> bool { - R::schedules_authorities_change(self.digest()) - } -} diff --git a/relays/client-substrate/src/test_chain.rs b/relays/client-substrate/src/test_chain.rs deleted file mode 100644 index e1ab6260fd6aed01724d86560b751ca79231e3df..0000000000000000000000000000000000000000 --- a/relays/client-substrate/src/test_chain.rs +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Pallet provides a set of guard functions that are running in background threads -//! and are aborting process if some condition fails. - -//! Test chain implementation to use in tests. - -#![cfg(any(feature = "test-helpers", test))] - -use crate::{Chain, ChainWithBalances}; -use bp_runtime::ChainId; -use frame_support::weights::Weight; -use std::time::Duration; - -/// Chain that may be used in tests. 
-#[derive(Clone, Debug, PartialEq, Eq)] -pub struct TestChain; - -impl bp_runtime::Chain for TestChain { - const ID: ChainId = *b"test"; - - type BlockNumber = u32; - type Hash = sp_core::H256; - type Hasher = sp_runtime::traits::BlakeTwo256; - type Header = sp_runtime::generic::Header; - - type AccountId = u32; - type Balance = u32; - type Nonce = u32; - type Signature = sp_runtime::testing::TestSignature; - - fn max_extrinsic_size() -> u32 { - unreachable!() - } - - fn max_extrinsic_weight() -> Weight { - unreachable!() - } -} - -impl Chain for TestChain { - const NAME: &'static str = "Test"; - const BEST_FINALIZED_HEADER_ID_METHOD: &'static str = "TestMethod"; - const FREE_HEADERS_INTERVAL_METHOD: &'static str = "TestMethod"; - const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_millis(0); - - type SignedBlock = sp_runtime::generic::SignedBlock< - sp_runtime::generic::Block, - >; - type Call = (); -} - -impl ChainWithBalances for TestChain { - fn account_info_storage_key(_account_id: &u32) -> sp_core::storage::StorageKey { - unreachable!() - } -} - -/// Primitives-level parachain that may be used in tests. -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct TestParachainBase; - -impl bp_runtime::Chain for TestParachainBase { - const ID: ChainId = *b"tstp"; - - type BlockNumber = u32; - type Hash = sp_core::H256; - type Hasher = sp_runtime::traits::BlakeTwo256; - type Header = sp_runtime::generic::Header; - - type AccountId = u32; - type Balance = u32; - type Nonce = u32; - type Signature = sp_runtime::testing::TestSignature; - - fn max_extrinsic_size() -> u32 { - unreachable!() - } - - fn max_extrinsic_weight() -> Weight { - unreachable!() - } -} - -impl bp_runtime::Parachain for TestParachainBase { - const PARACHAIN_ID: u32 = 1000; - const MAX_HEADER_SIZE: u32 = 1_024; -} - -/// Parachain that may be used in tests. 
-#[derive(Clone, Debug, PartialEq, Eq)] -pub struct TestParachain; - -impl bp_runtime::UnderlyingChainProvider for TestParachain { - type Chain = TestParachainBase; -} - -impl Chain for TestParachain { - const NAME: &'static str = "TestParachain"; - const BEST_FINALIZED_HEADER_ID_METHOD: &'static str = "TestParachainMethod"; - const FREE_HEADERS_INTERVAL_METHOD: &'static str = "TestParachainMethod"; - const AVERAGE_BLOCK_INTERVAL: Duration = Duration::from_millis(0); - - type SignedBlock = sp_runtime::generic::SignedBlock< - sp_runtime::generic::Block, - >; - type Call = (); -} diff --git a/relays/client-substrate/src/transaction_tracker.rs b/relays/client-substrate/src/transaction_tracker.rs deleted file mode 100644 index 00375768c45c27c23dfccb7730668108a6fab788..0000000000000000000000000000000000000000 --- a/relays/client-substrate/src/transaction_tracker.rs +++ /dev/null @@ -1,447 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Helper for tracking transaction invalidation events. 
- -use crate::{Chain, Client, Error, HashOf, HeaderIdOf, Subscription, TransactionStatusOf}; - -use async_trait::async_trait; -use futures::{future::Either, Future, FutureExt, Stream, StreamExt}; -use relay_utils::{HeaderId, TrackedTransactionStatus}; -use sp_runtime::traits::Header as _; -use std::time::Duration; - -/// Transaction tracker environment. -#[async_trait] -pub trait Environment: Send + Sync { - /// Returns header id by its hash. - async fn header_id_by_hash(&self, hash: HashOf) -> Result, Error>; -} - -#[async_trait] -impl Environment for Client { - async fn header_id_by_hash(&self, hash: HashOf) -> Result, Error> { - self.header_by_hash(hash).await.map(|h| HeaderId(*h.number(), hash)) - } -} - -/// Substrate transaction tracker implementation. -/// -/// Substrate node provides RPC API to submit and watch for transaction events. This way -/// we may know when transaction is included into block, finalized or rejected. There are -/// some edge cases, when we can't fully trust this mechanism - e.g. transaction may broadcasted -/// and then dropped out of node transaction pool (some other cases are also possible - node -/// restarts, connection lost, ...). Then we can't know for sure - what is currently happening -/// with our transaction. Is the transaction really lost? Is it still alive on the chain network? -/// -/// We have several options to handle such cases: -/// -/// 1) hope that the transaction is still alive and wait for its mining until it is spoiled; -/// -/// 2) assume that the transaction is lost and resubmit another transaction instantly; -/// -/// 3) wait for some time (if transaction is mortal - then until block where it dies; if it is -/// immortal - then for some time that we assume is long enough to mine it) and assume that it is -/// lost. -/// -/// This struct implements third option as it seems to be the most optimal. 
-pub struct TransactionTracker { - environment: E, - transaction_hash: HashOf, - stall_timeout: Duration, - subscription: Subscription>, -} - -impl> TransactionTracker { - /// Create transaction tracker. - pub fn new( - environment: E, - stall_timeout: Duration, - transaction_hash: HashOf, - subscription: Subscription>, - ) -> Self { - Self { environment, stall_timeout, transaction_hash, subscription } - } - - /// Wait for final transaction status and return it along with last known internal invalidation - /// status. - async fn do_wait( - self, - wait_for_stall_timeout: impl Future, - wait_for_stall_timeout_rest: impl Future, - ) -> (TrackedTransactionStatus>, Option>>) { - // sometimes we want to wait for the rest of the stall timeout even if - // `wait_for_invalidation` has been "select"ed first => it is shared - let wait_for_invalidation = watch_transaction_status::<_, C, _>( - self.environment, - self.transaction_hash, - self.subscription.into_stream(), - ); - futures::pin_mut!(wait_for_stall_timeout, wait_for_invalidation); - - match futures::future::select(wait_for_stall_timeout, wait_for_invalidation).await { - Either::Left((_, _)) => { - log::trace!( - target: "bridge", - "{} transaction {:?} is considered lost after timeout (no status response from the node)", - C::NAME, - self.transaction_hash, - ); - - (TrackedTransactionStatus::Lost, None) - }, - Either::Right((invalidation_status, _)) => match invalidation_status { - InvalidationStatus::Finalized(at_block) => - (TrackedTransactionStatus::Finalized(at_block), Some(invalidation_status)), - InvalidationStatus::Invalid => - (TrackedTransactionStatus::Lost, Some(invalidation_status)), - InvalidationStatus::Lost => { - // wait for the rest of stall timeout - this way we'll be sure that the - // transaction is actually dead if it has been crafted properly - wait_for_stall_timeout_rest.await; - // if someone is still watching for our transaction, then we're reporting - // an error here (which is treated as 
"transaction lost") - log::trace!( - target: "bridge", - "{} transaction {:?} is considered lost after timeout", - C::NAME, - self.transaction_hash, - ); - - (TrackedTransactionStatus::Lost, Some(invalidation_status)) - }, - }, - } - } -} - -#[async_trait] -impl> relay_utils::TransactionTracker for TransactionTracker { - type HeaderId = HeaderIdOf; - - async fn wait(self) -> TrackedTransactionStatus> { - let wait_for_stall_timeout = async_std::task::sleep(self.stall_timeout).shared(); - let wait_for_stall_timeout_rest = wait_for_stall_timeout.clone(); - self.do_wait(wait_for_stall_timeout, wait_for_stall_timeout_rest).await.0 - } -} - -/// Transaction invalidation status. -/// -/// Note that in places where the `TransactionTracker` is used, the finalization event will be -/// ignored - relay loops are detecting the mining/finalization using their own -/// techniques. That's why we're using `InvalidationStatus` here. -#[derive(Debug, PartialEq)] -enum InvalidationStatus { - /// Transaction has been included into block and finalized at given block. - Finalized(BlockId), - /// Transaction has been invalidated. - Invalid, - /// We have lost track of transaction status. - Lost, -} - -/// Watch for transaction status until transaction is finalized or we lose track of its status. 
-async fn watch_transaction_status< - E: Environment, - C: Chain, - S: Stream>, ->( - environment: E, - transaction_hash: HashOf, - subscription: S, -) -> InvalidationStatus> { - futures::pin_mut!(subscription); - - loop { - match subscription.next().await { - Some(TransactionStatusOf::::Finalized((block_hash, _))) => { - // the only "successful" outcome of this method is when the block with transaction - // has been finalized - log::trace!( - target: "bridge", - "{} transaction {:?} has been finalized at block: {:?}", - C::NAME, - transaction_hash, - block_hash, - ); - - let header_id = match environment.header_id_by_hash(block_hash).await { - Ok(header_id) => header_id, - Err(e) => { - log::error!( - target: "bridge", - "Failed to read header {:?} when watching for {} transaction {:?}: {:?}", - block_hash, - C::NAME, - transaction_hash, - e, - ); - // that's the best option we have here - return InvalidationStatus::Lost - }, - }; - return InvalidationStatus::Finalized(header_id) - }, - Some(TransactionStatusOf::::Invalid) => { - // if node says that the transaction is invalid, there are still chances that - // it is not actually invalid - e.g. if the block where transaction has been - // revalidated is retracted and transaction (at some other node pool) becomes - // valid again on other fork. But let's assume that the chances of this event - // are almost zero - there's a lot of things that must happen for this to be the - // case. 
- log::trace!( - target: "bridge", - "{} transaction {:?} has been invalidated", - C::NAME, - transaction_hash, - ); - return InvalidationStatus::Invalid - }, - Some(TransactionStatusOf::::Future) | - Some(TransactionStatusOf::::Ready) | - Some(TransactionStatusOf::::Broadcast(_)) => { - // nothing important (for us) has happened - }, - Some(TransactionStatusOf::::InBlock(block_hash)) => { - // TODO: read matching system event (ExtrinsicSuccess or ExtrinsicFailed), log it - // here and use it later (on finality) for reporting invalid transaction - // https://github.com/paritytech/parity-bridges-common/issues/1464 - log::trace!( - target: "bridge", - "{} transaction {:?} has been included in block: {:?}", - C::NAME, - transaction_hash, - block_hash, - ); - }, - Some(TransactionStatusOf::::Retracted(block_hash)) => { - log::trace!( - target: "bridge", - "{} transaction {:?} at block {:?} has been retracted", - C::NAME, - transaction_hash, - block_hash, - ); - }, - Some(TransactionStatusOf::::FinalityTimeout(block_hash)) => { - // finality is lagging? let's wait a bit more and report a stall - log::trace!( - target: "bridge", - "{} transaction {:?} block {:?} has not been finalized for too long", - C::NAME, - transaction_hash, - block_hash, - ); - return InvalidationStatus::Lost - }, - Some(TransactionStatusOf::::Usurped(new_transaction_hash)) => { - // this may be result of our transaction resubmitter work or some manual - // intervention. In both cases - let's start stall timeout, because the meaning - // of transaction may have changed - log::trace!( - target: "bridge", - "{} transaction {:?} has been usurped by new transaction: {:?}", - C::NAME, - transaction_hash, - new_transaction_hash, - ); - return InvalidationStatus::Lost - }, - Some(TransactionStatusOf::::Dropped) => { - // the transaction has been removed from the pool because of its limits. 
Let's wait - // a bit and report a stall - log::trace!( - target: "bridge", - "{} transaction {:?} has been dropped from the pool", - C::NAME, - transaction_hash, - ); - return InvalidationStatus::Lost - }, - None => { - // the status of transaction is unknown to us (the subscription has been closed?). - // Let's wait a bit and report a stall - return InvalidationStatus::Lost - }, - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::test_chain::TestChain; - use futures::{FutureExt, SinkExt}; - use sc_transaction_pool_api::TransactionStatus; - - struct TestEnvironment(Result, Error>); - - #[async_trait] - impl Environment for TestEnvironment { - async fn header_id_by_hash( - &self, - _hash: HashOf, - ) -> Result, Error> { - self.0.as_ref().map_err(|_| Error::BridgePalletIsNotInitialized).cloned() - } - } - - async fn on_transaction_status( - status: TransactionStatus, HashOf>, - ) -> Option<( - TrackedTransactionStatus>, - InvalidationStatus>, - )> { - let (mut sender, receiver) = futures::channel::mpsc::channel(1); - let tx_tracker = TransactionTracker::::new( - TestEnvironment(Ok(HeaderId(0, Default::default()))), - Duration::from_secs(0), - Default::default(), - Subscription(async_std::sync::Mutex::new(receiver)), - ); - - let wait_for_stall_timeout = futures::future::pending(); - let wait_for_stall_timeout_rest = futures::future::ready(()); - sender.send(Some(status)).await.unwrap(); - tx_tracker - .do_wait(wait_for_stall_timeout, wait_for_stall_timeout_rest) - .now_or_never() - .map(|(ts, is)| (ts, is.unwrap())) - } - - #[async_std::test] - async fn returns_finalized_on_finalized() { - assert_eq!( - on_transaction_status(TransactionStatus::Finalized(Default::default())).await, - Some(( - TrackedTransactionStatus::Finalized(Default::default()), - InvalidationStatus::Finalized(Default::default()) - )), - ); - } - - #[async_std::test] - async fn returns_lost_on_finalized_and_environment_error() { - assert_eq!( - watch_transaction_status::<_, 
TestChain, _>( - TestEnvironment(Err(Error::BridgePalletIsNotInitialized)), - Default::default(), - futures::stream::iter([TransactionStatus::Finalized(Default::default())]) - ) - .now_or_never(), - Some(InvalidationStatus::Lost), - ); - } - - #[async_std::test] - async fn returns_invalid_on_invalid() { - assert_eq!( - on_transaction_status(TransactionStatus::Invalid).await, - Some((TrackedTransactionStatus::Lost, InvalidationStatus::Invalid)), - ); - } - - #[async_std::test] - async fn waits_on_future() { - assert_eq!(on_transaction_status(TransactionStatus::Future).await, None,); - } - - #[async_std::test] - async fn waits_on_ready() { - assert_eq!(on_transaction_status(TransactionStatus::Ready).await, None,); - } - - #[async_std::test] - async fn waits_on_broadcast() { - assert_eq!( - on_transaction_status(TransactionStatus::Broadcast(Default::default())).await, - None, - ); - } - - #[async_std::test] - async fn waits_on_in_block() { - assert_eq!( - on_transaction_status(TransactionStatus::InBlock(Default::default())).await, - None, - ); - } - - #[async_std::test] - async fn waits_on_retracted() { - assert_eq!( - on_transaction_status(TransactionStatus::Retracted(Default::default())).await, - None, - ); - } - - #[async_std::test] - async fn lost_on_finality_timeout() { - assert_eq!( - on_transaction_status(TransactionStatus::FinalityTimeout(Default::default())).await, - Some((TrackedTransactionStatus::Lost, InvalidationStatus::Lost)), - ); - } - - #[async_std::test] - async fn lost_on_usurped() { - assert_eq!( - on_transaction_status(TransactionStatus::Usurped(Default::default())).await, - Some((TrackedTransactionStatus::Lost, InvalidationStatus::Lost)), - ); - } - - #[async_std::test] - async fn lost_on_dropped() { - assert_eq!( - on_transaction_status(TransactionStatus::Dropped).await, - Some((TrackedTransactionStatus::Lost, InvalidationStatus::Lost)), - ); - } - - #[async_std::test] - async fn lost_on_subscription_error() { - assert_eq!( - 
watch_transaction_status::<_, TestChain, _>( - TestEnvironment(Ok(HeaderId(0, Default::default()))), - Default::default(), - futures::stream::iter([]) - ) - .now_or_never(), - Some(InvalidationStatus::Lost), - ); - } - - #[async_std::test] - async fn lost_on_timeout_when_waiting_for_invalidation_status() { - let (_sender, receiver) = futures::channel::mpsc::channel(1); - let tx_tracker = TransactionTracker::::new( - TestEnvironment(Ok(HeaderId(0, Default::default()))), - Duration::from_secs(0), - Default::default(), - Subscription(async_std::sync::Mutex::new(receiver)), - ); - - let wait_for_stall_timeout = futures::future::ready(()).shared(); - let wait_for_stall_timeout_rest = wait_for_stall_timeout.clone(); - let wait_result = tx_tracker - .do_wait(wait_for_stall_timeout, wait_for_stall_timeout_rest) - .now_or_never(); - - assert_eq!(wait_result, Some((TrackedTransactionStatus::Lost, None))); - } -} diff --git a/relays/client-westend/Cargo.toml b/relays/client-westend/Cargo.toml deleted file mode 100644 index 1933a1f4130c657ad3e1b64fb150afa515cac9b6..0000000000000000000000000000000000000000 --- a/relays/client-westend/Cargo.toml +++ /dev/null @@ -1,31 +0,0 @@ -[package] -name = "relay-westend-client" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5", features = ["derive"] } -scale-info = { version = "2.10.0", default-features = false, features = ["derive"] } -subxt = { version = "0.32.1", default-features = false, features = ["native"] } - -# Bridge dependencies - -bp-polkadot-core = { path = "../../primitives/polkadot-core" } -bp-runtime = { path = "../../primitives/runtime" } -bp-westend = { path = "../../primitives/chain-westend" } - -relay-substrate-client = { path = "../client-substrate" } -relay-utils = { path = "../utils" } - -# Substrate Dependencies - 
-sp-consensus-grandpa = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-session = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-weights = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } diff --git a/relays/equivocation/Cargo.toml b/relays/equivocation/Cargo.toml deleted file mode 100644 index 0b4a7e983a3a8bf8a9f0e6bcc1430f03da22bf63..0000000000000000000000000000000000000000 --- a/relays/equivocation/Cargo.toml +++ /dev/null @@ -1,21 +0,0 @@ -[package] -name = "equivocation-detector" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -description = "Equivocation detector" - -[lints] -workspace = true - -[dependencies] -async-std = { version = "1.6.5", features = ["attributes"] } -async-trait = "0.1" -bp-header-chain = { path = "../../primitives/header-chain" } -finality-relay = { path = "../finality" } -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -futures = "0.3.30" -log = { workspace = true } -num-traits = "0.2" -relay-utils = { path = "../utils" } diff --git a/relays/equivocation/src/block_checker.rs b/relays/equivocation/src/block_checker.rs deleted file mode 100644 index c8131e5b9796f1050785676156ad1096181299ab..0000000000000000000000000000000000000000 --- a/relays/equivocation/src/block_checker.rs +++ /dev/null @@ -1,471 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::{ - handle_client_error, reporter::EquivocationsReporter, EquivocationDetectionPipeline, - EquivocationReportingContext, HeaderFinalityInfo, SourceClient, TargetClient, -}; - -use bp_header_chain::{FinalityProof, FindEquivocations as FindEquivocationsT}; -use finality_relay::FinalityProofsBuf; -use futures::future::{BoxFuture, FutureExt}; -use num_traits::Saturating; - -/// First step in the block checking state machine. -/// -/// Getting the finality info associated to the source headers synced with the target chain -/// at the specified block. -#[cfg_attr(test, derive(Debug, PartialEq))] -pub struct ReadSyncedHeaders { - pub target_block_num: P::TargetNumber, -} - -impl ReadSyncedHeaders

{ - pub async fn next>( - self, - target_client: &mut TC, - ) -> Result, Self> { - match target_client.synced_headers_finality_info(self.target_block_num).await { - Ok(synced_headers) => - Ok(ReadContext { target_block_num: self.target_block_num, synced_headers }), - Err(e) => { - log::error!( - target: "bridge", - "Could not get {} headers synced to {} at block {}: {e:?}", - P::SOURCE_NAME, - P::TARGET_NAME, - self.target_block_num - ); - - // Reconnect target client in case of a connection error. - handle_client_error(target_client, e).await; - - Err(self) - }, - } - } -} - -/// Second step in the block checking state machine. -/// -/// Reading the equivocation reporting context from the target chain. -#[cfg_attr(test, derive(Debug))] -pub struct ReadContext { - target_block_num: P::TargetNumber, - synced_headers: Vec>, -} - -impl ReadContext

{ - pub async fn next>( - self, - target_client: &mut TC, - ) -> Result>, Self> { - match EquivocationReportingContext::try_read_from_target::( - target_client, - self.target_block_num.saturating_sub(1.into()), - ) - .await - { - Ok(Some(context)) => Ok(Some(FindEquivocations { - target_block_num: self.target_block_num, - synced_headers: self.synced_headers, - context, - })), - Ok(None) => Ok(None), - Err(e) => { - log::error!( - target: "bridge", - "Could not read {} `EquivocationReportingContext` from {} at block {}: {e:?}", - P::SOURCE_NAME, - P::TARGET_NAME, - self.target_block_num.saturating_sub(1.into()), - ); - - // Reconnect target client in case of a connection error. - handle_client_error(target_client, e).await; - - Err(self) - }, - } - } -} - -/// Third step in the block checking state machine. -/// -/// Searching for equivocations in the source headers synced with the target chain. -#[cfg_attr(test, derive(Debug))] -pub struct FindEquivocations { - target_block_num: P::TargetNumber, - synced_headers: Vec>, - context: EquivocationReportingContext

, -} - -impl FindEquivocations

{ - pub fn next( - mut self, - finality_proofs_buf: &mut FinalityProofsBuf

, - ) -> Vec> { - let mut result = vec![]; - for synced_header in self.synced_headers { - match P::EquivocationsFinder::find_equivocations( - &self.context.synced_verification_context, - &synced_header.finality_proof, - finality_proofs_buf.buf().as_slice(), - ) { - Ok(equivocations) => - if !equivocations.is_empty() { - result.push(ReportEquivocations { - source_block_hash: self.context.synced_header_hash, - equivocations, - }) - }, - Err(e) => { - log::error!( - target: "bridge", - "Could not search for equivocations in the finality proof \ - for source header {:?} synced at target block {}: {e:?}", - synced_header.finality_proof.target_header_hash(), - self.target_block_num - ); - }, - }; - - finality_proofs_buf.prune(synced_header.finality_proof.target_header_number(), None); - self.context.update(synced_header); - } - - result - } -} - -/// Fourth step in the block checking state machine. -/// -/// Reporting the detected equivocations (if any). -#[cfg_attr(test, derive(Debug))] -pub struct ReportEquivocations { - source_block_hash: P::Hash, - equivocations: Vec, -} - -impl ReportEquivocations

{ - pub async fn next>( - mut self, - source_client: &mut SC, - reporter: &mut EquivocationsReporter<'_, P, SC>, - ) -> Result<(), Self> { - let mut unprocessed_equivocations = vec![]; - for equivocation in self.equivocations { - match reporter - .submit_report(source_client, self.source_block_hash, equivocation.clone()) - .await - { - Ok(_) => {}, - Err(e) => { - log::error!( - target: "bridge", - "Could not submit equivocation report to {} for {equivocation:?}: {e:?}", - P::SOURCE_NAME, - ); - - // Mark the equivocation as unprocessed - unprocessed_equivocations.push(equivocation); - // Reconnect source client in case of a connection error. - handle_client_error(source_client, e).await; - }, - } - } - - self.equivocations = unprocessed_equivocations; - if !self.equivocations.is_empty() { - return Err(self) - } - - Ok(()) - } -} - -/// Block checking state machine. -#[cfg_attr(test, derive(Debug))] -pub enum BlockChecker { - ReadSyncedHeaders(ReadSyncedHeaders

), - ReadContext(ReadContext

), - ReportEquivocations(Vec>), -} - -impl BlockChecker

{ - pub fn new(target_block_num: P::TargetNumber) -> Self { - Self::ReadSyncedHeaders(ReadSyncedHeaders { target_block_num }) - } - - pub fn run<'a, SC: SourceClient

, TC: TargetClient

>( - self, - source_client: &'a mut SC, - target_client: &'a mut TC, - finality_proofs_buf: &'a mut FinalityProofsBuf

, - reporter: &'a mut EquivocationsReporter, - ) -> BoxFuture<'a, Result<(), Self>> { - async move { - match self { - Self::ReadSyncedHeaders(state) => { - let read_context = - state.next(target_client).await.map_err(Self::ReadSyncedHeaders)?; - Self::ReadContext(read_context) - .run(source_client, target_client, finality_proofs_buf, reporter) - .await - }, - Self::ReadContext(state) => { - let maybe_find_equivocations = - state.next(target_client).await.map_err(Self::ReadContext)?; - let find_equivocations = match maybe_find_equivocations { - Some(find_equivocations) => find_equivocations, - None => return Ok(()), - }; - Self::ReportEquivocations(find_equivocations.next(finality_proofs_buf)) - .run(source_client, target_client, finality_proofs_buf, reporter) - .await - }, - Self::ReportEquivocations(state) => { - let mut failures = vec![]; - for report_equivocations in state { - if let Err(failure) = - report_equivocations.next(source_client, reporter).await - { - failures.push(failure); - } - } - - if !failures.is_empty() { - return Err(Self::ReportEquivocations(failures)) - } - - Ok(()) - }, - } - } - .boxed() - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::*; - use std::collections::HashMap; - - impl PartialEq for ReadContext { - fn eq(&self, other: &Self) -> bool { - self.target_block_num == other.target_block_num && - self.synced_headers == other.synced_headers - } - } - - impl PartialEq for FindEquivocations { - fn eq(&self, other: &Self) -> bool { - self.target_block_num == other.target_block_num && - self.synced_headers == other.synced_headers && - self.context == other.context - } - } - - impl PartialEq for ReportEquivocations { - fn eq(&self, other: &Self) -> bool { - self.source_block_hash == other.source_block_hash && - self.equivocations == other.equivocations - } - } - - impl PartialEq for BlockChecker { - fn eq(&self, _other: &Self) -> bool { - matches!(self, _other) - } - } - - #[async_std::test] - async fn 
block_checker_works() { - let mut source_client = TestSourceClient { ..Default::default() }; - let mut target_client = TestTargetClient { - best_synced_header_hash: HashMap::from([(9, Ok(Some(5)))]), - finality_verification_context: HashMap::from([( - 9, - Ok(TestFinalityVerificationContext { check_equivocations: true }), - )]), - synced_headers_finality_info: HashMap::from([( - 10, - Ok(vec![ - new_header_finality_info(6, None), - new_header_finality_info(7, Some(false)), - new_header_finality_info(8, None), - new_header_finality_info(9, Some(true)), - new_header_finality_info(10, None), - new_header_finality_info(11, None), - new_header_finality_info(12, None), - ]), - )]), - ..Default::default() - }; - let mut reporter = - EquivocationsReporter::::new(); - - let block_checker = BlockChecker::new(10); - assert!(block_checker - .run( - &mut source_client, - &mut target_client, - &mut FinalityProofsBuf::new(vec![ - TestFinalityProof(6, vec!["6-1"]), - TestFinalityProof(7, vec![]), - TestFinalityProof(8, vec!["8-1"]), - TestFinalityProof(9, vec!["9-1"]), - TestFinalityProof(10, vec![]), - TestFinalityProof(11, vec!["11-1", "11-2"]), - TestFinalityProof(12, vec!["12-1"]) - ]), - &mut reporter - ) - .await - .is_ok()); - assert_eq!( - *source_client.reported_equivocations.lock().unwrap(), - HashMap::from([(5, vec!["6-1"]), (9, vec!["11-1", "11-2", "12-1"])]) - ); - } - - #[async_std::test] - async fn block_checker_works_with_empty_context() { - let mut target_client = TestTargetClient { - best_synced_header_hash: HashMap::from([(9, Ok(None))]), - finality_verification_context: HashMap::from([( - 9, - Ok(TestFinalityVerificationContext { check_equivocations: true }), - )]), - synced_headers_finality_info: HashMap::from([( - 10, - Ok(vec![new_header_finality_info(6, None)]), - )]), - ..Default::default() - }; - let mut source_client = TestSourceClient { ..Default::default() }; - let mut reporter = - EquivocationsReporter::::new(); - - let block_checker = 
BlockChecker::new(10); - assert!(block_checker - .run( - &mut source_client, - &mut target_client, - &mut FinalityProofsBuf::new(vec![TestFinalityProof(6, vec!["6-1"])]), - &mut reporter - ) - .await - .is_ok()); - assert_eq!(*source_client.reported_equivocations.lock().unwrap(), HashMap::default()); - } - - #[async_std::test] - async fn read_synced_headers_handles_errors() { - let mut target_client = TestTargetClient { - synced_headers_finality_info: HashMap::from([ - (10, Err(TestClientError::NonConnection)), - (11, Err(TestClientError::Connection)), - ]), - ..Default::default() - }; - let mut source_client = TestSourceClient { ..Default::default() }; - let mut reporter = - EquivocationsReporter::::new(); - - // NonConnection error - let block_checker = BlockChecker::new(10); - assert_eq!( - block_checker - .run( - &mut source_client, - &mut target_client, - &mut FinalityProofsBuf::new(vec![]), - &mut reporter - ) - .await, - Err(BlockChecker::ReadSyncedHeaders(ReadSyncedHeaders { target_block_num: 10 })) - ); - assert_eq!(target_client.num_reconnects, 0); - - // Connection error - let block_checker = BlockChecker::new(11); - assert_eq!( - block_checker - .run( - &mut source_client, - &mut target_client, - &mut FinalityProofsBuf::new(vec![]), - &mut reporter - ) - .await, - Err(BlockChecker::ReadSyncedHeaders(ReadSyncedHeaders { target_block_num: 11 })) - ); - assert_eq!(target_client.num_reconnects, 1); - } - - #[async_std::test] - async fn read_context_handles_errors() { - let mut target_client = TestTargetClient { - synced_headers_finality_info: HashMap::from([(10, Ok(vec![])), (11, Ok(vec![]))]), - best_synced_header_hash: HashMap::from([ - (9, Err(TestClientError::NonConnection)), - (10, Err(TestClientError::Connection)), - ]), - ..Default::default() - }; - let mut source_client = TestSourceClient { ..Default::default() }; - let mut reporter = - EquivocationsReporter::::new(); - - // NonConnection error - let block_checker = BlockChecker::new(10); - 
assert_eq!( - block_checker - .run( - &mut source_client, - &mut target_client, - &mut FinalityProofsBuf::new(vec![]), - &mut reporter - ) - .await, - Err(BlockChecker::ReadContext(ReadContext { - target_block_num: 10, - synced_headers: vec![] - })) - ); - assert_eq!(target_client.num_reconnects, 0); - - // Connection error - let block_checker = BlockChecker::new(11); - assert_eq!( - block_checker - .run( - &mut source_client, - &mut target_client, - &mut FinalityProofsBuf::new(vec![]), - &mut reporter - ) - .await, - Err(BlockChecker::ReadContext(ReadContext { - target_block_num: 11, - synced_headers: vec![] - })) - ); - assert_eq!(target_client.num_reconnects, 1); - } -} diff --git a/relays/equivocation/src/equivocation_loop.rs b/relays/equivocation/src/equivocation_loop.rs deleted file mode 100644 index dfc4af0d4f62b21baa681aeb6ac6fd638e3e39a5..0000000000000000000000000000000000000000 --- a/relays/equivocation/src/equivocation_loop.rs +++ /dev/null @@ -1,308 +0,0 @@ -// Copyright 2019-2023 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -use crate::{ - handle_client_error, reporter::EquivocationsReporter, EquivocationDetectionPipeline, - SourceClient, TargetClient, -}; - -use crate::block_checker::BlockChecker; -use finality_relay::{FinalityProofsBuf, FinalityProofsStream}; -use futures::{select_biased, FutureExt}; -use num_traits::Saturating; -use relay_utils::{metrics::MetricsParams, FailedClient}; -use std::{future::Future, time::Duration}; - -/// Equivocations detection loop state. -struct EquivocationDetectionLoop< - P: EquivocationDetectionPipeline, - SC: SourceClient

, - TC: TargetClient

, -> { - source_client: SC, - target_client: TC, - - from_block_num: Option, - until_block_num: Option, - - reporter: EquivocationsReporter<'static, P, SC>, - - finality_proofs_stream: FinalityProofsStream, - finality_proofs_buf: FinalityProofsBuf

, -} - -impl, TC: TargetClient

> - EquivocationDetectionLoop -{ - async fn ensure_finality_proofs_stream(&mut self) { - match self.finality_proofs_stream.ensure_stream(&self.source_client).await { - Ok(_) => {}, - Err(e) => { - log::error!( - target: "bridge", - "Could not connect to the {} `FinalityProofsStream`: {e:?}", - P::SOURCE_NAME, - ); - - // Reconnect to the source client if needed - handle_client_error(&mut self.source_client, e).await; - }, - } - } - - async fn best_finalized_target_block_number(&mut self) -> Option { - match self.target_client.best_finalized_header_number().await { - Ok(block_num) => Some(block_num), - Err(e) => { - log::error!( - target: "bridge", - "Could not read best finalized header number from {}: {e:?}", - P::TARGET_NAME, - ); - - // Reconnect target client and move on - handle_client_error(&mut self.target_client, e).await; - - None - }, - } - } - - async fn do_run(&mut self, tick: Duration, exit_signal: impl Future) { - let exit_signal = exit_signal.fuse(); - futures::pin_mut!(exit_signal); - - loop { - // Make sure that we are connected to the source finality proofs stream. - self.ensure_finality_proofs_stream().await; - // Check the status of the pending equivocation reports - self.reporter.process_pending_reports().await; - - // Update blocks range. 
- if let Some(block_number) = self.best_finalized_target_block_number().await { - self.from_block_num.get_or_insert(block_number); - self.until_block_num = Some(block_number); - } - let (from, until) = match (self.from_block_num, self.until_block_num) { - (Some(from), Some(until)) => (from, until), - _ => continue, - }; - - // Check the available blocks - let mut current_block_number = from; - while current_block_number <= until { - self.finality_proofs_buf.fill(&mut self.finality_proofs_stream); - let block_checker = BlockChecker::new(current_block_number); - let _ = block_checker - .run( - &mut self.source_client, - &mut self.target_client, - &mut self.finality_proofs_buf, - &mut self.reporter, - ) - .await; - current_block_number = current_block_number.saturating_add(1.into()); - } - self.from_block_num = Some(current_block_number); - - select_biased! { - _ = exit_signal => return, - _ = async_std::task::sleep(tick).fuse() => {}, - } - } - } - - pub async fn run( - source_client: SC, - target_client: TC, - tick: Duration, - exit_signal: impl Future, - ) -> Result<(), FailedClient> { - let mut equivocation_detection_loop = Self { - source_client, - target_client, - from_block_num: None, - until_block_num: None, - reporter: EquivocationsReporter::::new(), - finality_proofs_stream: FinalityProofsStream::new(), - finality_proofs_buf: FinalityProofsBuf::new(vec![]), - }; - - equivocation_detection_loop.do_run(tick, exit_signal).await; - Ok(()) - } -} - -/// Spawn the equivocations detection loop. -pub async fn run( - source_client: impl SourceClient

, - target_client: impl TargetClient

, - tick: Duration, - metrics_params: MetricsParams, - exit_signal: impl Future + 'static + Send, -) -> Result<(), relay_utils::Error> { - let exit_signal = exit_signal.shared(); - relay_utils::relay_loop(source_client, target_client) - .with_metrics(metrics_params) - .expose() - .await? - .run( - format!("{}_to_{}_EquivocationDetection", P::SOURCE_NAME, P::TARGET_NAME), - move |source_client, target_client, _metrics| { - EquivocationDetectionLoop::run( - source_client, - target_client, - tick, - exit_signal.clone(), - ) - }, - ) - .await -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::*; - use futures::{channel::mpsc::UnboundedSender, StreamExt}; - use std::{ - collections::{HashMap, VecDeque}, - sync::{Arc, Mutex}, - }; - - fn best_finalized_header_number( - best_finalized_headers: &Mutex>>, - exit_sender: &UnboundedSender<()>, - ) -> Result { - let mut best_finalized_headers = best_finalized_headers.lock().unwrap(); - let result = best_finalized_headers.pop_front().unwrap(); - if best_finalized_headers.is_empty() { - exit_sender.unbounded_send(()).unwrap(); - } - result - } - - #[async_std::test] - async fn multiple_blocks_are_checked_correctly() { - let best_finalized_headers = Arc::new(Mutex::new(VecDeque::from([Ok(10), Ok(12), Ok(13)]))); - let (exit_sender, exit_receiver) = futures::channel::mpsc::unbounded(); - - let source_client = TestSourceClient { - finality_proofs: Arc::new(Mutex::new(vec![ - TestFinalityProof(2, vec!["2-1"]), - TestFinalityProof(3, vec!["3-1", "3-2"]), - TestFinalityProof(4, vec!["4-1"]), - TestFinalityProof(5, vec!["5-1"]), - TestFinalityProof(6, vec!["6-1", "6-2"]), - TestFinalityProof(7, vec!["7-1", "7-2"]), - ])), - ..Default::default() - }; - let reported_equivocations = source_client.reported_equivocations.clone(); - let target_client = TestTargetClient { - best_finalized_header_number: Arc::new(move || { - best_finalized_header_number(&best_finalized_headers, &exit_sender) - }), - best_synced_header_hash: 
HashMap::from([ - (9, Ok(Some(1))), - (10, Ok(Some(3))), - (11, Ok(Some(5))), - (12, Ok(Some(6))), - ]), - finality_verification_context: HashMap::from([ - (9, Ok(TestFinalityVerificationContext { check_equivocations: true })), - (10, Ok(TestFinalityVerificationContext { check_equivocations: true })), - (11, Ok(TestFinalityVerificationContext { check_equivocations: false })), - (12, Ok(TestFinalityVerificationContext { check_equivocations: true })), - ]), - synced_headers_finality_info: HashMap::from([ - ( - 10, - Ok(vec![new_header_finality_info(2, None), new_header_finality_info(3, None)]), - ), - ( - 11, - Ok(vec![ - new_header_finality_info(4, None), - new_header_finality_info(5, Some(false)), - ]), - ), - (12, Ok(vec![new_header_finality_info(6, None)])), - (13, Ok(vec![new_header_finality_info(7, None)])), - ]), - ..Default::default() - }; - - assert!(run::( - source_client, - target_client, - Duration::from_secs(0), - MetricsParams { address: None, registry: Default::default() }, - exit_receiver.into_future().map(|(_, _)| ()), - ) - .await - .is_ok()); - assert_eq!( - *reported_equivocations.lock().unwrap(), - HashMap::from([ - (1, vec!["2-1", "3-1", "3-2"]), - (3, vec!["4-1", "5-1"]), - (6, vec!["7-1", "7-2"]) - ]) - ); - } - - #[async_std::test] - async fn blocks_following_error_are_checked_correctly() { - let best_finalized_headers = Mutex::new(VecDeque::from([Ok(10), Ok(11)])); - let (exit_sender, exit_receiver) = futures::channel::mpsc::unbounded(); - - let source_client = TestSourceClient { - finality_proofs: Arc::new(Mutex::new(vec![ - TestFinalityProof(2, vec!["2-1"]), - TestFinalityProof(3, vec!["3-1"]), - ])), - ..Default::default() - }; - let reported_equivocations = source_client.reported_equivocations.clone(); - let target_client = TestTargetClient { - best_finalized_header_number: Arc::new(move || { - best_finalized_header_number(&best_finalized_headers, &exit_sender) - }), - best_synced_header_hash: HashMap::from([(9, Ok(Some(1))), (10, 
Ok(Some(2)))]), - finality_verification_context: HashMap::from([ - (9, Ok(TestFinalityVerificationContext { check_equivocations: true })), - (10, Ok(TestFinalityVerificationContext { check_equivocations: true })), - ]), - synced_headers_finality_info: HashMap::from([ - (10, Err(TestClientError::NonConnection)), - (11, Ok(vec![new_header_finality_info(3, None)])), - ]), - ..Default::default() - }; - - assert!(run::( - source_client, - target_client, - Duration::from_secs(0), - MetricsParams { address: None, registry: Default::default() }, - exit_receiver.into_future().map(|(_, _)| ()), - ) - .await - .is_ok()); - assert_eq!(*reported_equivocations.lock().unwrap(), HashMap::from([(2, vec!["3-1"]),])); - } -} diff --git a/relays/equivocation/src/lib.rs b/relays/equivocation/src/lib.rs deleted file mode 100644 index 56a71ef3bc63c422e336c27891b58aee682d605a..0000000000000000000000000000000000000000 --- a/relays/equivocation/src/lib.rs +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2019-2023 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -mod block_checker; -mod equivocation_loop; -mod mock; -mod reporter; - -use async_trait::async_trait; -use bp_header_chain::{FinalityProof, FindEquivocations}; -use finality_relay::{FinalityPipeline, SourceClientBase}; -use relay_utils::{relay_loop::Client as RelayClient, MaybeConnectionError, TransactionTracker}; -use std::{fmt::Debug, time::Duration}; - -pub use equivocation_loop::run; - -#[cfg(not(test))] -const RECONNECT_DELAY: Duration = relay_utils::relay_loop::RECONNECT_DELAY; -#[cfg(test)] -const RECONNECT_DELAY: Duration = mock::TEST_RECONNECT_DELAY; - -pub trait EquivocationDetectionPipeline: FinalityPipeline { - /// Block number of the target chain. - type TargetNumber: relay_utils::BlockNumberBase; - /// The context needed for validating finality proofs. - type FinalityVerificationContext: Debug + Send; - /// The type of the equivocation proof. - type EquivocationProof: Clone + Debug + Send + Sync; - /// The equivocations finder. - type EquivocationsFinder: FindEquivocations< - Self::FinalityProof, - Self::FinalityVerificationContext, - Self::EquivocationProof, - >; -} - -type HeaderFinalityInfo

= bp_header_chain::HeaderFinalityInfo< -

::FinalityProof, -

::FinalityVerificationContext, ->; - -/// Source client used in equivocation detection loop. -#[async_trait] -pub trait SourceClient: SourceClientBase

{ - /// Transaction tracker to track submitted transactions. - type TransactionTracker: TransactionTracker; - - /// Report equivocation. - async fn report_equivocation( - &self, - at: P::Hash, - equivocation: P::EquivocationProof, - ) -> Result; -} - -/// Target client used in equivocation detection loop. -#[async_trait] -pub trait TargetClient: RelayClient { - /// Get the best finalized header number. - async fn best_finalized_header_number(&self) -> Result; - - /// Get the hash of the best source header known by the target at the provided block number. - async fn best_synced_header_hash( - &self, - at: P::TargetNumber, - ) -> Result, Self::Error>; - - /// Get the data stored by the target at the specified block for validating source finality - /// proofs. - async fn finality_verification_context( - &self, - at: P::TargetNumber, - ) -> Result; - - /// Get the finality info associated to the source headers synced with the target chain at the - /// specified block. - async fn synced_headers_finality_info( - &self, - at: P::TargetNumber, - ) -> Result>, Self::Error>; -} - -/// The context needed for finding equivocations inside finality proofs and reporting them. -#[derive(Debug, PartialEq)] -struct EquivocationReportingContext { - pub synced_header_hash: P::Hash, - pub synced_verification_context: P::FinalityVerificationContext, -} - -impl EquivocationReportingContext

{ - /// Try to get the `EquivocationReportingContext` used by the target chain - /// at the provided block. - pub async fn try_read_from_target>( - target_client: &TC, - at: P::TargetNumber, - ) -> Result, TC::Error> { - let maybe_best_synced_header_hash = target_client.best_synced_header_hash(at).await?; - Ok(match maybe_best_synced_header_hash { - Some(best_synced_header_hash) => Some(EquivocationReportingContext { - synced_header_hash: best_synced_header_hash, - synced_verification_context: target_client - .finality_verification_context(at) - .await?, - }), - None => None, - }) - } - - /// Update with the new context introduced by the `HeaderFinalityInfo

` if any. - pub fn update(&mut self, info: HeaderFinalityInfo

) { - if let Some(new_verification_context) = info.new_verification_context { - self.synced_header_hash = info.finality_proof.target_header_hash(); - self.synced_verification_context = new_verification_context; - } - } -} - -async fn handle_client_error(client: &mut C, e: C::Error) { - if e.is_connection_error() { - client.reconnect_until_success(RECONNECT_DELAY).await; - } else { - async_std::task::sleep(RECONNECT_DELAY).await; - } -} diff --git a/relays/equivocation/src/mock.rs b/relays/equivocation/src/mock.rs deleted file mode 100644 index ced5c6f3580652a0d044171171a2a87fab1854cf..0000000000000000000000000000000000000000 --- a/relays/equivocation/src/mock.rs +++ /dev/null @@ -1,285 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -#![cfg(test)] - -use crate::{EquivocationDetectionPipeline, HeaderFinalityInfo, SourceClient, TargetClient}; -use async_trait::async_trait; -use bp_header_chain::{FinalityProof, FindEquivocations}; -use finality_relay::{FinalityPipeline, SourceClientBase}; -use futures::{Stream, StreamExt}; -use relay_utils::{ - relay_loop::Client as RelayClient, HeaderId, MaybeConnectionError, TrackedTransactionStatus, - TransactionTracker, -}; -use std::{ - collections::HashMap, - pin::Pin, - sync::{Arc, Mutex}, - time::Duration, -}; - -pub type TestSourceHashAndNumber = u64; -pub type TestTargetNumber = u64; -pub type TestEquivocationProof = &'static str; - -pub const TEST_RECONNECT_DELAY: Duration = Duration::from_secs(0); - -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct TestFinalityProof(pub TestSourceHashAndNumber, pub Vec); - -impl FinalityProof for TestFinalityProof { - fn target_header_hash(&self) -> TestSourceHashAndNumber { - self.0 - } - - fn target_header_number(&self) -> TestSourceHashAndNumber { - self.0 - } -} - -#[derive(Debug, Clone, PartialEq)] -pub struct TestEquivocationDetectionPipeline; - -impl FinalityPipeline for TestEquivocationDetectionPipeline { - const SOURCE_NAME: &'static str = "TestSource"; - const TARGET_NAME: &'static str = "TestTarget"; - - type Hash = TestSourceHashAndNumber; - type Number = TestSourceHashAndNumber; - type FinalityProof = TestFinalityProof; -} - -#[derive(Clone, Debug, PartialEq)] -pub struct TestFinalityVerificationContext { - pub check_equivocations: bool, -} - -pub struct TestEquivocationsFinder; - -impl FindEquivocations - for TestEquivocationsFinder -{ - type Error = (); - - fn find_equivocations( - verification_context: &TestFinalityVerificationContext, - synced_proof: &TestFinalityProof, - source_proofs: &[TestFinalityProof], - ) -> Result, Self::Error> { - if verification_context.check_equivocations { - // Get the equivocations from the source proofs, in order to make sure - // that they are correctly provided. 
- if let Some(proof) = source_proofs.iter().find(|proof| proof.0 == synced_proof.0) { - return Ok(proof.1.clone()) - } - } - - Ok(vec![]) - } -} - -impl EquivocationDetectionPipeline for TestEquivocationDetectionPipeline { - type TargetNumber = TestTargetNumber; - type FinalityVerificationContext = TestFinalityVerificationContext; - type EquivocationProof = TestEquivocationProof; - type EquivocationsFinder = TestEquivocationsFinder; -} - -#[derive(Debug, Clone)] -pub enum TestClientError { - Connection, - NonConnection, -} - -impl MaybeConnectionError for TestClientError { - fn is_connection_error(&self) -> bool { - match self { - TestClientError::Connection => true, - TestClientError::NonConnection => false, - } - } -} - -#[derive(Clone)] -pub struct TestSourceClient { - pub num_reconnects: u32, - pub finality_proofs: Arc>>, - pub reported_equivocations: - Arc>>>, -} - -impl Default for TestSourceClient { - fn default() -> Self { - Self { - num_reconnects: 0, - finality_proofs: Arc::new(Mutex::new(vec![])), - reported_equivocations: Arc::new(Mutex::new(Default::default())), - } - } -} - -#[async_trait] -impl RelayClient for TestSourceClient { - type Error = TestClientError; - - async fn reconnect(&mut self) -> Result<(), Self::Error> { - self.num_reconnects += 1; - - Ok(()) - } -} - -#[async_trait] -impl SourceClientBase for TestSourceClient { - type FinalityProofsStream = Pin + 'static + Send>>; - - async fn finality_proofs(&self) -> Result { - let finality_proofs = std::mem::take(&mut *self.finality_proofs.lock().unwrap()); - Ok(futures::stream::iter(finality_proofs).boxed()) - } -} - -#[derive(Clone, Debug)] -pub struct TestTransactionTracker( - pub TrackedTransactionStatus>, -); - -impl Default for TestTransactionTracker { - fn default() -> TestTransactionTracker { - TestTransactionTracker(TrackedTransactionStatus::Finalized(Default::default())) - } -} - -#[async_trait] -impl TransactionTracker for TestTransactionTracker { - type HeaderId = HeaderId; - - async 
fn wait( - self, - ) -> TrackedTransactionStatus> { - self.0 - } -} - -#[async_trait] -impl SourceClient for TestSourceClient { - type TransactionTracker = TestTransactionTracker; - - async fn report_equivocation( - &self, - at: TestSourceHashAndNumber, - equivocation: TestEquivocationProof, - ) -> Result { - self.reported_equivocations - .lock() - .unwrap() - .entry(at) - .or_default() - .push(equivocation); - - Ok(TestTransactionTracker::default()) - } -} - -#[derive(Clone)] -pub struct TestTargetClient { - pub num_reconnects: u32, - pub best_finalized_header_number: - Arc Result + Send + Sync>, - pub best_synced_header_hash: - HashMap, TestClientError>>, - pub finality_verification_context: - HashMap>, - pub synced_headers_finality_info: HashMap< - TestTargetNumber, - Result>, TestClientError>, - >, -} - -impl Default for TestTargetClient { - fn default() -> Self { - Self { - num_reconnects: 0, - best_finalized_header_number: Arc::new(|| Ok(0)), - best_synced_header_hash: Default::default(), - finality_verification_context: Default::default(), - synced_headers_finality_info: Default::default(), - } - } -} - -#[async_trait] -impl RelayClient for TestTargetClient { - type Error = TestClientError; - - async fn reconnect(&mut self) -> Result<(), Self::Error> { - self.num_reconnects += 1; - - Ok(()) - } -} - -#[async_trait] -impl TargetClient for TestTargetClient { - async fn best_finalized_header_number(&self) -> Result { - (self.best_finalized_header_number)() - } - - async fn best_synced_header_hash( - &self, - at: TestTargetNumber, - ) -> Result, Self::Error> { - self.best_synced_header_hash - .get(&at) - .unwrap_or(&Err(TestClientError::NonConnection)) - .clone() - } - - async fn finality_verification_context( - &self, - at: TestTargetNumber, - ) -> Result { - self.finality_verification_context - .get(&at) - .unwrap_or(&Err(TestClientError::NonConnection)) - .clone() - } - - async fn synced_headers_finality_info( - &self, - at: TestTargetNumber, - ) -> Result>, 
Self::Error> { - self.synced_headers_finality_info - .get(&at) - .unwrap_or(&Err(TestClientError::NonConnection)) - .clone() - } -} - -pub fn new_header_finality_info( - source_hdr: TestSourceHashAndNumber, - check_following_equivocations: Option, -) -> HeaderFinalityInfo { - HeaderFinalityInfo:: { - finality_proof: TestFinalityProof(source_hdr, vec![]), - new_verification_context: check_following_equivocations.map( - |check_following_equivocations| TestFinalityVerificationContext { - check_equivocations: check_following_equivocations, - }, - ), - } -} diff --git a/relays/equivocation/src/reporter.rs b/relays/equivocation/src/reporter.rs deleted file mode 100644 index 9c4642383d1648b9eae81a5eb0b853fb07efd5d6..0000000000000000000000000000000000000000 --- a/relays/equivocation/src/reporter.rs +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2019-2023 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Helper struct used for submitting finality reports and tracking their status. 
- -use crate::{EquivocationDetectionPipeline, SourceClient}; - -use futures::FutureExt; -use relay_utils::{TrackedTransactionFuture, TrackedTransactionStatus, TransactionTracker}; -use std::{ - future::poll_fn, - task::{Context, Poll}, -}; - -pub struct EquivocationsReporter<'a, P: EquivocationDetectionPipeline, SC: SourceClient

> { - pending_reports: Vec>, -} - -impl<'a, P: EquivocationDetectionPipeline, SC: SourceClient

> EquivocationsReporter<'a, P, SC> { - pub fn new() -> Self { - Self { pending_reports: vec![] } - } - - /// Submit a `report_equivocation()` transaction to the source chain. - /// - /// We store the transaction tracker for future monitoring. - pub async fn submit_report( - &mut self, - source_client: &SC, - at: P::Hash, - equivocation: P::EquivocationProof, - ) -> Result<(), SC::Error> { - let pending_report = source_client.report_equivocation(at, equivocation).await?; - self.pending_reports.push(pending_report.wait()); - - Ok(()) - } - - fn do_process_pending_reports(&mut self, cx: &mut Context<'_>) -> Poll<()> { - self.pending_reports.retain_mut(|pending_report| { - match pending_report.poll_unpin(cx) { - Poll::Ready(tx_status) => { - match tx_status { - TrackedTransactionStatus::Lost => { - log::error!(target: "bridge", "Equivocation report tx was lost"); - }, - TrackedTransactionStatus::Finalized(id) => { - log::error!(target: "bridge", "Equivocation report tx was finalized in source block {id:?}"); - }, - } - - // The future was processed. Drop it. - false - }, - Poll::Pending => { - // The future is still pending. Retain it. - true - }, - } - }); - - Poll::Ready(()) - } - - /// Iterate through all the pending `report_equivocation()` transactions - /// and log the ones that finished. 
- pub async fn process_pending_reports(&mut self) { - poll_fn(|cx| self.do_process_pending_reports(cx)).await - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::*; - use relay_utils::HeaderId; - use std::sync::Mutex; - - #[async_std::test] - async fn process_pending_reports_works() { - let polled_reports = Mutex::new(vec![]); - let finished_reports = Mutex::new(vec![]); - - let mut reporter = - EquivocationsReporter:: { - pending_reports: vec![ - Box::pin(async { - polled_reports.lock().unwrap().push(1); - finished_reports.lock().unwrap().push(1); - TrackedTransactionStatus::Finalized(HeaderId(1, 1)) - }), - Box::pin(async { - polled_reports.lock().unwrap().push(2); - finished_reports.lock().unwrap().push(2); - TrackedTransactionStatus::Finalized(HeaderId(2, 2)) - }), - Box::pin(async { - polled_reports.lock().unwrap().push(3); - std::future::pending::<()>().await; - finished_reports.lock().unwrap().push(3); - TrackedTransactionStatus::Finalized(HeaderId(3, 3)) - }), - Box::pin(async { - polled_reports.lock().unwrap().push(4); - finished_reports.lock().unwrap().push(4); - TrackedTransactionStatus::Finalized(HeaderId(4, 4)) - }), - ], - }; - - reporter.process_pending_reports().await; - assert_eq!(*polled_reports.lock().unwrap(), vec![1, 2, 3, 4]); - assert_eq!(*finished_reports.lock().unwrap(), vec![1, 2, 4]); - assert_eq!(reporter.pending_reports.len(), 1); - } -} diff --git a/relays/finality/Cargo.toml b/relays/finality/Cargo.toml deleted file mode 100644 index 9e8bf56f53aaac0ec4fd1b9b51ff57334570340d..0000000000000000000000000000000000000000 --- a/relays/finality/Cargo.toml +++ /dev/null @@ -1,23 +0,0 @@ -[package] -name = "finality-relay" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -description = "Finality proofs relay" - -[lints] -workspace = true - -[dependencies] -async-std = "1.6.5" -async-trait = "0.1" -backoff = "0.4" -bp-header-chain = { path = 
"../../primitives/header-chain" } -futures = "0.3.30" -log = { workspace = true } -num-traits = "0.2" -relay-utils = { path = "../utils" } - -[dev-dependencies] -parking_lot = "0.12.1" diff --git a/relays/finality/README.md b/relays/finality/README.md deleted file mode 100644 index b501611ca026121654fc6858c71faea9a61b94fe..0000000000000000000000000000000000000000 --- a/relays/finality/README.md +++ /dev/null @@ -1,60 +0,0 @@ -# GRANDPA Finality Relay - -The finality relay is able to work with different finality engines. In the modern Substrate world they are GRANDPA -and BEEFY. Let's talk about GRANDPA here, because BEEFY relay and bridge BEEFY pallet are in development. - -In general, the relay works as follows: it connects to the source and target chain. The source chain must have the -[GRANDPA gadget](https://github.com/paritytech/finality-grandpa) running (so it can't be a parachain). The target -chain must have the [bridge GRANDPA pallet](../../modules/grandpa/) deployed at its runtime. The relay subscribes -to the GRANDPA finality notifications at the source chain and when the new justification is received, it is submitted -to the pallet at the target chain. - -Apart from that, the relay is watching for every source header that is missing at target. If it finds the missing -mandatory header (header that is changing the current GRANDPA validators set), it submits the justification for -this header. The case when the source node can't return the mandatory justification is considered a fatal error, -because the pallet can't proceed without it. - -More: [GRANDPA Finality Relay Sequence Diagram](../../docs/grandpa-finality-relay.html). - -## How to Use the Finality Relay - -The most important trait is the [`FinalitySyncPipeline`](./src/lib.rs), which defines the basic primitives of the -source chain (like block hash and number) and the type of finality proof (GRANDPA justification or MMR proof). 
Once -that is defined, there are two other traits - [`SourceClient`](./src/finality_loop.rs) and -[`TargetClient`](./src/finality_loop.rs). - -The `SourceClient` represents the Substrate node client that connects to the source chain. The client needs to -be able to return the best finalized header number, finalized header and its finality proof and the stream of -finality proofs. - -The `TargetClient` implementation must be able to craft finality delivery transaction and submit it to the target -node. The transaction is then tracked by the relay until it is mined and finalized. - -The main entrypoint for the crate is the [`run` function](./src/finality_loop.rs), which takes source and target -clients and [`FinalitySyncParams`](./src/finality_loop.rs) parameters. The most important parameter is the -`only_mandatory_headers` - it is set to `true`, the relay will only submit mandatory headers. Since transactions -with mandatory headers are fee-free, the cost of running such relay is zero (in terms of fees). If a similar, -`only_free_headers` parameter, is set to `true`, then free headers (if configured in the runtime) are also -relayed. - -## Finality Relay Metrics - -Finality relay provides several metrics. Metrics names depend on names of source and target chains. The list below -shows metrics names for Rococo (source chain) to BridgeHubWestend (target chain) finality relay. For other -chains, simply change chain names. So the metrics are: - -- `Rococo_to_BridgeHubWestend_Sync_best_source_block_number` - returns best finalized source chain (Rococo) block number, known - to the relay. If relay is running in [on-demand mode](../bin-substrate/src/cli/relay_headers_and_messages/), the - number may not match (it may be far behind) the actual best finalized number; - -- `Rococo_to_BridgeHubWestend_Sync_best_source_at_target_block_number` - returns best finalized source chain (Rococo) block - number that is known to the bridge GRANDPA pallet at the target chain. 
- -- `Rococo_to_BridgeHubWestend_Sync_is_source_and_source_at_target_using_different_forks` - if this metrics is set to `1`, then - the best source chain header, known to the target chain doesn't match the same-number-header at the source chain. - It means that the GRANDPA validators set has crafted the duplicate justification and it has been submitted to the - target chain. Normally (if majority of validators are honest and if you're running finality relay without large - breaks) this shall not happen and the metric will have `0` value. - -If relay operates properly, you should see that the `Rococo_to_BridgeHubWestend_Sync_best_source_at_target_block_number` -tries to reach the `Rococo_to_BridgeHubWestend_Sync_best_source_block_number`. And the latter one always increases. diff --git a/relays/finality/src/base.rs b/relays/finality/src/base.rs deleted file mode 100644 index 4253468eaace1ef2a2adc47790f7e16c38160200..0000000000000000000000000000000000000000 --- a/relays/finality/src/base.rs +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2019-2023 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use async_trait::async_trait; -use bp_header_chain::FinalityProof; -use futures::Stream; -use relay_utils::relay_loop::Client as RelayClient; -use std::fmt::Debug; - -/// Base finality pipeline. 
-pub trait FinalityPipeline: 'static + Clone + Debug + Send + Sync { - /// Name of the finality proofs source. - const SOURCE_NAME: &'static str; - /// Name of the finality proofs target. - const TARGET_NAME: &'static str; - - /// Synced headers are identified by this hash. - type Hash: Eq + Clone + Copy + Send + Sync + Debug; - /// Synced headers are identified by this number. - type Number: relay_utils::BlockNumberBase; - /// Finality proof type. - type FinalityProof: FinalityProof; -} - -/// Source client used in finality related loops. -#[async_trait] -pub trait SourceClientBase: RelayClient { - /// Stream of new finality proofs. The stream is allowed to miss proofs for some - /// headers, even if those headers are mandatory. - type FinalityProofsStream: Stream + Send + Unpin; - - /// Subscribe to new finality proofs. - async fn finality_proofs(&self) -> Result; -} - -/// Target client used in finality related loops. -#[async_trait] -pub trait TargetClientBase: RelayClient {} diff --git a/relays/finality/src/finality_loop.rs b/relays/finality/src/finality_loop.rs deleted file mode 100644 index 8b3def868a453703600850a463cf2f07988811df..0000000000000000000000000000000000000000 --- a/relays/finality/src/finality_loop.rs +++ /dev/null @@ -1,798 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! The loop basically reads all missing headers and their finality proofs from the source client. -//! The proof for the best possible header is then submitted to the target node. The only exception -//! is the mandatory headers, which we always submit to the target node. For such headers, we -//! assume that the persistent proof either exists, or will eventually become available. - -use crate::{sync_loop_metrics::SyncLoopMetrics, Error, FinalitySyncPipeline, SourceHeader}; - -use crate::{ - base::SourceClientBase, - finality_proofs::{FinalityProofsBuf, FinalityProofsStream}, - headers::{JustifiedHeader, JustifiedHeaderSelector}, -}; -use async_trait::async_trait; -use backoff::{backoff::Backoff, ExponentialBackoff}; -use futures::{future::Fuse, select, Future, FutureExt}; -use num_traits::{Saturating, Zero}; -use relay_utils::{ - metrics::MetricsParams, relay_loop::Client as RelayClient, retry_backoff, FailedClient, - HeaderId, MaybeConnectionError, TrackedTransactionStatus, TransactionTracker, -}; -use std::{ - fmt::Debug, - time::{Duration, Instant}, -}; - -/// Type of headers that we relay. -#[derive(Debug, Clone, Copy, PartialEq)] -pub enum HeadersToRelay { - /// Relay all headers. - All, - /// Relay only mandatory headers. - Mandatory, - /// Relay only free (including mandatory) headers. - Free, -} - -/// Finality proof synchronization loop parameters. -#[derive(Debug, Clone)] -pub struct FinalitySyncParams { - /// Interval at which we check updates on both clients. Normally should be larger than - /// `min(source_block_time, target_block_time)`. - /// - /// This parameter may be used to limit transactions rate. Increase the value && you'll get - /// infrequent updates => sparse headers => potential slow down of bridge applications, but - /// pallet storage won't be super large. 
Decrease the value to near `source_block_time` and - /// you'll get transaction for (almost) every block of the source chain => all source headers - /// will be known to the target chain => bridge applications will run faster, but pallet - /// storage may explode (but if pruning is there, then it's fine). - pub tick: Duration, - /// Number of finality proofs to keep in internal buffer between loop iterations. - /// - /// While in "major syncing" state, we still read finality proofs from the stream. They're - /// stored in the internal buffer between loop iterations. When we're close to the tip of the - /// chain, we may meet finality delays if headers are not finalized frequently. So instead of - /// waiting for next finality proof to appear in the stream, we may use existing proof from - /// that buffer. - pub recent_finality_proofs_limit: usize, - /// Timeout before we treat our transactions as lost and restart the whole sync process. - pub stall_timeout: Duration, - /// If true, only mandatory headers are relayed. - pub headers_to_relay: HeadersToRelay, -} - -/// Source client used in finality synchronization loop. -#[async_trait] -pub trait SourceClient: SourceClientBase

{ - /// Get best finalized block number. - async fn best_finalized_block_number(&self) -> Result; - - /// Get canonical header and its finality proof by number. - async fn header_and_finality_proof( - &self, - number: P::Number, - ) -> Result<(P::Header, Option), Self::Error>; -} - -/// Target client used in finality synchronization loop. -#[async_trait] -pub trait TargetClient: RelayClient { - /// Transaction tracker to track submitted transactions. - type TransactionTracker: TransactionTracker; - - /// Get best finalized source block number. - async fn best_finalized_source_block_id( - &self, - ) -> Result, Self::Error>; - - /// Get free source headers submission interval, if it is configured in the - /// target runtime. - async fn free_source_headers_interval(&self) -> Result, Self::Error>; - - /// Submit header finality proof. - async fn submit_finality_proof( - &self, - header: P::Header, - proof: P::FinalityProof, - is_free_execution_expected: bool, - ) -> Result; -} - -/// Return prefix that will be used by default to expose Prometheus metrics of the finality proofs -/// sync loop. -pub fn metrics_prefix() -> String { - format!("{}_to_{}_Sync", P::SOURCE_NAME, P::TARGET_NAME) -} - -/// Finality sync information. -pub struct SyncInfo { - /// Best finalized header at the source client. - pub best_number_at_source: P::Number, - /// Best source header, known to the target client. - pub best_number_at_target: P::Number, - /// Whether the target client follows the same fork as the source client do. - pub is_using_same_fork: bool, -} - -impl SyncInfo

{ - /// Checks if both clients are on the same fork. - async fn is_on_same_fork>( - source_client: &SC, - id_at_target: &HeaderId, - ) -> Result { - let header_at_source = source_client.header_and_finality_proof(id_at_target.0).await?.0; - let header_hash_at_source = header_at_source.hash(); - Ok(if id_at_target.1 == header_hash_at_source { - true - } else { - log::error!( - target: "bridge", - "Source node ({}) and pallet at target node ({}) have different headers at the same height {:?}: \ - at-source {:?} vs at-target {:?}", - P::SOURCE_NAME, - P::TARGET_NAME, - id_at_target.0, - header_hash_at_source, - id_at_target.1, - ); - - false - }) - } - - async fn new, TC: TargetClient

>( - source_client: &SC, - target_client: &TC, - ) -> Result> { - let best_number_at_source = - source_client.best_finalized_block_number().await.map_err(Error::Source)?; - let best_id_at_target = - target_client.best_finalized_source_block_id().await.map_err(Error::Target)?; - let best_number_at_target = best_id_at_target.0; - - let is_using_same_fork = Self::is_on_same_fork(source_client, &best_id_at_target) - .await - .map_err(Error::Source)?; - - Ok(Self { best_number_at_source, best_number_at_target, is_using_same_fork }) - } - - fn update_metrics(&self, metrics_sync: &Option) { - if let Some(metrics_sync) = metrics_sync { - metrics_sync.update_best_block_at_source(self.best_number_at_source); - metrics_sync.update_best_block_at_target(self.best_number_at_target); - metrics_sync.update_using_same_fork(self.is_using_same_fork); - } - } - - pub fn num_headers(&self) -> P::Number { - self.best_number_at_source.saturating_sub(self.best_number_at_target) - } -} - -/// Information about transaction that we have submitted. -#[derive(Debug, Clone)] -pub struct Transaction { - /// Submitted transaction tracker. - tracker: Tracker, - /// The number of the header we have submitted. - header_number: Number, -} - -impl Transaction { - pub async fn submit< - P: FinalitySyncPipeline, - TC: TargetClient, - >( - target_client: &TC, - header: P::Header, - justification: P::FinalityProof, - is_free_execution_expected: bool, - ) -> Result { - let header_number = header.number(); - log::debug!( - target: "bridge", - "Going to submit finality proof of {} header #{:?} to {}", - P::SOURCE_NAME, - header_number, - P::TARGET_NAME, - ); - - let tracker = target_client - .submit_finality_proof(header, justification, is_free_execution_expected) - .await?; - Ok(Transaction { tracker, header_number }) - } - - async fn track< - P: FinalitySyncPipeline, - SC: SourceClient

, - TC: TargetClient

, - >( - self, - target_client: TC, - ) -> Result<(), Error> { - match self.tracker.wait().await { - TrackedTransactionStatus::Finalized(_) => { - // The transaction has been finalized, but it may have been finalized in the - // "failed" state. So let's check if the block number was actually updated. - target_client - .best_finalized_source_block_id() - .await - .map_err(Error::Target) - .and_then(|best_id_at_target| { - if self.header_number > best_id_at_target.0 { - return Err(Error::ProofSubmissionTxFailed { - submitted_number: self.header_number, - best_number_at_target: best_id_at_target.0, - }) - } - Ok(()) - }) - }, - TrackedTransactionStatus::Lost => Err(Error::ProofSubmissionTxLost), - } - } -} - -/// Finality synchronization loop state. -struct FinalityLoop, TC: TargetClient

> { - source_client: SC, - target_client: TC, - - sync_params: FinalitySyncParams, - metrics_sync: Option, - - progress: (Instant, Option), - retry_backoff: ExponentialBackoff, - finality_proofs_stream: FinalityProofsStream, - finality_proofs_buf: FinalityProofsBuf

, - best_submitted_number: Option, -} - -impl, TC: TargetClient

> FinalityLoop { - pub fn new( - source_client: SC, - target_client: TC, - sync_params: FinalitySyncParams, - metrics_sync: Option, - ) -> Self { - Self { - source_client, - target_client, - sync_params, - metrics_sync, - progress: (Instant::now(), None), - retry_backoff: retry_backoff(), - finality_proofs_stream: FinalityProofsStream::new(), - finality_proofs_buf: FinalityProofsBuf::new(vec![]), - best_submitted_number: None, - } - } - - fn update_progress(&mut self, info: &SyncInfo

) { - let (prev_time, prev_best_number_at_target) = self.progress; - let now = Instant::now(); - - let needs_update = now - prev_time > Duration::from_secs(10) || - prev_best_number_at_target - .map(|prev_best_number_at_target| { - info.best_number_at_target.saturating_sub(prev_best_number_at_target) > - 10.into() - }) - .unwrap_or(true); - - if !needs_update { - return - } - - log::info!( - target: "bridge", - "Synced {:?} of {:?} headers", - info.best_number_at_target, - info.best_number_at_source, - ); - - self.progress = (now, Some(info.best_number_at_target)) - } - - pub async fn select_header_to_submit( - &mut self, - info: &SyncInfo

, - free_headers_interval: Option, - ) -> Result>, Error> { - // to see that the loop is progressing - log::trace!( - target: "bridge", - "Considering range of headers ({}; {}]", - info.best_number_at_target, - info.best_number_at_source - ); - - // read missing headers - let selector = JustifiedHeaderSelector::new::( - &self.source_client, - info, - self.sync_params.headers_to_relay, - free_headers_interval, - ) - .await?; - // if we see that the header schedules GRANDPA change, we need to submit it - if self.sync_params.headers_to_relay == HeadersToRelay::Mandatory { - return Ok(selector.select_mandatory()) - } - - // all headers that are missing from the target client are non-mandatory - // => even if we have already selected some header and its persistent finality proof, - // we may try to select better header by reading non-persistent proofs from the stream - self.finality_proofs_buf.fill(&mut self.finality_proofs_stream); - let maybe_justified_header = selector.select( - info, - self.sync_params.headers_to_relay, - free_headers_interval, - &self.finality_proofs_buf, - ); - - // remove obsolete 'recent' finality proofs + keep its size under certain limit - let oldest_finality_proof_to_keep = maybe_justified_header - .as_ref() - .map(|justified_header| justified_header.number()) - .unwrap_or(info.best_number_at_target); - self.finality_proofs_buf.prune( - oldest_finality_proof_to_keep, - Some(self.sync_params.recent_finality_proofs_limit), - ); - - Ok(maybe_justified_header) - } - - pub async fn run_iteration( - &mut self, - free_headers_interval: Option, - ) -> Result< - Option>, - Error, - > { - // read best source headers ids from source and target nodes - let info = SyncInfo::new(&self.source_client, &self.target_client).await?; - info.update_metrics(&self.metrics_sync); - self.update_progress(&info); - - // if we have already submitted header, then we just need to wait for it - // if we're waiting too much, then we believe our transaction has been lost and 
restart sync - if Some(info.best_number_at_target) < self.best_submitted_number { - return Ok(None) - } - - // submit new header if we have something new - match self.select_header_to_submit(&info, free_headers_interval).await? { - Some(header) => { - let transaction = Transaction::submit( - &self.target_client, - header.header, - header.proof, - self.sync_params.headers_to_relay == HeadersToRelay::Free, - ) - .await - .map_err(Error::Target)?; - self.best_submitted_number = Some(transaction.header_number); - Ok(Some(transaction)) - }, - None => Ok(None), - } - } - - async fn ensure_finality_proofs_stream(&mut self) -> Result<(), FailedClient> { - if let Err(e) = self.finality_proofs_stream.ensure_stream(&self.source_client).await { - if e.is_connection_error() { - return Err(FailedClient::Source) - } - } - - Ok(()) - } - - /// Run finality relay loop until connection to one of nodes is lost. - async fn run_until_connection_lost( - &mut self, - exit_signal: impl Future, - ) -> Result<(), FailedClient> { - self.ensure_finality_proofs_stream().await?; - let proof_submission_tx_tracker = Fuse::terminated(); - let exit_signal = exit_signal.fuse(); - futures::pin_mut!(exit_signal, proof_submission_tx_tracker); - - let free_headers_interval = free_headers_interval(&self.target_client).await?; - - loop { - // run loop iteration - let next_tick = match self.run_iteration(free_headers_interval).await { - Ok(Some(tx)) => { - proof_submission_tx_tracker - .set(tx.track::(self.target_client.clone()).fuse()); - self.retry_backoff.reset(); - self.sync_params.tick - }, - Ok(None) => { - self.retry_backoff.reset(); - self.sync_params.tick - }, - Err(error) => { - log::error!(target: "bridge", "Finality sync loop iteration has failed with error: {:?}", error); - error.fail_if_connection_error()?; - self.retry_backoff - .next_backoff() - .unwrap_or(relay_utils::relay_loop::RECONNECT_DELAY) - }, - }; - self.ensure_finality_proofs_stream().await?; - - // wait till exit signal, or new 
source block - select! { - proof_submission_result = proof_submission_tx_tracker => { - if let Err(e) = proof_submission_result { - log::error!( - target: "bridge", - "Finality sync proof submission tx to {} has failed with error: {:?}.", - P::TARGET_NAME, - e, - ); - self.best_submitted_number = None; - e.fail_if_connection_error()?; - } - }, - _ = async_std::task::sleep(next_tick).fuse() => {}, - _ = exit_signal => return Ok(()), - } - } - } - - pub async fn run( - source_client: SC, - target_client: TC, - sync_params: FinalitySyncParams, - metrics_sync: Option, - exit_signal: impl Future, - ) -> Result<(), FailedClient> { - let mut finality_loop = Self::new(source_client, target_client, sync_params, metrics_sync); - finality_loop.run_until_connection_lost(exit_signal).await - } -} - -async fn free_headers_interval( - target_client: &impl TargetClient

, -) -> Result, FailedClient> { - match target_client.free_source_headers_interval().await { - Ok(Some(free_headers_interval)) if !free_headers_interval.is_zero() => { - log::trace!( - target: "bridge", - "Free headers interval for {} headers at {} is: {:?}", - P::SOURCE_NAME, - P::TARGET_NAME, - free_headers_interval, - ); - Ok(Some(free_headers_interval)) - }, - Ok(Some(_free_headers_interval)) => { - log::trace!( - target: "bridge", - "Free headers interval for {} headers at {} is zero. Not submitting any free headers", - P::SOURCE_NAME, - P::TARGET_NAME, - ); - Ok(None) - }, - Ok(None) => { - log::trace!( - target: "bridge", - "Free headers interval for {} headers at {} is None. Not submitting any free headers", - P::SOURCE_NAME, - P::TARGET_NAME, - ); - - Ok(None) - }, - Err(e) => { - log::error!( - target: "bridge", - "Failed to read free headers interval for {} headers at {}: {:?}", - P::SOURCE_NAME, - P::TARGET_NAME, - e, - ); - Err(FailedClient::Target) - }, - } -} - -/// Run finality proofs synchronization loop. -pub async fn run( - source_client: impl SourceClient

, - target_client: impl TargetClient

, - sync_params: FinalitySyncParams, - metrics_params: MetricsParams, - exit_signal: impl Future + 'static + Send, -) -> Result<(), relay_utils::Error> { - let exit_signal = exit_signal.shared(); - relay_utils::relay_loop(source_client, target_client) - .with_metrics(metrics_params) - .loop_metric(SyncLoopMetrics::new( - Some(&metrics_prefix::

()), - "source", - "source_at_target", - )?)? - .expose() - .await? - .run(metrics_prefix::

(), move |source_client, target_client, metrics| { - FinalityLoop::run( - source_client, - target_client, - sync_params.clone(), - metrics, - exit_signal.clone(), - ) - }) - .await -} - -#[cfg(test)] -mod tests { - use super::*; - - use crate::mock::*; - use futures::{FutureExt, StreamExt}; - use parking_lot::Mutex; - use relay_utils::{FailedClient, HeaderId, TrackedTransactionStatus}; - use std::{collections::HashMap, sync::Arc}; - - fn prepare_test_clients( - exit_sender: futures::channel::mpsc::UnboundedSender<()>, - state_function: impl Fn(&mut ClientsData) -> bool + Send + Sync + 'static, - source_headers: HashMap)>, - ) -> (TestSourceClient, TestTargetClient) { - let internal_state_function: Arc = - Arc::new(move |data| { - if state_function(data) { - exit_sender.unbounded_send(()).unwrap(); - } - }); - let clients_data = Arc::new(Mutex::new(ClientsData { - source_best_block_number: 10, - source_headers, - source_proofs: vec![TestFinalityProof(12), TestFinalityProof(14)], - - target_best_block_id: HeaderId(5, 5), - target_headers: vec![], - target_transaction_tracker: TestTransactionTracker( - TrackedTransactionStatus::Finalized(Default::default()), - ), - })); - ( - TestSourceClient { - on_method_call: internal_state_function.clone(), - data: clients_data.clone(), - }, - TestTargetClient { on_method_call: internal_state_function, data: clients_data }, - ) - } - - fn test_sync_params() -> FinalitySyncParams { - FinalitySyncParams { - tick: Duration::from_secs(0), - recent_finality_proofs_limit: 1024, - stall_timeout: Duration::from_secs(1), - headers_to_relay: HeadersToRelay::All, - } - } - - fn run_sync_loop( - state_function: impl Fn(&mut ClientsData) -> bool + Send + Sync + 'static, - ) -> (ClientsData, Result<(), FailedClient>) { - let (exit_sender, exit_receiver) = futures::channel::mpsc::unbounded(); - let (source_client, target_client) = prepare_test_clients( - exit_sender, - state_function, - vec![ - (5, (TestSourceHeader(false, 5, 5), None)), - (6, 
(TestSourceHeader(false, 6, 6), None)), - (7, (TestSourceHeader(false, 7, 7), Some(TestFinalityProof(7)))), - (8, (TestSourceHeader(true, 8, 8), Some(TestFinalityProof(8)))), - (9, (TestSourceHeader(false, 9, 9), Some(TestFinalityProof(9)))), - (10, (TestSourceHeader(false, 10, 10), None)), - ] - .into_iter() - .collect(), - ); - let sync_params = test_sync_params(); - - let clients_data = source_client.data.clone(); - let result = async_std::task::block_on(FinalityLoop::run( - source_client, - target_client, - sync_params, - None, - exit_receiver.into_future().map(|(_, _)| ()), - )); - - let clients_data = clients_data.lock().clone(); - (clients_data, result) - } - - #[test] - fn finality_sync_loop_works() { - let (client_data, result) = run_sync_loop(|data| { - // header#7 has persistent finality proof, but it isn't mandatory => it isn't submitted, - // because header#8 has persistent finality proof && it is mandatory => it is submitted - // header#9 has persistent finality proof, but it isn't mandatory => it is submitted, - // because there are no more persistent finality proofs - // - // once this ^^^ is done, we generate more blocks && read proof for blocks 12 and 14 - // from the stream - if data.target_best_block_id.0 == 9 { - data.source_best_block_number = 14; - data.source_headers.insert(11, (TestSourceHeader(false, 11, 11), None)); - data.source_headers - .insert(12, (TestSourceHeader(false, 12, 12), Some(TestFinalityProof(12)))); - data.source_headers.insert(13, (TestSourceHeader(false, 13, 13), None)); - data.source_headers - .insert(14, (TestSourceHeader(false, 14, 14), Some(TestFinalityProof(14)))); - } - // once this ^^^ is done, we generate more blocks && read persistent proof for block 16 - if data.target_best_block_id.0 == 14 { - data.source_best_block_number = 17; - data.source_headers.insert(15, (TestSourceHeader(false, 15, 15), None)); - data.source_headers - .insert(16, (TestSourceHeader(false, 16, 16), Some(TestFinalityProof(16)))); - 
data.source_headers.insert(17, (TestSourceHeader(false, 17, 17), None)); - } - - data.target_best_block_id.0 == 16 - }); - - assert_eq!(result, Ok(())); - assert_eq!( - client_data.target_headers, - vec![ - // before adding 11..14: finality proof for mandatory header#8 - (TestSourceHeader(true, 8, 8), TestFinalityProof(8)), - // before adding 11..14: persistent finality proof for non-mandatory header#9 - (TestSourceHeader(false, 9, 9), TestFinalityProof(9)), - // after adding 11..14: ephemeral finality proof for non-mandatory header#14 - (TestSourceHeader(false, 14, 14), TestFinalityProof(14)), - // after adding 15..17: persistent finality proof for non-mandatory header#16 - (TestSourceHeader(false, 16, 16), TestFinalityProof(16)), - ], - ); - } - - fn run_headers_to_relay_mode_test( - headers_to_relay: HeadersToRelay, - has_mandatory_headers: bool, - ) -> Option> { - let (exit_sender, _) = futures::channel::mpsc::unbounded(); - let (source_client, target_client) = prepare_test_clients( - exit_sender, - |_| false, - vec![ - (6, (TestSourceHeader(false, 6, 6), Some(TestFinalityProof(6)))), - (7, (TestSourceHeader(false, 7, 7), Some(TestFinalityProof(7)))), - (8, (TestSourceHeader(has_mandatory_headers, 8, 8), Some(TestFinalityProof(8)))), - (9, (TestSourceHeader(false, 9, 9), Some(TestFinalityProof(9)))), - (10, (TestSourceHeader(false, 10, 10), Some(TestFinalityProof(10)))), - ] - .into_iter() - .collect(), - ); - async_std::task::block_on(async { - let mut finality_loop = FinalityLoop::new( - source_client, - target_client, - FinalitySyncParams { - tick: Duration::from_secs(0), - recent_finality_proofs_limit: 0, - stall_timeout: Duration::from_secs(0), - headers_to_relay, - }, - None, - ); - let info = SyncInfo { - best_number_at_source: 10, - best_number_at_target: 5, - is_using_same_fork: true, - }; - finality_loop.select_header_to_submit(&info, Some(3)).await.unwrap() - }) - } - - #[test] - fn select_header_to_submit_may_select_non_mandatory_header() { - 
assert_eq!(run_headers_to_relay_mode_test(HeadersToRelay::Mandatory, false), None); - assert_eq!( - run_headers_to_relay_mode_test(HeadersToRelay::Free, false), - Some(JustifiedHeader { - header: TestSourceHeader(false, 10, 10), - proof: TestFinalityProof(10) - }), - ); - assert_eq!( - run_headers_to_relay_mode_test(HeadersToRelay::All, false), - Some(JustifiedHeader { - header: TestSourceHeader(false, 10, 10), - proof: TestFinalityProof(10) - }), - ); - } - - #[test] - fn select_header_to_submit_may_select_mandatory_header() { - assert_eq!( - run_headers_to_relay_mode_test(HeadersToRelay::Mandatory, true), - Some(JustifiedHeader { - header: TestSourceHeader(true, 8, 8), - proof: TestFinalityProof(8) - }), - ); - assert_eq!( - run_headers_to_relay_mode_test(HeadersToRelay::Free, true), - Some(JustifiedHeader { - header: TestSourceHeader(true, 8, 8), - proof: TestFinalityProof(8) - }), - ); - assert_eq!( - run_headers_to_relay_mode_test(HeadersToRelay::All, true), - Some(JustifiedHeader { - header: TestSourceHeader(true, 8, 8), - proof: TestFinalityProof(8) - }), - ); - } - - #[test] - fn different_forks_at_source_and_at_target_are_detected() { - let (exit_sender, _exit_receiver) = futures::channel::mpsc::unbounded(); - let (source_client, target_client) = prepare_test_clients( - exit_sender, - |_| false, - vec![ - (5, (TestSourceHeader(false, 5, 42), None)), - (6, (TestSourceHeader(false, 6, 6), None)), - (7, (TestSourceHeader(false, 7, 7), None)), - (8, (TestSourceHeader(false, 8, 8), None)), - (9, (TestSourceHeader(false, 9, 9), None)), - (10, (TestSourceHeader(false, 10, 10), None)), - ] - .into_iter() - .collect(), - ); - - let metrics_sync = SyncLoopMetrics::new(None, "source", "target").unwrap(); - async_std::task::block_on(async { - let mut finality_loop = FinalityLoop::new( - source_client, - target_client, - test_sync_params(), - Some(metrics_sync.clone()), - ); - finality_loop.run_iteration(None).await.unwrap() - }); - - 
assert!(!metrics_sync.is_using_same_fork()); - } -} diff --git a/relays/finality/src/finality_proofs.rs b/relays/finality/src/finality_proofs.rs deleted file mode 100644 index e78cf8d62790dfc5ad665b1d4f298a5abe592164..0000000000000000000000000000000000000000 --- a/relays/finality/src/finality_proofs.rs +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright 2019-2023 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::{base::SourceClientBase, FinalityPipeline}; - -use bp_header_chain::FinalityProof; -use futures::{FutureExt, Stream, StreamExt}; -use std::pin::Pin; - -/// Source finality proofs stream that may be restarted. -#[derive(Default)] -pub struct FinalityProofsStream> { - /// The underlying stream. 
- stream: Option>>, -} - -impl> FinalityProofsStream { - pub fn new() -> Self { - Self { stream: None } - } - - pub fn from_stream(stream: SC::FinalityProofsStream) -> Self { - Self { stream: Some(Box::pin(stream)) } - } - - fn next(&mut self) -> Option<::Item> { - let stream = match &mut self.stream { - Some(stream) => stream, - None => return None, - }; - - match stream.next().now_or_never() { - Some(Some(finality_proof)) => Some(finality_proof), - Some(None) => { - self.stream = None; - None - }, - None => None, - } - } - - pub async fn ensure_stream(&mut self, source_client: &SC) -> Result<(), SC::Error> { - if self.stream.is_none() { - log::warn!(target: "bridge", "{} finality proofs stream is being started / restarted", - P::SOURCE_NAME); - - let stream = source_client.finality_proofs().await.map_err(|error| { - log::error!( - target: "bridge", - "Failed to subscribe to {} justifications: {:?}", - P::SOURCE_NAME, - error, - ); - - error - })?; - self.stream = Some(Box::pin(stream)); - } - - Ok(()) - } -} - -/// Source finality proofs buffer. -pub struct FinalityProofsBuf { - /// Proofs buffer. Ordered by target header number. - buf: Vec, -} - -impl FinalityProofsBuf

{ - pub fn new(buf: Vec) -> Self { - Self { buf } - } - - pub fn buf(&self) -> &Vec { - &self.buf - } - - pub fn fill>(&mut self, stream: &mut FinalityProofsStream) { - let mut proofs_count = 0; - let mut first_header_number = None; - let mut last_header_number = None; - while let Some(finality_proof) = stream.next() { - let target_header_number = finality_proof.target_header_number(); - first_header_number.get_or_insert(target_header_number); - last_header_number = Some(target_header_number); - proofs_count += 1; - - self.buf.push(finality_proof); - } - - if proofs_count != 0 { - log::trace!( - target: "bridge", - "Read {} finality proofs from {} finality stream for headers in range [{:?}; {:?}]", - proofs_count, - P::SOURCE_NAME, - first_header_number, - last_header_number, - ); - } - } - - /// Prune all finality proofs that target header numbers older than `first_to_keep`. - pub fn prune(&mut self, first_to_keep: P::Number, maybe_buf_limit: Option) { - let first_to_keep_idx = self - .buf - .binary_search_by_key(&first_to_keep, |hdr| hdr.target_header_number()) - .map(|idx| idx + 1) - .unwrap_or_else(|idx| idx); - let buf_limit_idx = match maybe_buf_limit { - Some(buf_limit) => self.buf.len().saturating_sub(buf_limit), - None => 0, - }; - - self.buf = self.buf.split_off(std::cmp::max(first_to_keep_idx, buf_limit_idx)); - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::*; - - #[test] - fn finality_proofs_buf_fill_works() { - // when stream is currently empty, nothing is changed - let mut finality_proofs_buf = - FinalityProofsBuf:: { buf: vec![TestFinalityProof(1)] }; - let mut stream = - FinalityProofsStream::::from_stream( - Box::pin(futures::stream::pending()), - ); - finality_proofs_buf.fill(&mut stream); - assert_eq!(finality_proofs_buf.buf, vec![TestFinalityProof(1)]); - assert!(stream.stream.is_some()); - - // when stream has entry with target, it is added to the recent proofs container - let mut stream = - 
FinalityProofsStream::::from_stream( - Box::pin( - futures::stream::iter(vec![TestFinalityProof(4)]) - .chain(futures::stream::pending()), - ), - ); - finality_proofs_buf.fill(&mut stream); - assert_eq!(finality_proofs_buf.buf, vec![TestFinalityProof(1), TestFinalityProof(4)]); - assert!(stream.stream.is_some()); - - // when stream has ended, we'll need to restart it - let mut stream = - FinalityProofsStream::::from_stream( - Box::pin(futures::stream::empty()), - ); - finality_proofs_buf.fill(&mut stream); - assert_eq!(finality_proofs_buf.buf, vec![TestFinalityProof(1), TestFinalityProof(4)]); - assert!(stream.stream.is_none()); - } - - #[test] - fn finality_proofs_buf_prune_works() { - let original_finality_proofs_buf: Vec< - ::FinalityProof, - > = vec![ - TestFinalityProof(10), - TestFinalityProof(13), - TestFinalityProof(15), - TestFinalityProof(17), - TestFinalityProof(19), - ] - .into_iter() - .collect(); - - // when there's proof for justified header in the vec - let mut finality_proofs_buf = FinalityProofsBuf:: { - buf: original_finality_proofs_buf.clone(), - }; - finality_proofs_buf.prune(10, None); - assert_eq!(&original_finality_proofs_buf[1..], finality_proofs_buf.buf,); - - // when there are no proof for justified header in the vec - let mut finality_proofs_buf = FinalityProofsBuf:: { - buf: original_finality_proofs_buf.clone(), - }; - finality_proofs_buf.prune(11, None); - assert_eq!(&original_finality_proofs_buf[1..], finality_proofs_buf.buf,); - - // when there are too many entries after initial prune && they also need to be pruned - let mut finality_proofs_buf = FinalityProofsBuf:: { - buf: original_finality_proofs_buf.clone(), - }; - finality_proofs_buf.prune(10, Some(2)); - assert_eq!(&original_finality_proofs_buf[3..], finality_proofs_buf.buf,); - - // when last entry is pruned - let mut finality_proofs_buf = FinalityProofsBuf:: { - buf: original_finality_proofs_buf.clone(), - }; - finality_proofs_buf.prune(19, Some(2)); - 
assert_eq!(&original_finality_proofs_buf[5..], finality_proofs_buf.buf,); - - // when post-last entry is pruned - let mut finality_proofs_buf = FinalityProofsBuf:: { - buf: original_finality_proofs_buf.clone(), - }; - finality_proofs_buf.prune(20, Some(2)); - assert_eq!(&original_finality_proofs_buf[5..], finality_proofs_buf.buf,); - } -} diff --git a/relays/finality/src/headers.rs b/relays/finality/src/headers.rs deleted file mode 100644 index 5bba4a384562d1f97334cd809ba47267698308f9..0000000000000000000000000000000000000000 --- a/relays/finality/src/headers.rs +++ /dev/null @@ -1,361 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::{ - finality_loop::SyncInfo, finality_proofs::FinalityProofsBuf, Error, FinalitySyncPipeline, - HeadersToRelay, SourceClient, SourceHeader, TargetClient, -}; - -use bp_header_chain::FinalityProof; -use num_traits::Saturating; -use std::cmp::Ordering; - -/// Unjustified headers container. Ordered by header number. -pub type UnjustifiedHeaders = Vec; - -#[derive(Debug)] -#[cfg_attr(test, derive(Clone, PartialEq))] -pub struct JustifiedHeader { - pub header: P::Header, - pub proof: P::FinalityProof, -} - -impl JustifiedHeader

{ - pub fn number(&self) -> P::Number { - self.header.number() - } -} - -/// Finality proof that has been selected by the `read_missing_headers` function. -pub enum JustifiedHeaderSelector { - /// Mandatory header and its proof has been selected. We shall submit proof for this header. - Mandatory(JustifiedHeader

), - /// Regular header and its proof has been selected. We may submit this proof, or proof for - /// some better header. - Regular(UnjustifiedHeaders, JustifiedHeader

), - /// We haven't found any missing header with persistent proof at the target client. - None(UnjustifiedHeaders), -} - -impl JustifiedHeaderSelector

{ - /// Selects last header with persistent justification, missing from the target and matching - /// the `headers_to_relay` criteria. - pub(crate) async fn new, TC: TargetClient

>( - source_client: &SC, - info: &SyncInfo

, - headers_to_relay: HeadersToRelay, - free_headers_interval: Option, - ) -> Result> { - let mut unjustified_headers = Vec::new(); - let mut maybe_justified_header = None; - - let mut header_number = info.best_number_at_target + 1.into(); - while header_number <= info.best_number_at_source { - let (header, maybe_proof) = source_client - .header_and_finality_proof(header_number) - .await - .map_err(Error::Source)?; - - match (header.is_mandatory(), maybe_proof) { - (true, Some(proof)) => { - log::trace!(target: "bridge", "Header {:?} is mandatory", header_number); - return Ok(Self::Mandatory(JustifiedHeader { header, proof })) - }, - (true, None) => return Err(Error::MissingMandatoryFinalityProof(header.number())), - (false, Some(proof)) - if need_to_relay::

( - info, - headers_to_relay, - free_headers_interval, - &header, - ) => - { - log::trace!(target: "bridge", "Header {:?} has persistent finality proof", header_number); - unjustified_headers.clear(); - maybe_justified_header = Some(JustifiedHeader { header, proof }); - }, - _ => { - unjustified_headers.push(header); - }, - } - - header_number = header_number + 1.into(); - } - - log::trace!( - target: "bridge", - "Read {} {} headers. Selected finality proof for header: {:?}", - info.num_headers(), - P::SOURCE_NAME, - maybe_justified_header.as_ref().map(|justified_header| &justified_header.header), - ); - - Ok(match maybe_justified_header { - Some(justified_header) => Self::Regular(unjustified_headers, justified_header), - None => Self::None(unjustified_headers), - }) - } - - /// Returns selected mandatory header if we have seen one. Otherwise returns `None`. - pub fn select_mandatory(self) -> Option> { - match self { - JustifiedHeaderSelector::Mandatory(header) => Some(header), - _ => None, - } - } - - /// Tries to improve previously selected header using ephemeral - /// justifications stream. - pub fn select( - self, - info: &SyncInfo

, - headers_to_relay: HeadersToRelay, - free_headers_interval: Option, - buf: &FinalityProofsBuf

, - ) -> Option> { - let (unjustified_headers, maybe_justified_header) = match self { - JustifiedHeaderSelector::Mandatory(justified_header) => return Some(justified_header), - JustifiedHeaderSelector::Regular(unjustified_headers, justified_header) => - (unjustified_headers, Some(justified_header)), - JustifiedHeaderSelector::None(unjustified_headers) => (unjustified_headers, None), - }; - - let mut finality_proofs_iter = buf.buf().iter().rev(); - let mut maybe_finality_proof = finality_proofs_iter.next(); - - let mut unjustified_headers_iter = unjustified_headers.iter().rev(); - let mut maybe_unjustified_header = unjustified_headers_iter.next(); - - while let (Some(finality_proof), Some(unjustified_header)) = - (maybe_finality_proof, maybe_unjustified_header) - { - match finality_proof.target_header_number().cmp(&unjustified_header.number()) { - Ordering::Equal - if need_to_relay::

( - info, - headers_to_relay, - free_headers_interval, - &unjustified_header, - ) => - { - log::trace!( - target: "bridge", - "Managed to improve selected {} finality proof {:?} to {:?}.", - P::SOURCE_NAME, - maybe_justified_header.as_ref().map(|justified_header| justified_header.number()), - finality_proof.target_header_number() - ); - return Some(JustifiedHeader { - header: unjustified_header.clone(), - proof: finality_proof.clone(), - }) - }, - Ordering::Equal => { - maybe_finality_proof = finality_proofs_iter.next(); - maybe_unjustified_header = unjustified_headers_iter.next(); - }, - Ordering::Less => maybe_unjustified_header = unjustified_headers_iter.next(), - Ordering::Greater => { - maybe_finality_proof = finality_proofs_iter.next(); - }, - } - } - - log::trace!( - target: "bridge", - "Could not improve selected {} finality proof {:?}.", - P::SOURCE_NAME, - maybe_justified_header.as_ref().map(|justified_header| justified_header.number()) - ); - maybe_justified_header - } -} - -/// Returns true if we want to relay header `header_number`. -fn need_to_relay( - info: &SyncInfo

, - headers_to_relay: HeadersToRelay, - free_headers_interval: Option, - header: &P::Header, -) -> bool { - match headers_to_relay { - HeadersToRelay::All => true, - HeadersToRelay::Mandatory => header.is_mandatory(), - HeadersToRelay::Free => - header.is_mandatory() || - free_headers_interval - .map(|free_headers_interval| { - header.number().saturating_sub(info.best_number_at_target) >= - free_headers_interval - }) - .unwrap_or(false), - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::*; - - #[test] - fn select_better_recent_finality_proof_works() { - let info = SyncInfo { - best_number_at_source: 10, - best_number_at_target: 5, - is_using_same_fork: true, - }; - - // if there are no unjustified headers, nothing is changed - let finality_proofs_buf = - FinalityProofsBuf::::new(vec![TestFinalityProof(5)]); - let justified_header = - JustifiedHeader { header: TestSourceHeader(false, 2, 2), proof: TestFinalityProof(2) }; - let selector = JustifiedHeaderSelector::Regular(vec![], justified_header.clone()); - assert_eq!( - selector.select(&info, HeadersToRelay::All, None, &finality_proofs_buf), - Some(justified_header) - ); - - // if there are no buffered finality proofs, nothing is changed - let finality_proofs_buf = FinalityProofsBuf::::new(vec![]); - let justified_header = - JustifiedHeader { header: TestSourceHeader(false, 2, 2), proof: TestFinalityProof(2) }; - let selector = JustifiedHeaderSelector::Regular( - vec![TestSourceHeader(false, 5, 5)], - justified_header.clone(), - ); - assert_eq!( - selector.select(&info, HeadersToRelay::All, None, &finality_proofs_buf), - Some(justified_header) - ); - - // if there's no intersection between recent finality proofs and unjustified headers, - // nothing is changed - let finality_proofs_buf = FinalityProofsBuf::::new(vec![ - TestFinalityProof(1), - TestFinalityProof(4), - ]); - let justified_header = - JustifiedHeader { header: TestSourceHeader(false, 2, 2), proof: TestFinalityProof(2) }; - let 
selector = JustifiedHeaderSelector::Regular( - vec![TestSourceHeader(false, 9, 9), TestSourceHeader(false, 10, 10)], - justified_header.clone(), - ); - assert_eq!( - selector.select(&info, HeadersToRelay::All, None, &finality_proofs_buf), - Some(justified_header) - ); - - // if there's intersection between recent finality proofs and unjustified headers, but there - // are no proofs in this intersection, nothing is changed - let finality_proofs_buf = FinalityProofsBuf::::new(vec![ - TestFinalityProof(7), - TestFinalityProof(11), - ]); - let justified_header = - JustifiedHeader { header: TestSourceHeader(false, 2, 2), proof: TestFinalityProof(2) }; - let selector = JustifiedHeaderSelector::Regular( - vec![ - TestSourceHeader(false, 8, 8), - TestSourceHeader(false, 9, 9), - TestSourceHeader(false, 10, 10), - ], - justified_header.clone(), - ); - assert_eq!( - selector.select(&info, HeadersToRelay::All, None, &finality_proofs_buf), - Some(justified_header) - ); - - // if there's intersection between recent finality proofs and unjustified headers and - // there's a proof in this intersection: - // - this better (last from intersection) proof is selected; - // - 'obsolete' unjustified headers are pruned. 
- let finality_proofs_buf = FinalityProofsBuf::::new(vec![ - TestFinalityProof(7), - TestFinalityProof(9), - ]); - let justified_header = - JustifiedHeader { header: TestSourceHeader(false, 2, 2), proof: TestFinalityProof(2) }; - let selector = JustifiedHeaderSelector::Regular( - vec![ - TestSourceHeader(false, 8, 8), - TestSourceHeader(false, 9, 9), - TestSourceHeader(false, 10, 10), - ], - justified_header, - ); - assert_eq!( - selector.select(&info, HeadersToRelay::All, None, &finality_proofs_buf), - Some(JustifiedHeader { - header: TestSourceHeader(false, 9, 9), - proof: TestFinalityProof(9) - }) - ); - - // when only free headers needs to be relayed and there are no free headers - let finality_proofs_buf = FinalityProofsBuf::::new(vec![ - TestFinalityProof(7), - TestFinalityProof(9), - ]); - let selector = JustifiedHeaderSelector::None(vec![ - TestSourceHeader(false, 8, 8), - TestSourceHeader(false, 9, 9), - TestSourceHeader(false, 10, 10), - ]); - assert_eq!( - selector.select(&info, HeadersToRelay::Free, Some(7), &finality_proofs_buf), - None, - ); - - // when only free headers needs to be relayed, mandatory header may be selected - let finality_proofs_buf = FinalityProofsBuf::::new(vec![ - TestFinalityProof(6), - TestFinalityProof(9), - ]); - let selector = JustifiedHeaderSelector::None(vec![ - TestSourceHeader(false, 8, 8), - TestSourceHeader(true, 9, 9), - TestSourceHeader(false, 10, 10), - ]); - assert_eq!( - selector.select(&info, HeadersToRelay::Free, Some(7), &finality_proofs_buf), - Some(JustifiedHeader { - header: TestSourceHeader(true, 9, 9), - proof: TestFinalityProof(9) - }) - ); - - // when only free headers needs to be relayed and there is free header - let finality_proofs_buf = FinalityProofsBuf::::new(vec![ - TestFinalityProof(7), - TestFinalityProof(9), - TestFinalityProof(14), - ]); - let selector = JustifiedHeaderSelector::None(vec![ - TestSourceHeader(false, 7, 7), - TestSourceHeader(false, 10, 10), - TestSourceHeader(false, 14, 14), - 
]); - assert_eq!( - selector.select(&info, HeadersToRelay::Free, Some(7), &finality_proofs_buf), - Some(JustifiedHeader { - header: TestSourceHeader(false, 14, 14), - proof: TestFinalityProof(14) - }) - ); - } -} diff --git a/relays/finality/src/lib.rs b/relays/finality/src/lib.rs deleted file mode 100644 index 4346f96674b4c43c153ad8bf55cb5ee963871849..0000000000000000000000000000000000000000 --- a/relays/finality/src/lib.rs +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! This crate has single entrypoint to run synchronization loop that is built around finality -//! proofs, as opposed to headers synchronization loop, which is built around headers. The headers -//! are still submitted to the target node, but are treated as auxiliary data as we are not trying -//! to submit all source headers to the target node. 
- -pub use crate::{ - base::{FinalityPipeline, SourceClientBase}, - finality_loop::{ - metrics_prefix, run, FinalitySyncParams, HeadersToRelay, SourceClient, TargetClient, - }, - finality_proofs::{FinalityProofsBuf, FinalityProofsStream}, - sync_loop_metrics::SyncLoopMetrics, -}; - -use bp_header_chain::ConsensusLogReader; -use relay_utils::{FailedClient, MaybeConnectionError}; -use std::fmt::Debug; - -mod base; -mod finality_loop; -mod finality_proofs; -mod headers; -mod mock; -mod sync_loop_metrics; - -/// Finality proofs synchronization pipeline. -pub trait FinalitySyncPipeline: FinalityPipeline { - /// A reader that can extract the consensus log from the header digest and interpret it. - type ConsensusLogReader: ConsensusLogReader; - /// Type of header that we're syncing. - type Header: SourceHeader; -} - -/// Header that we're receiving from source node. -pub trait SourceHeader: Clone + Debug + PartialEq + Send + Sync { - /// Returns hash of header. - fn hash(&self) -> Hash; - /// Returns number of header. - fn number(&self) -> Number; - /// Returns true if this header needs to be submitted to target node. - fn is_mandatory(&self) -> bool; -} - -/// Error that may happen inside finality synchronization loop. -#[derive(Debug)] -enum Error { - /// Source client request has failed with given error. - Source(SourceError), - /// Target client request has failed with given error. - Target(TargetError), - /// Finality proof for mandatory header is missing from the source node. 
- MissingMandatoryFinalityProof(P::Number), - /// `submit_finality_proof` transaction failed - ProofSubmissionTxFailed { - #[allow(dead_code)] - submitted_number: P::Number, - #[allow(dead_code)] - best_number_at_target: P::Number, - }, - /// `submit_finality_proof` transaction lost - ProofSubmissionTxLost, -} - -impl Error -where - P: FinalitySyncPipeline, - SourceError: MaybeConnectionError, - TargetError: MaybeConnectionError, -{ - fn fail_if_connection_error(&self) -> Result<(), FailedClient> { - match *self { - Error::Source(ref error) if error.is_connection_error() => Err(FailedClient::Source), - Error::Target(ref error) if error.is_connection_error() => Err(FailedClient::Target), - _ => Ok(()), - } - } -} diff --git a/relays/finality/src/mock.rs b/relays/finality/src/mock.rs deleted file mode 100644 index 69357f71ce27d54a2ca4866e3fd6db0a73fb44e2..0000000000000000000000000000000000000000 --- a/relays/finality/src/mock.rs +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Tests for finality synchronization loop. 
- -#![cfg(test)] - -use crate::{ - base::SourceClientBase, - finality_loop::{SourceClient, TargetClient}, - FinalityPipeline, FinalitySyncPipeline, SourceHeader, -}; - -use async_trait::async_trait; -use bp_header_chain::{FinalityProof, GrandpaConsensusLogReader}; -use futures::{Stream, StreamExt}; -use parking_lot::Mutex; -use relay_utils::{ - relay_loop::Client as RelayClient, HeaderId, MaybeConnectionError, TrackedTransactionStatus, - TransactionTracker, -}; -use std::{collections::HashMap, pin::Pin, sync::Arc}; - -type IsMandatory = bool; -pub type TestNumber = u64; -type TestHash = u64; - -#[derive(Clone, Debug)] -pub struct TestTransactionTracker(pub TrackedTransactionStatus>); - -impl Default for TestTransactionTracker { - fn default() -> TestTransactionTracker { - TestTransactionTracker(TrackedTransactionStatus::Finalized(Default::default())) - } -} - -#[async_trait] -impl TransactionTracker for TestTransactionTracker { - type HeaderId = HeaderId; - - async fn wait(self) -> TrackedTransactionStatus> { - self.0 - } -} - -#[derive(Debug, Clone)] -pub enum TestError { - NonConnection, -} - -impl MaybeConnectionError for TestError { - fn is_connection_error(&self) -> bool { - false - } -} - -#[derive(Debug, Clone, PartialEq)] -pub struct TestFinalitySyncPipeline; - -impl FinalityPipeline for TestFinalitySyncPipeline { - const SOURCE_NAME: &'static str = "TestSource"; - const TARGET_NAME: &'static str = "TestTarget"; - - type Hash = TestHash; - type Number = TestNumber; - type FinalityProof = TestFinalityProof; -} - -impl FinalitySyncPipeline for TestFinalitySyncPipeline { - type ConsensusLogReader = GrandpaConsensusLogReader; - type Header = TestSourceHeader; -} - -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct TestSourceHeader(pub IsMandatory, pub TestNumber, pub TestHash); - -impl SourceHeader> - for TestSourceHeader -{ - fn hash(&self) -> TestHash { - self.2 - } - - fn number(&self) -> TestNumber { - self.1 - } - - fn is_mandatory(&self) -> bool { - 
self.0 - } -} - -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct TestFinalityProof(pub TestNumber); - -impl FinalityProof for TestFinalityProof { - fn target_header_hash(&self) -> TestHash { - Default::default() - } - - fn target_header_number(&self) -> TestNumber { - self.0 - } -} - -#[derive(Debug, Clone, Default)] -pub struct ClientsData { - pub source_best_block_number: TestNumber, - pub source_headers: HashMap)>, - pub source_proofs: Vec, - - pub target_best_block_id: HeaderId, - pub target_headers: Vec<(TestSourceHeader, TestFinalityProof)>, - pub target_transaction_tracker: TestTransactionTracker, -} - -#[derive(Clone)] -pub struct TestSourceClient { - pub on_method_call: Arc, - pub data: Arc>, -} - -#[async_trait] -impl RelayClient for TestSourceClient { - type Error = TestError; - - async fn reconnect(&mut self) -> Result<(), TestError> { - unreachable!() - } -} - -#[async_trait] -impl SourceClientBase for TestSourceClient { - type FinalityProofsStream = Pin + 'static + Send>>; - - async fn finality_proofs(&self) -> Result { - let mut data = self.data.lock(); - (self.on_method_call)(&mut data); - Ok(futures::stream::iter(data.source_proofs.clone()).boxed()) - } -} - -#[async_trait] -impl SourceClient for TestSourceClient { - async fn best_finalized_block_number(&self) -> Result { - let mut data = self.data.lock(); - (self.on_method_call)(&mut data); - Ok(data.source_best_block_number) - } - - async fn header_and_finality_proof( - &self, - number: TestNumber, - ) -> Result<(TestSourceHeader, Option), TestError> { - let mut data = self.data.lock(); - (self.on_method_call)(&mut data); - data.source_headers.get(&number).cloned().ok_or(TestError::NonConnection) - } -} - -#[derive(Clone)] -pub struct TestTargetClient { - pub on_method_call: Arc, - pub data: Arc>, -} - -#[async_trait] -impl RelayClient for TestTargetClient { - type Error = TestError; - - async fn reconnect(&mut self) -> Result<(), TestError> { - unreachable!() - } -} - -#[async_trait] -impl 
TargetClient for TestTargetClient { - type TransactionTracker = TestTransactionTracker; - - async fn best_finalized_source_block_id( - &self, - ) -> Result, TestError> { - let mut data = self.data.lock(); - (self.on_method_call)(&mut data); - Ok(data.target_best_block_id) - } - - async fn free_source_headers_interval(&self) -> Result, TestError> { - Ok(Some(3)) - } - - async fn submit_finality_proof( - &self, - header: TestSourceHeader, - proof: TestFinalityProof, - _is_free_execution_expected: bool, - ) -> Result { - let mut data = self.data.lock(); - (self.on_method_call)(&mut data); - data.target_best_block_id = HeaderId(header.number(), header.hash()); - data.target_headers.push((header, proof)); - (self.on_method_call)(&mut data); - Ok(data.target_transaction_tracker.clone()) - } -} diff --git a/relays/finality/src/sync_loop_metrics.rs b/relays/finality/src/sync_loop_metrics.rs deleted file mode 100644 index 4da1df811f6ece0597520a99c44c1e29171c3e20..0000000000000000000000000000000000000000 --- a/relays/finality/src/sync_loop_metrics.rs +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Metrics for headers synchronization relay loop. 
- -use relay_utils::{ - metrics::{metric_name, register, IntGauge, Metric, PrometheusError, Registry}, - UniqueSaturatedInto, -}; - -/// Headers sync metrics. -#[derive(Clone)] -pub struct SyncLoopMetrics { - /// Best syncing header at the source. - best_source_block_number: IntGauge, - /// Best syncing header at the target. - best_target_block_number: IntGauge, - /// Flag that has `0` value when best source headers at the source node and at-target-chain - /// are matching and `1` otherwise. - using_different_forks: IntGauge, -} - -impl SyncLoopMetrics { - /// Create and register headers loop metrics. - pub fn new( - prefix: Option<&str>, - at_source_chain_label: &str, - at_target_chain_label: &str, - ) -> Result { - Ok(SyncLoopMetrics { - best_source_block_number: IntGauge::new( - metric_name(prefix, &format!("best_{at_source_chain_label}_block_number")), - format!("Best block number at the {at_source_chain_label}"), - )?, - best_target_block_number: IntGauge::new( - metric_name(prefix, &format!("best_{at_target_chain_label}_block_number")), - format!("Best block number at the {at_target_chain_label}"), - )?, - using_different_forks: IntGauge::new( - metric_name(prefix, &format!("is_{at_source_chain_label}_and_{at_target_chain_label}_using_different_forks")), - "Whether the best finalized source block at target node is different (value 1) from the \ - corresponding block at the source node", - )?, - }) - } - - /// Returns current value of the using-same-fork flag. - #[cfg(test)] - pub(crate) fn is_using_same_fork(&self) -> bool { - self.using_different_forks.get() == 0 - } - - /// Update best block number at source. - pub fn update_best_block_at_source>( - &self, - source_best_number: Number, - ) { - self.best_source_block_number.set(source_best_number.unique_saturated_into()); - } - - /// Update best block number at target. 
- pub fn update_best_block_at_target>( - &self, - target_best_number: Number, - ) { - self.best_target_block_number.set(target_best_number.unique_saturated_into()); - } - - /// Update using-same-fork flag. - pub fn update_using_same_fork(&self, using_same_fork: bool) { - self.using_different_forks.set((!using_same_fork).into()) - } -} - -impl Metric for SyncLoopMetrics { - fn register(&self, registry: &Registry) -> Result<(), PrometheusError> { - register(self.best_source_block_number.clone(), registry)?; - register(self.best_target_block_number.clone(), registry)?; - register(self.using_different_forks.clone(), registry)?; - Ok(()) - } -} diff --git a/relays/lib-substrate-relay/Cargo.toml b/relays/lib-substrate-relay/Cargo.toml deleted file mode 100644 index 161548ac4d5e2ba014130d0807acce45252e867a..0000000000000000000000000000000000000000 --- a/relays/lib-substrate-relay/Cargo.toml +++ /dev/null @@ -1,60 +0,0 @@ -[package] -name = "substrate-relay-helper" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] -anyhow = "1.0" -thiserror = { workspace = true } -async-std = "1.9.0" -async-trait = "0.1" -codec = { package = "parity-scale-codec", version = "3.1.5" } -futures = "0.3.30" -hex = "0.4" -num-traits = "0.2" -log = { workspace = true } - -# Bridge dependencies - -bp-header-chain = { path = "../../primitives/header-chain" } -bp-parachains = { path = "../../primitives/parachains" } -bp-polkadot-core = { path = "../../primitives/polkadot-core" } -bp-relayers = { path = "../../primitives/relayers" } -bridge-runtime-common = { path = "../../bin/runtime-common" } - -equivocation-detector = { path = "../equivocation" } -finality-grandpa = { version = "0.16.2" } -finality-relay = { path = "../finality" } -parachains-relay = { path = "../parachains" } -relay-utils = { path = "../utils" } -messages-relay = { path = "../messages" } 
-relay-substrate-client = { path = "../client-substrate" } - -pallet-bridge-grandpa = { path = "../../modules/grandpa" } -pallet-bridge-messages = { path = "../../modules/messages" } -pallet-bridge-parachains = { path = "../../modules/parachains" } - -bp-runtime = { path = "../../primitives/runtime" } -bp-messages = { path = "../../primitives/messages" } - -# Substrate Dependencies - -frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -frame-system = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -pallet-balances = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -pallet-grandpa = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-consensus-grandpa = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } - -[dev-dependencies] -bp-rococo = { path = "../../primitives/chain-rococo" } -pallet-transaction-payment = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -relay-bridge-hub-rococo-client = { path = "../client-bridge-hub-rococo" } -relay-bridge-hub-westend-client = { path = "../client-bridge-hub-westend" } -relay-rococo-client = { path = "../client-rococo" } diff --git a/relays/lib-substrate-relay/src/equivocation/mod.rs b/relays/lib-substrate-relay/src/equivocation/mod.rs deleted file mode 100644 index f6d58cbaa4ab4c4d7f489de5a80ab226b3b475b4..0000000000000000000000000000000000000000 --- a/relays/lib-substrate-relay/src/equivocation/mod.rs +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright 2019-2023 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Types and functions intended to ease adding of new Substrate -> Substrate -//! equivocation detection pipelines. - -mod source; -mod target; - -use crate::{ - equivocation::{source::SubstrateEquivocationSource, target::SubstrateEquivocationTarget}, - finality_base::{engine::Engine, SubstrateFinalityPipeline, SubstrateFinalityProof}, - TransactionParams, -}; - -use async_trait::async_trait; -use bp_runtime::{AccountIdOf, BlockNumberOf, HashOf}; -use equivocation_detector::EquivocationDetectionPipeline; -use finality_relay::FinalityPipeline; -use pallet_grandpa::{Call as GrandpaCall, Config as GrandpaConfig}; -use relay_substrate_client::{AccountKeyPairOf, CallOf, Chain, ChainWithTransactions, Client}; -use relay_utils::metrics::MetricsParams; -use sp_core::Pair; -use sp_runtime::traits::{Block, Header}; -use std::marker::PhantomData; - -/// Convenience trait that adds bounds to `SubstrateEquivocationDetectionPipeline`. -pub trait BaseSubstrateEquivocationDetectionPipeline: - SubstrateFinalityPipeline -{ - /// Bounded `SubstrateFinalityPipeline::SourceChain`. - type BoundedSourceChain: ChainWithTransactions; - - /// Bounded `AccountIdOf`. 
- type BoundedSourceChainAccountId: From< as Pair>::Public> - + Send; -} - -impl BaseSubstrateEquivocationDetectionPipeline for T -where - T: SubstrateFinalityPipeline, - T::SourceChain: ChainWithTransactions, - AccountIdOf: From< as Pair>::Public>, -{ - type BoundedSourceChain = T::SourceChain; - type BoundedSourceChainAccountId = AccountIdOf; -} - -/// Substrate -> Substrate equivocation detection pipeline. -#[async_trait] -pub trait SubstrateEquivocationDetectionPipeline: - BaseSubstrateEquivocationDetectionPipeline -{ - /// How the `report_equivocation` call is built ? - type ReportEquivocationCallBuilder: ReportEquivocationCallBuilder; - - /// Add relay guards if required. - async fn start_relay_guards( - source_client: &Client, - enable_version_guard: bool, - ) -> relay_substrate_client::Result<()> { - if enable_version_guard { - relay_substrate_client::guard::abort_on_spec_version_change( - source_client.clone(), - source_client.simple_runtime_version().await?.spec_version, - ); - } - Ok(()) - } -} - -type FinalityProoffOf

= <

::FinalityEngine as Engine< -

::SourceChain, ->>::FinalityProof; -type FinalityVerificationContextfOf

= - <

::FinalityEngine as Engine< -

::SourceChain, - >>::FinalityVerificationContext; -/// The type of the equivocation proof used by the `SubstrateEquivocationDetectionPipeline` -pub type EquivocationProofOf

= <

::FinalityEngine as Engine< -

::SourceChain, ->>::EquivocationProof; -type EquivocationsFinderOf

= <

::FinalityEngine as Engine< -

::SourceChain, ->>::EquivocationsFinder; -/// The type of the key owner proof used by the `SubstrateEquivocationDetectionPipeline` -pub type KeyOwnerProofOf

= <

::FinalityEngine as Engine< -

::SourceChain, ->>::KeyOwnerProof; - -/// Adapter that allows a `SubstrateEquivocationDetectionPipeline` to act as an -/// `EquivocationDetectionPipeline`. -#[derive(Clone, Debug)] -pub struct EquivocationDetectionPipelineAdapter { - _phantom: PhantomData

, -} - -impl FinalityPipeline - for EquivocationDetectionPipelineAdapter

-{ - const SOURCE_NAME: &'static str = P::SourceChain::NAME; - const TARGET_NAME: &'static str = P::TargetChain::NAME; - - type Hash = HashOf; - type Number = BlockNumberOf; - type FinalityProof = SubstrateFinalityProof

; -} - -impl EquivocationDetectionPipeline - for EquivocationDetectionPipelineAdapter

-{ - type TargetNumber = BlockNumberOf; - type FinalityVerificationContext = FinalityVerificationContextfOf

; - type EquivocationProof = EquivocationProofOf

; - type EquivocationsFinder = EquivocationsFinderOf

; -} - -/// Different ways of building `report_equivocation` calls. -pub trait ReportEquivocationCallBuilder { - /// Build a `report_equivocation` call to be executed on the source chain. - fn build_report_equivocation_call( - equivocation_proof: EquivocationProofOf

, - key_owner_proof: KeyOwnerProofOf

, - ) -> CallOf; -} - -/// Building the `report_equivocation` call when having direct access to the target chain runtime. -pub struct DirectReportGrandpaEquivocationCallBuilder { - _phantom: PhantomData<(P, R)>, -} - -impl ReportEquivocationCallBuilder

for DirectReportGrandpaEquivocationCallBuilder -where - P: SubstrateEquivocationDetectionPipeline, - P::FinalityEngine: Engine< - P::SourceChain, - EquivocationProof = sp_consensus_grandpa::EquivocationProof< - HashOf, - BlockNumberOf, - >, - >, - R: frame_system::Config> - + GrandpaConfig>, - ::Header: Header>, - CallOf: From>, -{ - fn build_report_equivocation_call( - equivocation_proof: EquivocationProofOf

, - key_owner_proof: KeyOwnerProofOf

, - ) -> CallOf { - GrandpaCall::::report_equivocation { - equivocation_proof: Box::new(equivocation_proof), - key_owner_proof, - } - .into() - } -} - -/// Macro that generates `ReportEquivocationCallBuilder` implementation for the case where -/// we only have access to the mocked version of the source chain runtime. -#[rustfmt::skip] -#[macro_export] -macro_rules! generate_report_equivocation_call_builder { - ($pipeline:ident, $mocked_builder:ident, $grandpa:path, $report_equivocation:path) => { - pub struct $mocked_builder; - - impl $crate::equivocation::ReportEquivocationCallBuilder<$pipeline> - for $mocked_builder - { - fn build_report_equivocation_call( - equivocation_proof: $crate::equivocation::EquivocationProofOf<$pipeline>, - key_owner_proof: $crate::equivocation::KeyOwnerProofOf<$pipeline>, - ) -> relay_substrate_client::CallOf< - <$pipeline as $crate::finality_base::SubstrateFinalityPipeline>::SourceChain - > { - bp_runtime::paste::item! { - $grandpa($report_equivocation { - equivocation_proof: Box::new(equivocation_proof), - key_owner_proof: key_owner_proof - }) - } - } - } - }; -} - -/// Run Substrate-to-Substrate equivocations detection loop. -pub async fn run( - source_client: Client, - target_client: Client, - source_transaction_params: TransactionParams>, - metrics_params: MetricsParams, -) -> anyhow::Result<()> { - log::info!( - target: "bridge", - "Starting {} -> {} equivocations detection loop", - P::SourceChain::NAME, - P::TargetChain::NAME, - ); - - equivocation_detector::run( - SubstrateEquivocationSource::

::new(source_client, source_transaction_params), - SubstrateEquivocationTarget::

::new(target_client), - P::TargetChain::AVERAGE_BLOCK_INTERVAL, - metrics_params, - futures::future::pending(), - ) - .await - .map_err(|e| anyhow::format_err!("{}", e)) -} diff --git a/relays/lib-substrate-relay/src/equivocation/source.rs b/relays/lib-substrate-relay/src/equivocation/source.rs deleted file mode 100644 index a0c7dcf5cbc32c7e5a39de5acd53d92def24a22f..0000000000000000000000000000000000000000 --- a/relays/lib-substrate-relay/src/equivocation/source.rs +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2019-2023 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Default generic implementation of equivocation source for basic Substrate client. - -use crate::{ - equivocation::{ - EquivocationDetectionPipelineAdapter, EquivocationProofOf, ReportEquivocationCallBuilder, - SubstrateEquivocationDetectionPipeline, - }, - finality_base::{engine::Engine, finality_proofs, SubstrateFinalityProofsStream}, - TransactionParams, -}; - -use async_trait::async_trait; -use bp_runtime::{HashOf, TransactionEra}; -use equivocation_detector::SourceClient; -use finality_relay::SourceClientBase; -use relay_substrate_client::{ - AccountKeyPairOf, Client, Error, TransactionTracker, UnsignedTransaction, -}; -use relay_utils::relay_loop::Client as RelayClient; - -/// Substrate node as equivocation source. 
-pub struct SubstrateEquivocationSource { - client: Client, - transaction_params: TransactionParams>, -} - -impl SubstrateEquivocationSource

{ - /// Create new instance of `SubstrateEquivocationSource`. - pub fn new( - client: Client, - transaction_params: TransactionParams>, - ) -> Self { - Self { client, transaction_params } - } -} - -impl Clone for SubstrateEquivocationSource

{ - fn clone(&self) -> Self { - Self { client: self.client.clone(), transaction_params: self.transaction_params.clone() } - } -} - -#[async_trait] -impl RelayClient for SubstrateEquivocationSource

{ - type Error = Error; - - async fn reconnect(&mut self) -> Result<(), Error> { - self.client.reconnect().await - } -} - -#[async_trait] -impl - SourceClientBase> for SubstrateEquivocationSource

-{ - type FinalityProofsStream = SubstrateFinalityProofsStream

; - - async fn finality_proofs(&self) -> Result { - finality_proofs::

(&self.client).await - } -} - -#[async_trait] -impl - SourceClient> for SubstrateEquivocationSource

-{ - type TransactionTracker = TransactionTracker>; - - async fn report_equivocation( - &self, - at: HashOf, - equivocation: EquivocationProofOf

, - ) -> Result { - let key_owner_proof = - P::FinalityEngine::generate_source_key_ownership_proof(&self.client, at, &equivocation) - .await?; - - let mortality = self.transaction_params.mortality; - let call = P::ReportEquivocationCallBuilder::build_report_equivocation_call( - equivocation, - key_owner_proof, - ); - self.client - .submit_and_watch_signed_extrinsic( - &self.transaction_params.signer, - move |best_block_id, transaction_nonce| { - Ok(UnsignedTransaction::new(call.into(), transaction_nonce) - .era(TransactionEra::new(best_block_id, mortality))) - }, - ) - .await - } -} diff --git a/relays/lib-substrate-relay/src/equivocation/target.rs b/relays/lib-substrate-relay/src/equivocation/target.rs deleted file mode 100644 index 6eee2ab91d45b033a77e30b7d05ae28b246b9735..0000000000000000000000000000000000000000 --- a/relays/lib-substrate-relay/src/equivocation/target.rs +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2019-2023 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Default generic implementation of equivocation source for basic Substrate client. 
- -use crate::{ - equivocation::{ - EquivocationDetectionPipelineAdapter, FinalityProoffOf, FinalityVerificationContextfOf, - SubstrateEquivocationDetectionPipeline, - }, - finality_base::{best_synced_header_id, engine::Engine}, -}; - -use async_trait::async_trait; -use bp_header_chain::HeaderFinalityInfo; -use bp_runtime::{BlockNumberOf, HashOf}; -use equivocation_detector::TargetClient; -use relay_substrate_client::{Client, Error}; -use relay_utils::relay_loop::Client as RelayClient; -use sp_runtime::traits::Header; -use std::marker::PhantomData; - -/// Substrate node as equivocation source. -pub struct SubstrateEquivocationTarget { - client: Client, - - _phantom: PhantomData

, -} - -impl SubstrateEquivocationTarget

{ - /// Create new instance of `SubstrateEquivocationTarget`. - pub fn new(client: Client) -> Self { - Self { client, _phantom: Default::default() } - } -} - -impl Clone for SubstrateEquivocationTarget

{ - fn clone(&self) -> Self { - Self { client: self.client.clone(), _phantom: Default::default() } - } -} - -#[async_trait] -impl RelayClient for SubstrateEquivocationTarget

{ - type Error = Error; - - async fn reconnect(&mut self) -> Result<(), Error> { - self.client.reconnect().await - } -} - -#[async_trait] -impl - TargetClient> for SubstrateEquivocationTarget

-{ - async fn best_finalized_header_number( - &self, - ) -> Result, Self::Error> { - self.client.best_finalized_header_number().await - } - - async fn best_synced_header_hash( - &self, - at: BlockNumberOf, - ) -> Result>, Self::Error> { - Ok(best_synced_header_id::( - &self.client, - self.client.header_by_number(at).await?.hash(), - ) - .await? - .map(|id| id.hash())) - } - - async fn finality_verification_context( - &self, - at: BlockNumberOf, - ) -> Result, Self::Error> { - P::FinalityEngine::finality_verification_context( - &self.client, - self.client.header_by_number(at).await?.hash(), - ) - .await - } - - async fn synced_headers_finality_info( - &self, - at: BlockNumberOf, - ) -> Result< - Vec, FinalityVerificationContextfOf

>>, - Self::Error, - > { - P::FinalityEngine::synced_headers_finality_info( - &self.client, - self.client.header_by_number(at).await?.hash(), - ) - .await - } -} diff --git a/relays/lib-substrate-relay/src/error.rs b/relays/lib-substrate-relay/src/error.rs deleted file mode 100644 index 2ebd9130f3912ba4c0552860a2259b222220e8f8..0000000000000000000000000000000000000000 --- a/relays/lib-substrate-relay/src/error.rs +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Relay errors. - -use relay_substrate_client as client; -use sp_consensus_grandpa::AuthorityList; -use sp_runtime::traits::MaybeDisplay; -use std::fmt::Debug; -use thiserror::Error; - -/// Relay errors. -#[derive(Error, Debug)] -pub enum Error { - /// Failed to submit signed extrinsic from to the target chain. - #[error("Failed to submit {0} transaction: {1:?}")] - SubmitTransaction(&'static str, client::Error), - /// Failed subscribe to justification stream of the source chain. - #[error("Failed to subscribe to {0} justifications: {1:?}")] - Subscribe(&'static str, client::Error), - /// Failed subscribe to read justification from the source chain (client error). 
- #[error("Failed to read {0} justification from the stream: {1}")] - ReadJustification(&'static str, client::Error), - /// Failed subscribe to read justification from the source chain (stream ended). - #[error("Failed to read {0} justification from the stream: stream has ended unexpectedly")] - ReadJustificationStreamEnded(&'static str), - /// Failed subscribe to decode justification from the source chain. - #[error("Failed to decode {0} justification: {1:?}")] - DecodeJustification(&'static str, codec::Error), - /// GRANDPA authorities read from the source chain are invalid. - #[error("Read invalid {0} authorities set: {1:?}")] - ReadInvalidAuthorities(&'static str, AuthorityList), - /// Failed to guess initial GRANDPA authorities at the given header of the source chain. - #[error("Failed to guess initial {0} GRANDPA authorities set id: checked all possible ids in range [0; {1}]")] - GuessInitialAuthorities(&'static str, HeaderNumber), - /// Failed to retrieve GRANDPA authorities at the given header from the source chain. - #[error("Failed to retrive {0} GRANDPA authorities set at header {1}: {2:?}")] - RetrieveAuthorities(&'static str, Hash, client::Error), - /// Failed to decode GRANDPA authorities at the given header of the source chain. - #[error("Failed to decode {0} GRANDPA authorities set at header {1}: {2:?}")] - DecodeAuthorities(&'static str, Hash, codec::Error), - /// Failed to retrieve header by the hash from the source chain. - #[error("Failed to retrieve {0} header with hash {1}: {2:?}")] - RetrieveHeader(&'static str, Hash, client::Error), - /// Failed to submit signed extrinsic from to the target chain. 
- #[error( - "Failed to retrieve `is_initialized` flag of the with-{0} finality pallet at {1}: {2:?}" - )] - IsInitializedRetrieve(&'static str, &'static str, client::Error), -} diff --git a/relays/lib-substrate-relay/src/finality/initialize.rs b/relays/lib-substrate-relay/src/finality/initialize.rs deleted file mode 100644 index 5dde46c39dd674e7c01eebba4b014bc999611eb5..0000000000000000000000000000000000000000 --- a/relays/lib-substrate-relay/src/finality/initialize.rs +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Initialize Substrate -> Substrate finality bridge. -//! -//! Initialization is a transaction that calls `initialize()` function of the -//! finality pallet (GRANDPA/BEEFY/...). This transaction brings initial header -//! and authorities set from source to target chain. The finality sync starts -//! with this header. 
- -use crate::{error::Error, finality_base::engine::Engine}; -use sp_core::Pair; - -use bp_runtime::HeaderIdOf; -use relay_substrate_client::{ - AccountKeyPairOf, Chain, ChainWithTransactions, Client, Error as SubstrateError, - UnsignedTransaction, -}; -use relay_utils::{TrackedTransactionStatus, TransactionTracker}; -use sp_runtime::traits::Header as HeaderT; - -/// Submit headers-bridge initialization transaction. -pub async fn initialize< - E: Engine, - SourceChain: Chain, - TargetChain: ChainWithTransactions, - F, ->( - source_client: Client, - target_client: Client, - target_signer: AccountKeyPairOf, - prepare_initialize_transaction: F, - dry_run: bool, -) where - F: FnOnce( - TargetChain::Nonce, - E::InitializationData, - ) -> Result, SubstrateError> - + Send - + 'static, - TargetChain::AccountId: From<::Public>, -{ - let result = do_initialize::( - source_client, - target_client, - target_signer, - prepare_initialize_transaction, - dry_run, - ) - .await; - - match result { - Ok(Some(tx_status)) => match tx_status { - TrackedTransactionStatus::Lost => { - log::error!( - target: "bridge", - "Failed to execute {}-headers bridge initialization transaction on {}: {:?}.", - SourceChain::NAME, - TargetChain::NAME, - tx_status - ) - }, - TrackedTransactionStatus::Finalized(_) => { - log::info!( - target: "bridge", - "Successfully executed {}-headers bridge initialization transaction on {}: {:?}.", - SourceChain::NAME, - TargetChain::NAME, - tx_status - ) - }, - }, - Ok(None) => (), - Err(err) => log::error!( - target: "bridge", - "Failed to submit {}-headers bridge initialization transaction to {}: {:?}", - SourceChain::NAME, - TargetChain::NAME, - err, - ), - } -} - -/// Craft and submit initialization transaction, returning any error that may occur. 
-async fn do_initialize< - E: Engine, - SourceChain: Chain, - TargetChain: ChainWithTransactions, - F, ->( - source_client: Client, - target_client: Client, - target_signer: AccountKeyPairOf, - prepare_initialize_transaction: F, - dry_run: bool, -) -> Result< - Option>>, - Error::Number>, -> -where - F: FnOnce( - TargetChain::Nonce, - E::InitializationData, - ) -> Result, SubstrateError> - + Send - + 'static, - TargetChain::AccountId: From<::Public>, -{ - let is_initialized = E::is_initialized(&target_client) - .await - .map_err(|e| Error::IsInitializedRetrieve(SourceChain::NAME, TargetChain::NAME, e))?; - if is_initialized { - log::info!( - target: "bridge", - "{}-headers bridge at {} is already initialized. Skipping", - SourceChain::NAME, - TargetChain::NAME, - ); - if !dry_run { - return Ok(None) - } - } - - let initialization_data = E::prepare_initialization_data(source_client).await?; - log::info!( - target: "bridge", - "Prepared initialization data for {}-headers bridge at {}: {:?}", - SourceChain::NAME, - TargetChain::NAME, - initialization_data, - ); - - let tx_status = target_client - .submit_and_watch_signed_extrinsic(&target_signer, move |_, transaction_nonce| { - let tx = prepare_initialize_transaction(transaction_nonce, initialization_data); - if dry_run { - Err(SubstrateError::Custom( - "Not submitting extrinsic in `dry-run` mode!".to_string(), - )) - } else { - tx - } - }) - .await - .map_err(|err| Error::SubmitTransaction(TargetChain::NAME, err))? - .wait() - .await; - - Ok(Some(tx_status)) -} diff --git a/relays/lib-substrate-relay/src/finality/mod.rs b/relays/lib-substrate-relay/src/finality/mod.rs deleted file mode 100644 index a06857ae1d9b2f2340214c2bfa9df06c9683eead..0000000000000000000000000000000000000000 --- a/relays/lib-substrate-relay/src/finality/mod.rs +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Types and functions intended to ease adding of new Substrate -> Substrate -//! finality proofs synchronization pipelines. - -use crate::{ - finality::{source::SubstrateFinalitySource, target::SubstrateFinalityTarget}, - finality_base::{engine::Engine, SubstrateFinalityPipeline, SubstrateFinalityProof}, - TransactionParams, -}; - -use async_trait::async_trait; -use bp_header_chain::justification::{GrandpaJustification, JustificationVerificationContext}; -use finality_relay::{FinalityPipeline, FinalitySyncPipeline, HeadersToRelay}; -use pallet_bridge_grandpa::{Call as BridgeGrandpaCall, Config as BridgeGrandpaConfig}; -use relay_substrate_client::{ - transaction_stall_timeout, AccountIdOf, AccountKeyPairOf, BlockNumberOf, CallOf, Chain, - ChainWithTransactions, Client, HashOf, HeaderOf, SyncHeader, -}; -use relay_utils::metrics::MetricsParams; -use sp_core::Pair; -use std::{fmt::Debug, marker::PhantomData}; - -pub mod initialize; -pub mod source; -pub mod target; - -/// Default limit of recent finality proofs. -/// -/// Finality delay of 4096 blocks is unlikely to happen in practice in -/// Substrate+GRANDPA based chains (good to know). -pub(crate) const RECENT_FINALITY_PROOFS_LIMIT: usize = 4096; - -/// Convenience trait that adds bounds to `SubstrateFinalitySyncPipeline`. 
-pub trait BaseSubstrateFinalitySyncPipeline: - SubstrateFinalityPipeline -{ - /// Bounded `SubstrateFinalityPipeline::TargetChain`. - type BoundedTargetChain: ChainWithTransactions; - - /// Bounded `AccountIdOf`. - type BoundedTargetChainAccountId: From< as Pair>::Public> - + Send; -} - -impl BaseSubstrateFinalitySyncPipeline for T -where - T: SubstrateFinalityPipeline, - T::TargetChain: ChainWithTransactions, - AccountIdOf: From< as Pair>::Public>, -{ - type BoundedTargetChain = T::TargetChain; - type BoundedTargetChainAccountId = AccountIdOf; -} - -/// Substrate -> Substrate finality proofs synchronization pipeline. -#[async_trait] -pub trait SubstrateFinalitySyncPipeline: BaseSubstrateFinalitySyncPipeline { - /// How submit finality proof call is built? - type SubmitFinalityProofCallBuilder: SubmitFinalityProofCallBuilder; - - /// Add relay guards if required. - async fn start_relay_guards( - target_client: &Client, - enable_version_guard: bool, - ) -> relay_substrate_client::Result<()> { - if enable_version_guard { - relay_substrate_client::guard::abort_on_spec_version_change( - target_client.clone(), - target_client.simple_runtime_version().await?.spec_version, - ); - } - Ok(()) - } -} - -/// Adapter that allows all `SubstrateFinalitySyncPipeline` to act as `FinalitySyncPipeline`. -#[derive(Clone, Debug)] -pub struct FinalitySyncPipelineAdapter { - _phantom: PhantomData

, -} - -impl FinalityPipeline for FinalitySyncPipelineAdapter

{ - const SOURCE_NAME: &'static str = P::SourceChain::NAME; - const TARGET_NAME: &'static str = P::TargetChain::NAME; - - type Hash = HashOf; - type Number = BlockNumberOf; - type FinalityProof = SubstrateFinalityProof

; -} - -impl FinalitySyncPipeline for FinalitySyncPipelineAdapter

{ - type ConsensusLogReader = >::ConsensusLogReader; - type Header = SyncHeader>; -} - -/// Different ways of building `submit_finality_proof` calls. -pub trait SubmitFinalityProofCallBuilder { - /// Given source chain header, its finality proof and the current authority set id, build call - /// of `submit_finality_proof` function of bridge GRANDPA module at the target chain. - fn build_submit_finality_proof_call( - header: SyncHeader>, - proof: SubstrateFinalityProof

, - is_free_execution_expected: bool, - context: <

::FinalityEngine as Engine>::FinalityVerificationContext, - ) -> CallOf; -} - -/// Building `submit_finality_proof` call when you have direct access to the target -/// chain runtime. -pub struct DirectSubmitGrandpaFinalityProofCallBuilder { - _phantom: PhantomData<(P, R, I)>, -} - -impl SubmitFinalityProofCallBuilder

- for DirectSubmitGrandpaFinalityProofCallBuilder -where - P: SubstrateFinalitySyncPipeline, - R: BridgeGrandpaConfig, - I: 'static, - R::BridgedChain: bp_runtime::Chain

>, - CallOf: From>, - P::FinalityEngine: Engine< - P::SourceChain, - FinalityProof = GrandpaJustification>, - FinalityVerificationContext = JustificationVerificationContext, - >, -{ - fn build_submit_finality_proof_call( - header: SyncHeader>, - proof: GrandpaJustification>, - _is_free_execution_expected: bool, - _context: JustificationVerificationContext, - ) -> CallOf { - BridgeGrandpaCall::::submit_finality_proof { - finality_target: Box::new(header.into_inner()), - justification: proof, - } - .into() - } -} - -/// Macro that generates `SubmitFinalityProofCallBuilder` implementation for the case when -/// you only have an access to the mocked version of target chain runtime. In this case you -/// should provide "name" of the call variant for the bridge GRANDPA calls and the "name" of -/// the variant for the `submit_finality_proof` call within that first option. -#[rustfmt::skip] -#[macro_export] -macro_rules! generate_submit_finality_proof_call_builder { - ($pipeline:ident, $mocked_builder:ident, $bridge_grandpa:path, $submit_finality_proof:path) => { - pub struct $mocked_builder; - - impl $crate::finality::SubmitFinalityProofCallBuilder<$pipeline> - for $mocked_builder - { - fn build_submit_finality_proof_call( - header: relay_substrate_client::SyncHeader< - relay_substrate_client::HeaderOf< - <$pipeline as $crate::finality_base::SubstrateFinalityPipeline>::SourceChain - > - >, - proof: bp_header_chain::justification::GrandpaJustification< - relay_substrate_client::HeaderOf< - <$pipeline as $crate::finality_base::SubstrateFinalityPipeline>::SourceChain - > - >, - _is_free_execution_expected: bool, - _context: bp_header_chain::justification::JustificationVerificationContext, - ) -> relay_substrate_client::CallOf< - <$pipeline as $crate::finality_base::SubstrateFinalityPipeline>::TargetChain - > { - bp_runtime::paste::item! 
{ - $bridge_grandpa($submit_finality_proof { - finality_target: Box::new(header.into_inner()), - justification: proof - }) - } - } - } - }; -} - -/// Macro that generates `SubmitFinalityProofCallBuilder` implementation for the case when -/// you only have an access to the mocked version of target chain runtime. In this case you -/// should provide "name" of the call variant for the bridge GRANDPA calls and the "name" of -/// the variant for the `submit_finality_proof_ex` call within that first option. -#[rustfmt::skip] -#[macro_export] -macro_rules! generate_submit_finality_proof_ex_call_builder { - ($pipeline:ident, $mocked_builder:ident, $bridge_grandpa:path, $submit_finality_proof:path) => { - pub struct $mocked_builder; - - impl $crate::finality::SubmitFinalityProofCallBuilder<$pipeline> - for $mocked_builder - { - fn build_submit_finality_proof_call( - header: relay_substrate_client::SyncHeader< - relay_substrate_client::HeaderOf< - <$pipeline as $crate::finality_base::SubstrateFinalityPipeline>::SourceChain - > - >, - proof: bp_header_chain::justification::GrandpaJustification< - relay_substrate_client::HeaderOf< - <$pipeline as $crate::finality_base::SubstrateFinalityPipeline>::SourceChain - > - >, - is_free_execution_expected: bool, - context: bp_header_chain::justification::JustificationVerificationContext, - ) -> relay_substrate_client::CallOf< - <$pipeline as $crate::finality_base::SubstrateFinalityPipeline>::TargetChain - > { - bp_runtime::paste::item! { - $bridge_grandpa($submit_finality_proof { - finality_target: Box::new(header.into_inner()), - justification: proof, - current_set_id: context.authority_set_id, - is_free_execution_expected, - }) - } - } - } - }; -} - -/// Run Substrate-to-Substrate finality sync loop. 
-pub async fn run( - source_client: Client, - target_client: Client, - headers_to_relay: HeadersToRelay, - transaction_params: TransactionParams>, - metrics_params: MetricsParams, -) -> anyhow::Result<()> { - log::info!( - target: "bridge", - "Starting {} -> {} finality proof relay: relaying {:?} headers", - P::SourceChain::NAME, - P::TargetChain::NAME, - headers_to_relay, - ); - - finality_relay::run( - SubstrateFinalitySource::

::new(source_client, None), - SubstrateFinalityTarget::

::new(target_client, transaction_params.clone()), - finality_relay::FinalitySyncParams { - tick: std::cmp::max( - P::SourceChain::AVERAGE_BLOCK_INTERVAL, - P::TargetChain::AVERAGE_BLOCK_INTERVAL, - ), - recent_finality_proofs_limit: RECENT_FINALITY_PROOFS_LIMIT, - stall_timeout: transaction_stall_timeout( - transaction_params.mortality, - P::TargetChain::AVERAGE_BLOCK_INTERVAL, - relay_utils::STALL_TIMEOUT, - ), - headers_to_relay, - }, - metrics_params, - futures::future::pending(), - ) - .await - .map_err(|e| anyhow::format_err!("{}", e)) -} diff --git a/relays/lib-substrate-relay/src/finality/source.rs b/relays/lib-substrate-relay/src/finality/source.rs deleted file mode 100644 index c94af6108957a0d2d4b0b4079220be9c11a5a470..0000000000000000000000000000000000000000 --- a/relays/lib-substrate-relay/src/finality/source.rs +++ /dev/null @@ -1,259 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Default generic implementation of finality source for basic Substrate client. 
- -use crate::{ - finality::{FinalitySyncPipelineAdapter, SubstrateFinalitySyncPipeline}, - finality_base::{ - engine::Engine, finality_proofs, SubstrateFinalityProof, SubstrateFinalityProofsStream, - }, -}; - -use async_std::sync::{Arc, Mutex}; -use async_trait::async_trait; -use bp_header_chain::FinalityProof; -use codec::Decode; -use finality_relay::{SourceClient, SourceClientBase}; -use futures::{ - select, - stream::{try_unfold, Stream, StreamExt, TryStreamExt}, -}; -use num_traits::One; -use relay_substrate_client::{BlockNumberOf, BlockWithJustification, Client, Error, HeaderOf}; -use relay_utils::{relay_loop::Client as RelayClient, UniqueSaturatedInto}; - -/// Shared updatable reference to the maximal header number that we want to sync from the source. -pub type RequiredHeaderNumberRef = Arc::BlockNumber>>; - -/// Substrate node as finality source. -pub struct SubstrateFinalitySource { - client: Client, - maximal_header_number: Option>, -} - -impl SubstrateFinalitySource

{ - /// Create new headers source using given client. - pub fn new( - client: Client, - maximal_header_number: Option>, - ) -> Self { - SubstrateFinalitySource { client, maximal_header_number } - } - - /// Returns reference to the underlying RPC client. - pub fn client(&self) -> &Client { - &self.client - } - - /// Returns best finalized block number. - pub async fn on_chain_best_finalized_block_number( - &self, - ) -> Result, Error> { - // we **CAN** continue to relay finality proofs if source node is out of sync, because - // target node may be missing proofs that are already available at the source - self.client.best_finalized_header_number().await - } - - /// Return header and its justification of the given block or its descendant that - /// has a GRANDPA justification. - /// - /// This method is optimized for cases when `block_number` is close to the best finalized - /// chain block. - pub async fn prove_block_finality( - &self, - block_number: BlockNumberOf, - ) -> Result< - (relay_substrate_client::SyncHeader>, SubstrateFinalityProof

), - Error, - > { - // first, subscribe to proofs - let next_persistent_proof = - self.persistent_proofs_stream(block_number + One::one()).await?.fuse(); - let next_ephemeral_proof = self.ephemeral_proofs_stream(block_number).await?.fuse(); - - // in perfect world we'll need to return justfication for the requested `block_number` - let (header, maybe_proof) = self.header_and_finality_proof(block_number).await?; - if let Some(proof) = maybe_proof { - return Ok((header, proof)) - } - - // otherwise we don't care which header to return, so let's select first - futures::pin_mut!(next_persistent_proof, next_ephemeral_proof); - loop { - select! { - maybe_header_and_proof = next_persistent_proof.next() => match maybe_header_and_proof { - Some(header_and_proof) => return header_and_proof, - None => continue, - }, - maybe_header_and_proof = next_ephemeral_proof.next() => match maybe_header_and_proof { - Some(header_and_proof) => return header_and_proof, - None => continue, - }, - complete => return Err(Error::FinalityProofNotFound(block_number.unique_saturated_into())) - } - } - } - - /// Returns stream of headers and their persistent proofs, starting from given block. - async fn persistent_proofs_stream( - &self, - block_number: BlockNumberOf, - ) -> Result< - impl Stream< - Item = Result< - ( - relay_substrate_client::SyncHeader>, - SubstrateFinalityProof

, - ), - Error, - >, - >, - Error, - > { - let client = self.client.clone(); - let best_finalized_block_number = client.best_finalized_header_number().await?; - Ok(try_unfold((client, block_number), move |(client, current_block_number)| async move { - // if we've passed the `best_finalized_block_number`, we no longer need persistent - // justifications - if current_block_number > best_finalized_block_number { - return Ok(None) - } - - let (header, maybe_proof) = - header_and_finality_proof::

(&client, current_block_number).await?; - let next_block_number = current_block_number + One::one(); - let next_state = (client, next_block_number); - - Ok(Some((maybe_proof.map(|proof| (header, proof)), next_state))) - }) - .try_filter_map(|maybe_result| async { Ok(maybe_result) })) - } - - /// Returns stream of headers and their ephemeral proofs, starting from given block. - async fn ephemeral_proofs_stream( - &self, - block_number: BlockNumberOf, - ) -> Result< - impl Stream< - Item = Result< - ( - relay_substrate_client::SyncHeader>, - SubstrateFinalityProof

, - ), - Error, - >, - >, - Error, - > { - let client = self.client.clone(); - Ok(self.finality_proofs().await?.map(Ok).try_filter_map(move |proof| { - let client = client.clone(); - async move { - if proof.target_header_number() < block_number { - return Ok(None) - } - - let header = client.header_by_number(proof.target_header_number()).await?; - Ok(Some((header.into(), proof))) - } - })) - } -} - -impl Clone for SubstrateFinalitySource

{ - fn clone(&self) -> Self { - SubstrateFinalitySource { - client: self.client.clone(), - maximal_header_number: self.maximal_header_number.clone(), - } - } -} - -#[async_trait] -impl RelayClient for SubstrateFinalitySource

{ - type Error = Error; - - async fn reconnect(&mut self) -> Result<(), Error> { - self.client.reconnect().await - } -} - -#[async_trait] -impl SourceClientBase> - for SubstrateFinalitySource

-{ - type FinalityProofsStream = SubstrateFinalityProofsStream

; - - async fn finality_proofs(&self) -> Result { - finality_proofs::

(&self.client).await - } -} - -#[async_trait] -impl SourceClient> - for SubstrateFinalitySource

-{ - async fn best_finalized_block_number(&self) -> Result, Error> { - let mut finalized_header_number = self.on_chain_best_finalized_block_number().await?; - // never return block number larger than requested. This way we'll never sync headers - // past `maximal_header_number` - if let Some(ref maximal_header_number) = self.maximal_header_number { - let maximal_header_number = *maximal_header_number.lock().await; - if finalized_header_number > maximal_header_number { - finalized_header_number = maximal_header_number; - } - } - Ok(finalized_header_number) - } - - async fn header_and_finality_proof( - &self, - number: BlockNumberOf, - ) -> Result< - ( - relay_substrate_client::SyncHeader>, - Option>, - ), - Error, - > { - header_and_finality_proof::

(&self.client, number).await - } -} - -async fn header_and_finality_proof( - client: &Client, - number: BlockNumberOf, -) -> Result< - ( - relay_substrate_client::SyncHeader>, - Option>, - ), - Error, -> { - let header_hash = client.block_hash_by_number(number).await?; - let signed_block = client.get_block(Some(header_hash)).await?; - - let justification = signed_block - .justification(P::FinalityEngine::ID) - .map(|raw_justification| { - SubstrateFinalityProof::

::decode(&mut raw_justification.as_slice()) - }) - .transpose() - .map_err(Error::ResponseParseFailed)?; - - Ok((signed_block.header().into(), justification)) -} diff --git a/relays/lib-substrate-relay/src/finality/target.rs b/relays/lib-substrate-relay/src/finality/target.rs deleted file mode 100644 index adbcfe0096d5f46ddacdf80e335f74967febca0e..0000000000000000000000000000000000000000 --- a/relays/lib-substrate-relay/src/finality/target.rs +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate client as Substrate finality proof target. - -use crate::{ - finality::{ - FinalitySyncPipelineAdapter, SubmitFinalityProofCallBuilder, SubstrateFinalitySyncPipeline, - }, - finality_base::{best_synced_header_id, engine::Engine, SubstrateFinalityProof}, - TransactionParams, -}; - -use async_trait::async_trait; -use bp_runtime::BlockNumberOf; -use finality_relay::TargetClient; -use relay_substrate_client::{ - AccountKeyPairOf, Chain, Client, Error, HeaderIdOf, HeaderOf, SyncHeader, TransactionEra, - TransactionTracker, UnsignedTransaction, -}; -use relay_utils::relay_loop::Client as RelayClient; -use sp_runtime::traits::Header; - -/// Substrate client as Substrate finality target. 
-pub struct SubstrateFinalityTarget { - client: Client, - transaction_params: TransactionParams>, -} - -impl SubstrateFinalityTarget

{ - /// Create new Substrate headers target. - pub fn new( - client: Client, - transaction_params: TransactionParams>, - ) -> Self { - SubstrateFinalityTarget { client, transaction_params } - } - - /// Ensure that the bridge pallet at target chain is active. - pub async fn ensure_pallet_active(&self) -> Result<(), Error> { - let is_halted = P::FinalityEngine::is_halted(&self.client).await?; - if is_halted { - return Err(Error::BridgePalletIsHalted) - } - - let is_initialized = P::FinalityEngine::is_initialized(&self.client).await?; - if !is_initialized { - return Err(Error::BridgePalletIsNotInitialized) - } - - Ok(()) - } -} - -impl Clone for SubstrateFinalityTarget

{ - fn clone(&self) -> Self { - SubstrateFinalityTarget { - client: self.client.clone(), - transaction_params: self.transaction_params.clone(), - } - } -} - -#[async_trait] -impl RelayClient for SubstrateFinalityTarget

{ - type Error = Error; - - async fn reconnect(&mut self) -> Result<(), Error> { - self.client.reconnect().await - } -} - -#[async_trait] -impl TargetClient> - for SubstrateFinalityTarget

-{ - type TransactionTracker = TransactionTracker>; - - async fn best_finalized_source_block_id(&self) -> Result, Error> { - // we can't continue to relay finality if target node is out of sync, because - // it may have already received (some of) headers that we're going to relay - self.client.ensure_synced().await?; - // we can't relay finality if bridge pallet at target chain is halted - self.ensure_pallet_active().await?; - - Ok(best_synced_header_id::( - &self.client, - self.client.best_header().await?.hash(), - ) - .await? - .ok_or(Error::BridgePalletIsNotInitialized)?) - } - - async fn free_source_headers_interval( - &self, - ) -> Result>, Self::Error> { - self.client - .typed_state_call( - P::SourceChain::FREE_HEADERS_INTERVAL_METHOD.into(), - (), - Some(self.client.best_header().await?.hash()), - ) - .await - } - - async fn submit_finality_proof( - &self, - header: SyncHeader>, - mut proof: SubstrateFinalityProof

, - is_free_execution_expected: bool, - ) -> Result { - // verify and runtime module at target chain may require optimized finality proof - let context = - P::FinalityEngine::verify_and_optimize_proof(&self.client, &header, &mut proof).await?; - - // now we may submit optimized finality proof - let mortality = self.transaction_params.mortality; - let call = P::SubmitFinalityProofCallBuilder::build_submit_finality_proof_call( - header, - proof, - is_free_execution_expected, - context, - ); - self.client - .submit_and_watch_signed_extrinsic( - &self.transaction_params.signer, - move |best_block_id, transaction_nonce| { - Ok(UnsignedTransaction::new(call.into(), transaction_nonce) - .era(TransactionEra::new(best_block_id, mortality))) - }, - ) - .await - } -} diff --git a/relays/lib-substrate-relay/src/finality_base/engine.rs b/relays/lib-substrate-relay/src/finality_base/engine.rs deleted file mode 100644 index e517b0fd9b9abd50d6445e7222ef24ed946554bf..0000000000000000000000000000000000000000 --- a/relays/lib-substrate-relay/src/finality_base/engine.rs +++ /dev/null @@ -1,464 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Support of different finality engines, available in Substrate. 
- -use crate::error::Error; -use async_trait::async_trait; -use bp_header_chain::{ - justification::{ - verify_and_optimize_justification, GrandpaEquivocationsFinder, GrandpaJustification, - JustificationVerificationContext, - }, - max_expected_submit_finality_proof_arguments_size, AuthoritySet, ConsensusLogReader, - FinalityProof, FindEquivocations, GrandpaConsensusLogReader, HeaderFinalityInfo, - HeaderGrandpaInfo, StoredHeaderGrandpaInfo, -}; -use bp_runtime::{BasicOperatingMode, HeaderIdProvider, OperatingMode}; -use codec::{Decode, Encode}; -use num_traits::{One, Zero}; -use relay_substrate_client::{ - BlockNumberOf, Chain, ChainWithGrandpa, Client, Error as SubstrateError, HashOf, HeaderOf, - Subscription, SubstrateFinalityClient, SubstrateGrandpaFinalityClient, -}; -use sp_consensus_grandpa::{AuthorityList as GrandpaAuthoritiesSet, GRANDPA_ENGINE_ID}; -use sp_core::{storage::StorageKey, Bytes}; -use sp_runtime::{scale_info::TypeInfo, traits::Header, ConsensusEngineId, SaturatedConversion}; -use std::{fmt::Debug, marker::PhantomData}; - -/// Result of checking maximal expected call size. -pub enum MaxExpectedCallSizeCheck { - /// Size is ok and call will be refunded. - Ok, - /// The call size exceeds the maximal expected and relayer will only get partial refund. - Exceeds { - /// Actual call size. - call_size: u32, - /// Maximal expected call size. - max_call_size: u32, - }, -} - -/// Finality engine, used by the Substrate chain. -#[async_trait] -pub trait Engine: Send { - /// Unique consensus engine identifier. - const ID: ConsensusEngineId; - /// A reader that can extract the consensus log from the header digest and interpret it. - type ConsensusLogReader: ConsensusLogReader; - /// Type of Finality RPC client used by this engine. - type FinalityClient: SubstrateFinalityClient; - /// Type of finality proofs, used by consensus engine. - type FinalityProof: FinalityProof, BlockNumberOf> + Decode + Encode; - /// The context needed for verifying finality proofs. 
- type FinalityVerificationContext: Debug + Send; - /// The type of the equivocation proof used by the consensus engine. - type EquivocationProof: Clone + Debug + Send + Sync; - /// The equivocations finder. - type EquivocationsFinder: FindEquivocations< - Self::FinalityProof, - Self::FinalityVerificationContext, - Self::EquivocationProof, - >; - /// The type of the key owner proof used by the consensus engine. - type KeyOwnerProof: Send; - /// Type of bridge pallet initialization data. - type InitializationData: Debug + Send + Sync + 'static; - /// Type of bridge pallet operating mode. - type OperatingMode: OperatingMode + 'static; - - /// Returns storage at the bridged (target) chain that corresponds to some value that is - /// missing from the storage until bridge pallet is initialized. - /// - /// Note that we don't care about type of the value - just if it present or not. - fn is_initialized_key() -> StorageKey; - - /// Returns `Ok(true)` if finality pallet at the bridged chain has already been initialized. - async fn is_initialized( - target_client: &Client, - ) -> Result { - Ok(target_client - .raw_storage_value(Self::is_initialized_key(), None) - .await? - .is_some()) - } - - /// Returns storage key at the bridged (target) chain that corresponds to the variable - /// that holds the operating mode of the pallet. - fn pallet_operating_mode_key() -> StorageKey; - - /// Returns `Ok(true)` if finality pallet at the bridged chain is halted. - async fn is_halted( - target_client: &Client, - ) -> Result { - Ok(target_client - .storage_value::(Self::pallet_operating_mode_key(), None) - .await? - .map(|operating_mode| operating_mode.is_halted()) - .unwrap_or(false)) - } - - /// A method to subscribe to encoded finality proofs, given source client. 
- async fn source_finality_proofs( - source_client: &Client, - ) -> Result, SubstrateError> { - source_client.subscribe_finality_justifications::().await - } - - /// Verify and optimize finality proof before sending it to the target node. - /// - /// Apart from optimization, we expect this method to perform all required checks - /// that the `header` and `proof` are valid at the current state of the target chain. - async fn verify_and_optimize_proof( - target_client: &Client, - header: &C::Header, - proof: &mut Self::FinalityProof, - ) -> Result; - - /// Checks whether the given `header` and its finality `proof` fit the maximal expected - /// call size limit. If result is `MaxExpectedCallSizeCheck::Exceeds { .. }`, this - /// submission won't be fully refunded and relayer will spend its own funds on that. - fn check_max_expected_call_size( - header: &C::Header, - proof: &Self::FinalityProof, - ) -> MaxExpectedCallSizeCheck; - - /// Prepare initialization data for the finality bridge pallet. - async fn prepare_initialization_data( - client: Client, - ) -> Result, BlockNumberOf>>; - - /// Get the context needed for validating a finality proof. - async fn finality_verification_context( - target_client: &Client, - at: HashOf, - ) -> Result; - - /// Returns the finality info associated to the source headers synced with the target - /// at the provided block. - async fn synced_headers_finality_info( - target_client: &Client, - at: TargetChain::Hash, - ) -> Result< - Vec>, - SubstrateError, - >; - - /// Generate key ownership proof for the provided equivocation. - async fn generate_source_key_ownership_proof( - source_client: &Client, - at: C::Hash, - equivocation: &Self::EquivocationProof, - ) -> Result; -} - -/// GRANDPA finality engine. -pub struct Grandpa(PhantomData); - -impl Grandpa { - /// Read header by hash from the source client. 
- async fn source_header( - source_client: &Client, - header_hash: C::Hash, - ) -> Result, BlockNumberOf>> { - source_client - .header_by_hash(header_hash) - .await - .map_err(|err| Error::RetrieveHeader(C::NAME, header_hash, err)) - } - - /// Read GRANDPA authorities set at given header. - async fn source_authorities_set( - source_client: &Client, - header_hash: C::Hash, - ) -> Result, BlockNumberOf>> { - let raw_authorities_set = source_client - .grandpa_authorities_set(header_hash) - .await - .map_err(|err| Error::RetrieveAuthorities(C::NAME, header_hash, err))?; - GrandpaAuthoritiesSet::decode(&mut &raw_authorities_set[..]) - .map_err(|err| Error::DecodeAuthorities(C::NAME, header_hash, err)) - } -} - -#[async_trait] -impl Engine for Grandpa { - const ID: ConsensusEngineId = GRANDPA_ENGINE_ID; - type ConsensusLogReader = GrandpaConsensusLogReader<::Number>; - type FinalityClient = SubstrateGrandpaFinalityClient; - type FinalityProof = GrandpaJustification>; - type FinalityVerificationContext = JustificationVerificationContext; - type EquivocationProof = sp_consensus_grandpa::EquivocationProof, BlockNumberOf>; - type EquivocationsFinder = GrandpaEquivocationsFinder; - type KeyOwnerProof = C::KeyOwnerProof; - type InitializationData = bp_header_chain::InitializationData; - type OperatingMode = BasicOperatingMode; - - fn is_initialized_key() -> StorageKey { - bp_header_chain::storage_keys::best_finalized_key(C::WITH_CHAIN_GRANDPA_PALLET_NAME) - } - - fn pallet_operating_mode_key() -> StorageKey { - bp_header_chain::storage_keys::pallet_operating_mode_key(C::WITH_CHAIN_GRANDPA_PALLET_NAME) - } - - async fn verify_and_optimize_proof( - target_client: &Client, - header: &C::Header, - proof: &mut Self::FinalityProof, - ) -> Result { - let verification_context = Grandpa::::finality_verification_context( - target_client, - target_client.best_header().await?.hash(), - ) - .await?; - // we're risking with race here - we have decided to submit justification some time ago 
and - // actual authorities set (which we have read now) may have changed, so this - // `optimize_justification` may fail. But if target chain is configured properly, it'll fail - // anyway, after we submit transaction and failing earlier is better. So - it is fine - verify_and_optimize_justification( - (header.hash(), *header.number()), - &verification_context, - proof, - ) - .map(|_| verification_context) - .map_err(|e| { - SubstrateError::Custom(format!( - "Failed to optimize {} GRANDPA jutification for header {:?}: {:?}", - C::NAME, - header.id(), - e, - )) - }) - } - - fn check_max_expected_call_size( - header: &C::Header, - proof: &Self::FinalityProof, - ) -> MaxExpectedCallSizeCheck { - let is_mandatory = Self::ConsensusLogReader::schedules_authorities_change(header.digest()); - let call_size: u32 = - header.encoded_size().saturating_add(proof.encoded_size()).saturated_into(); - let max_call_size = max_expected_submit_finality_proof_arguments_size::( - is_mandatory, - proof.commit.precommits.len().saturated_into(), - ); - if call_size > max_call_size { - MaxExpectedCallSizeCheck::Exceeds { call_size, max_call_size } - } else { - MaxExpectedCallSizeCheck::Ok - } - } - - /// Prepare initialization data for the GRANDPA verifier pallet. - async fn prepare_initialization_data( - source_client: Client, - ) -> Result, BlockNumberOf>> { - // In ideal world we just need to get best finalized header and then to read GRANDPA - // authorities set (`pallet_grandpa::CurrentSetId` + `GrandpaApi::grandpa_authorities()`) at - // this header. - // - // But now there are problems with this approach - `CurrentSetId` may return invalid value. - // So here we're waiting for the next justification, read the authorities set and then try - // to figure out the set id with bruteforce. 
- let justifications = Self::source_finality_proofs(&source_client) - .await - .map_err(|err| Error::Subscribe(C::NAME, err))?; - // Read next justification - the header that it finalizes will be used as initial header. - let justification = justifications - .next() - .await - .map_err(|e| Error::ReadJustification(C::NAME, e)) - .and_then(|justification| { - justification.ok_or(Error::ReadJustificationStreamEnded(C::NAME)) - })?; - - // Read initial header. - let justification: GrandpaJustification = - Decode::decode(&mut &justification.0[..]) - .map_err(|err| Error::DecodeJustification(C::NAME, err))?; - - let (initial_header_hash, initial_header_number) = - (justification.commit.target_hash, justification.commit.target_number); - - let initial_header = Self::source_header(&source_client, initial_header_hash).await?; - log::trace!(target: "bridge", "Selected {} initial header: {}/{}", - C::NAME, - initial_header_number, - initial_header_hash, - ); - - // Read GRANDPA authorities set at initial header. - let initial_authorities_set = - Self::source_authorities_set(&source_client, initial_header_hash).await?; - log::trace!(target: "bridge", "Selected {} initial authorities set: {:?}", - C::NAME, - initial_authorities_set, - ); - - // If initial header changes the GRANDPA authorities set, then we need previous authorities - // to verify justification. - let mut authorities_for_verification = initial_authorities_set.clone(); - let scheduled_change = GrandpaConsensusLogReader::>::find_scheduled_change( - initial_header.digest(), - ); - assert!( - scheduled_change.as_ref().map(|c| c.delay.is_zero()).unwrap_or(true), - "GRANDPA authorities change at {} scheduled to happen in {:?} blocks. 
We expect\ - regular change to have zero delay", - initial_header_hash, - scheduled_change.as_ref().map(|c| c.delay), - ); - let schedules_change = scheduled_change.is_some(); - if schedules_change { - authorities_for_verification = - Self::source_authorities_set(&source_client, *initial_header.parent_hash()).await?; - log::trace!( - target: "bridge", - "Selected {} header is scheduling GRANDPA authorities set changes. Using previous set: {:?}", - C::NAME, - authorities_for_verification, - ); - } - - // Now let's try to guess authorities set id by verifying justification. - let mut initial_authorities_set_id = 0; - let mut min_possible_block_number = C::BlockNumber::zero(); - loop { - log::trace!( - target: "bridge", "Trying {} GRANDPA authorities set id: {}", - C::NAME, - initial_authorities_set_id, - ); - - let is_valid_set_id = verify_and_optimize_justification( - (initial_header_hash, initial_header_number), - &AuthoritySet { - authorities: authorities_for_verification.clone(), - set_id: initial_authorities_set_id, - } - .try_into() - .map_err(|_| { - Error::ReadInvalidAuthorities(C::NAME, authorities_for_verification.clone()) - })?, - &mut justification.clone(), - ) - .is_ok(); - - if is_valid_set_id { - break - } - - initial_authorities_set_id += 1; - min_possible_block_number += One::one(); - if min_possible_block_number > initial_header_number { - // there can't be more authorities set changes than headers => if we have reached - // `initial_block_number` and still have not found correct value of - // `initial_authorities_set_id`, then something else is broken => fail - return Err(Error::GuessInitialAuthorities(C::NAME, initial_header_number)) - } - } - - Ok(bp_header_chain::InitializationData { - header: Box::new(initial_header), - authority_list: initial_authorities_set, - set_id: if schedules_change { - initial_authorities_set_id + 1 - } else { - initial_authorities_set_id - }, - operating_mode: BasicOperatingMode::Normal, - }) - } - - async fn 
finality_verification_context( - target_client: &Client, - at: HashOf, - ) -> Result { - let current_authority_set_key = bp_header_chain::storage_keys::current_authority_set_key( - C::WITH_CHAIN_GRANDPA_PALLET_NAME, - ); - let authority_set: AuthoritySet = target_client - .storage_value(current_authority_set_key, Some(at)) - .await? - .map(Ok) - .unwrap_or(Err(SubstrateError::Custom(format!( - "{} `CurrentAuthoritySet` is missing from the {} storage", - C::NAME, - TargetChain::NAME, - ))))?; - - authority_set.try_into().map_err(|e| { - SubstrateError::Custom(format!( - "{} `CurrentAuthoritySet` from the {} storage is invalid: {e:?}", - C::NAME, - TargetChain::NAME, - )) - }) - } - - async fn synced_headers_finality_info( - target_client: &Client, - at: TargetChain::Hash, - ) -> Result>>, SubstrateError> { - let stored_headers_grandpa_info: Vec>> = target_client - .typed_state_call(C::SYNCED_HEADERS_GRANDPA_INFO_METHOD.to_string(), (), Some(at)) - .await?; - - let mut headers_grandpa_info = vec![]; - for stored_header_grandpa_info in stored_headers_grandpa_info { - headers_grandpa_info.push(stored_header_grandpa_info.try_into().map_err(|e| { - SubstrateError::Custom(format!( - "{} `AuthoritySet` synced to {} is invalid: {e:?} ", - C::NAME, - TargetChain::NAME, - )) - })?); - } - - Ok(headers_grandpa_info) - } - - async fn generate_source_key_ownership_proof( - source_client: &Client, - at: C::Hash, - equivocation: &Self::EquivocationProof, - ) -> Result { - let set_id = equivocation.set_id(); - let offender = equivocation.offender(); - - let opaque_key_owner_proof = source_client - .generate_grandpa_key_ownership_proof(at, set_id, offender.clone()) - .await? 
- .ok_or(SubstrateError::Custom(format!( - "Couldn't get GRANDPA key ownership proof from {} at block: {at} \ - for offender: {:?}, set_id: {set_id} ", - C::NAME, - offender.clone(), - )))?; - - let key_owner_proof = - opaque_key_owner_proof.decode().ok_or(SubstrateError::Custom(format!( - "Couldn't decode GRANDPA `OpaqueKeyOwnnershipProof` from {} at block: {at} - to `{:?}` for offender: {:?}, set_id: {set_id}, at block: {at}", - C::NAME, - ::type_info().path, - offender.clone(), - )))?; - - Ok(key_owner_proof) - } -} diff --git a/relays/lib-substrate-relay/src/finality_base/mod.rs b/relays/lib-substrate-relay/src/finality_base/mod.rs deleted file mode 100644 index 825960b1b3ef2cc4f73b7565d6a2c8fe3e30fdd9..0000000000000000000000000000000000000000 --- a/relays/lib-substrate-relay/src/finality_base/mod.rs +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2019-2023 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Types and functions intended to ease adding of new Substrate -> Substrate -//! finality pipelines. 
- -pub mod engine; - -use crate::finality_base::engine::Engine; - -use async_trait::async_trait; -use bp_runtime::{HashOf, HeaderIdOf}; -use codec::Decode; -use futures::{stream::unfold, Stream, StreamExt}; -use relay_substrate_client::{Chain, Client, Error}; -use std::{fmt::Debug, pin::Pin}; - -/// Substrate -> Substrate finality related pipeline. -#[async_trait] -pub trait SubstrateFinalityPipeline: 'static + Clone + Debug + Send + Sync { - /// Headers of this chain are submitted to the `TargetChain`. - type SourceChain: Chain; - /// Headers of the `SourceChain` are submitted to this chain. - type TargetChain: Chain; - /// Finality engine. - type FinalityEngine: Engine; -} - -/// Substrate finality proof. Specific to the used `FinalityEngine`. -pub type SubstrateFinalityProof

= <

::FinalityEngine as Engine< -

::SourceChain, ->>::FinalityProof; - -/// Substrate finality proofs stream. -pub type SubstrateFinalityProofsStream

= - Pin> + Send>>; - -/// Subscribe to new finality proofs. -pub async fn finality_proofs( - client: &Client, -) -> Result, Error> { - Ok(unfold( - P::FinalityEngine::source_finality_proofs(client).await?, - move |subscription| async move { - loop { - let log_error = |err| { - log::error!( - target: "bridge", - "Failed to read justification target from the {} justifications stream: {:?}", - P::SourceChain::NAME, - err, - ); - }; - - let next_justification = - subscription.next().await.map_err(|err| log_error(err.to_string())).ok()??; - - let decoded_justification = - >::FinalityProof::decode( - &mut &next_justification[..], - ); - - let justification = match decoded_justification { - Ok(j) => j, - Err(err) => { - log_error(format!("decode failed with error {err:?}")); - continue - }, - }; - - return Some((justification, subscription)) - } - }, - ) - .boxed()) -} - -/// Get the id of the best `SourceChain` header known to the `TargetChain` at the provided -/// target block using the exposed runtime API method. -/// -/// The runtime API method should be `FinalityApi::best_finalized()`. -pub async fn best_synced_header_id( - target_client: &Client, - at: HashOf, -) -> Result>, Error> -where - SourceChain: Chain, - TargetChain: Chain, -{ - // now let's read id of best finalized peer header at our best finalized block - target_client - .typed_state_call(SourceChain::BEST_FINALIZED_HEADER_ID_METHOD.into(), (), Some(at)) - .await -} diff --git a/relays/lib-substrate-relay/src/lib.rs b/relays/lib-substrate-relay/src/lib.rs deleted file mode 100644 index ea818264c42d9e669d0ffafff4504f5ae0d9cb1a..0000000000000000000000000000000000000000 --- a/relays/lib-substrate-relay/src/lib.rs +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! The library of substrate relay. contains some public codes to provide to substrate relay. - -#![warn(missing_docs)] - -use relay_substrate_client::{Chain, ChainWithUtilityPallet, UtilityPallet}; - -use std::marker::PhantomData; - -// to avoid `finality_relay` dependency in other crates -pub use finality_relay::HeadersToRelay; - -pub mod equivocation; -pub mod error; -pub mod finality; -pub mod finality_base; -pub mod messages_lane; -pub mod messages_metrics; -pub mod messages_source; -pub mod messages_target; -pub mod on_demand; -pub mod parachains; - -/// Transaction creation parameters. -#[derive(Clone, Debug)] -pub struct TransactionParams { - /// Transactions author. - pub signer: TS, - /// Transactions mortality. - pub mortality: Option, -} - -/// Tagged relay account, which balance may be exposed as metrics by the relay. -#[derive(Clone, Debug)] -pub enum TaggedAccount { - /// Account, used to sign message (also headers and parachains) relay transactions from given - /// bridged chain. - Messages { - /// Account id. - id: AccountId, - /// Name of the bridged chain, which sends us messages or delivery confirmations. - bridged_chain: String, - }, -} - -impl TaggedAccount { - /// Returns reference to the account id. - pub fn id(&self) -> &AccountId { - match *self { - TaggedAccount::Messages { ref id, .. 
} => id, - } - } - - /// Returns stringified account tag. - pub fn tag(&self) -> String { - match *self { - TaggedAccount::Messages { ref bridged_chain, .. } => { - format!("{bridged_chain}Messages") - }, - } - } -} - -/// Batch call builder. -pub trait BatchCallBuilder: Clone + Send + Sync { - /// Create batch call from given calls vector. - fn build_batch_call(&self, _calls: Vec) -> Call; -} - -/// Batch call builder constructor. -pub trait BatchCallBuilderConstructor: Clone { - /// Call builder, used by this constructor. - type CallBuilder: BatchCallBuilder; - /// Create a new instance of a batch call builder. - fn new_builder() -> Option; -} - -/// Batch call builder based on `pallet-utility`. -#[derive(Clone)] -pub struct UtilityPalletBatchCallBuilder(PhantomData); - -impl BatchCallBuilder for UtilityPalletBatchCallBuilder -where - C: ChainWithUtilityPallet, -{ - fn build_batch_call(&self, calls: Vec) -> C::Call { - C::UtilityPallet::build_batch_call(calls) - } -} - -impl BatchCallBuilderConstructor for UtilityPalletBatchCallBuilder -where - C: ChainWithUtilityPallet, -{ - type CallBuilder = Self; - - fn new_builder() -> Option { - Some(Self(Default::default())) - } -} - -// A `BatchCallBuilderConstructor` that always returns `None`. -impl BatchCallBuilderConstructor for () { - type CallBuilder = (); - fn new_builder() -> Option { - None - } -} - -// Dummy `BatchCallBuilder` implementation that must never be used outside -// of the `impl BatchCallBuilderConstructor for ()` code. 
-impl BatchCallBuilder for () { - fn build_batch_call(&self, _calls: Vec) -> Call { - unreachable!("never called, because ()::new_builder() returns None; qed") - } -} diff --git a/relays/lib-substrate-relay/src/messages_lane.rs b/relays/lib-substrate-relay/src/messages_lane.rs deleted file mode 100644 index abeab8c1402d677923e480f65e869c4496feaf8b..0000000000000000000000000000000000000000 --- a/relays/lib-substrate-relay/src/messages_lane.rs +++ /dev/null @@ -1,587 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Tools for supporting message lanes between two Substrate-based chains. 
- -use crate::{ - messages_source::{SubstrateMessagesProof, SubstrateMessagesSource}, - messages_target::{SubstrateMessagesDeliveryProof, SubstrateMessagesTarget}, - on_demand::OnDemandRelay, - BatchCallBuilder, BatchCallBuilderConstructor, TransactionParams, -}; - -use async_std::sync::Arc; -use bp_messages::{ChainWithMessages as _, LaneId, MessageNonce}; -use bp_runtime::{ - AccountIdOf, Chain as _, EncodedOrDecodedCall, HeaderIdOf, TransactionEra, WeightExtraOps, -}; -use bridge_runtime_common::messages::{ - source::FromBridgedChainMessagesDeliveryProof, target::FromBridgedChainMessagesProof, -}; -use codec::Encode; -use frame_support::{dispatch::GetDispatchInfo, weights::Weight}; -use messages_relay::{message_lane::MessageLane, message_lane_loop::BatchTransaction}; -use pallet_bridge_messages::{Call as BridgeMessagesCall, Config as BridgeMessagesConfig}; -use relay_substrate_client::{ - transaction_stall_timeout, AccountKeyPairOf, BalanceOf, BlockNumberOf, CallOf, Chain, - ChainWithMessages, ChainWithTransactions, Client, Error as SubstrateError, HashOf, SignParam, - UnsignedTransaction, -}; -use relay_utils::{ - metrics::{GlobalMetrics, MetricsParams, StandaloneMetric}, - STALL_TIMEOUT, -}; -use sp_core::Pair; -use sp_runtime::traits::Zero; -use std::{convert::TryFrom, fmt::Debug, marker::PhantomData}; - -/// Substrate -> Substrate messages synchronization pipeline. -pub trait SubstrateMessageLane: 'static + Clone + Debug + Send + Sync { - /// Messages of this chain are relayed to the `TargetChain`. - type SourceChain: ChainWithMessages + ChainWithTransactions; - /// Messages from the `SourceChain` are dispatched on this chain. - type TargetChain: ChainWithMessages + ChainWithTransactions; - - /// How receive messages proof call is built? - type ReceiveMessagesProofCallBuilder: ReceiveMessagesProofCallBuilder; - /// How receive messages delivery proof call is built? 
- type ReceiveMessagesDeliveryProofCallBuilder: ReceiveMessagesDeliveryProofCallBuilder; - - /// How batch calls are built at the source chain? - type SourceBatchCallBuilder: BatchCallBuilderConstructor>; - /// How batch calls are built at the target chain? - type TargetBatchCallBuilder: BatchCallBuilderConstructor>; -} - -/// Adapter that allows all `SubstrateMessageLane` to act as `MessageLane`. -#[derive(Clone, Debug)] -pub struct MessageLaneAdapter { - _phantom: PhantomData

, -} - -impl MessageLane for MessageLaneAdapter

{ - const SOURCE_NAME: &'static str = P::SourceChain::NAME; - const TARGET_NAME: &'static str = P::TargetChain::NAME; - - type MessagesProof = SubstrateMessagesProof; - type MessagesReceivingProof = SubstrateMessagesDeliveryProof; - - type SourceChainBalance = BalanceOf; - type SourceHeaderNumber = BlockNumberOf; - type SourceHeaderHash = HashOf; - - type TargetHeaderNumber = BlockNumberOf; - type TargetHeaderHash = HashOf; -} - -/// Substrate <-> Substrate messages relay parameters. -pub struct MessagesRelayParams { - /// Messages source client. - pub source_client: Client, - /// Source transaction params. - pub source_transaction_params: TransactionParams>, - /// Messages target client. - pub target_client: Client, - /// Target transaction params. - pub target_transaction_params: TransactionParams>, - /// Optional on-demand source to target headers relay. - pub source_to_target_headers_relay: - Option>>, - /// Optional on-demand target to source headers relay. - pub target_to_source_headers_relay: - Option>>, - /// Identifier of lane that needs to be served. - pub lane_id: LaneId, - /// Messages relay limits. If not provided, the relay tries to determine it automatically, - /// using `TransactionPayment` pallet runtime API. - pub limits: Option, - /// Metrics parameters. - pub metrics_params: MetricsParams, -} - -/// Delivery transaction limits. -pub struct MessagesRelayLimits { - /// Maximal number of messages in the delivery transaction. - pub max_messages_in_single_batch: MessageNonce, - /// Maximal cumulative weight of messages in the delivery transaction. - pub max_messages_weight_in_single_batch: Weight, -} - -/// Batch transaction that brings headers + and messages delivery/receiving confirmations to the -/// source node. -#[derive(Clone)] -pub struct BatchProofTransaction>> { - builder: B::CallBuilder, - proved_header: HeaderIdOf, - prove_calls: Vec>, - - /// Using `fn() -> B` in order to avoid implementing `Send` for `B`. 
- _phantom: PhantomData B>, -} - -impl>> std::fmt::Debug - for BatchProofTransaction -{ - fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - fmt.debug_struct("BatchProofTransaction") - .field("proved_header", &self.proved_header) - .finish() - } -} - -impl>> - BatchProofTransaction -{ - /// Creates a new instance of `BatchProofTransaction`. - pub async fn new( - relay: Arc>, - block_num: BlockNumberOf, - ) -> Result, SubstrateError> { - if let Some(builder) = B::new_builder() { - let (proved_header, prove_calls) = relay.prove_header(block_num).await?; - return Ok(Some(Self { - builder, - proved_header, - prove_calls, - _phantom: Default::default(), - })) - } - - Ok(None) - } - - /// Return a batch call that includes the provided call. - pub fn append_call_and_build(mut self, call: CallOf) -> CallOf { - self.prove_calls.push(call); - self.builder.build_batch_call(self.prove_calls) - } -} - -impl>> - BatchTransaction> for BatchProofTransaction -{ - fn required_header_id(&self) -> HeaderIdOf { - self.proved_header - } -} - -/// Run Substrate-to-Substrate messages sync loop. -pub async fn run(params: MessagesRelayParams

) -> anyhow::Result<()> -where - AccountIdOf: From< as Pair>::Public>, - AccountIdOf: From< as Pair>::Public>, - BalanceOf: TryFrom>, -{ - // 2/3 is reserved for proofs and tx overhead - let max_messages_size_in_single_batch = P::TargetChain::max_extrinsic_size() / 3; - let limits = match params.limits { - Some(limits) => limits, - None => - select_delivery_transaction_limits_rpc::

( - ¶ms, - P::TargetChain::max_extrinsic_weight(), - P::SourceChain::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, - ) - .await?, - }; - let (max_messages_in_single_batch, max_messages_weight_in_single_batch) = - (limits.max_messages_in_single_batch / 2, limits.max_messages_weight_in_single_batch / 2); - - let source_client = params.source_client; - let target_client = params.target_client; - let relayer_id_at_source: AccountIdOf = - params.source_transaction_params.signer.public().into(); - - log::info!( - target: "bridge", - "Starting {} -> {} messages relay.\n\t\ - {} relayer account id: {:?}\n\t\ - Max messages in single transaction: {}\n\t\ - Max messages size in single transaction: {}\n\t\ - Max messages weight in single transaction: {}\n\t\ - Tx mortality: {:?} (~{}m)/{:?} (~{}m)", - P::SourceChain::NAME, - P::TargetChain::NAME, - P::SourceChain::NAME, - relayer_id_at_source, - max_messages_in_single_batch, - max_messages_size_in_single_batch, - max_messages_weight_in_single_batch, - params.source_transaction_params.mortality, - transaction_stall_timeout( - params.source_transaction_params.mortality, - P::SourceChain::AVERAGE_BLOCK_INTERVAL, - STALL_TIMEOUT, - ).as_secs_f64() / 60.0f64, - params.target_transaction_params.mortality, - transaction_stall_timeout( - params.target_transaction_params.mortality, - P::TargetChain::AVERAGE_BLOCK_INTERVAL, - STALL_TIMEOUT, - ).as_secs_f64() / 60.0f64, - ); - - messages_relay::message_lane_loop::run( - messages_relay::message_lane_loop::Params { - lane: params.lane_id, - source_tick: P::SourceChain::AVERAGE_BLOCK_INTERVAL, - target_tick: P::TargetChain::AVERAGE_BLOCK_INTERVAL, - reconnect_delay: relay_utils::relay_loop::RECONNECT_DELAY, - delivery_params: messages_relay::message_lane_loop::MessageDeliveryParams { - max_unrewarded_relayer_entries_at_target: - P::SourceChain::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, - max_unconfirmed_nonces_at_target: - P::SourceChain::MAX_UNCONFIRMED_MESSAGES_IN_CONFIRMATION_TX, - 
max_messages_in_single_batch, - max_messages_weight_in_single_batch, - max_messages_size_in_single_batch, - }, - }, - SubstrateMessagesSource::

::new( - source_client.clone(), - target_client.clone(), - params.lane_id, - params.source_transaction_params, - params.target_to_source_headers_relay, - ), - SubstrateMessagesTarget::

::new( - target_client, - source_client, - params.lane_id, - relayer_id_at_source, - params.target_transaction_params, - params.source_to_target_headers_relay, - ), - { - GlobalMetrics::new()?.register_and_spawn(¶ms.metrics_params.registry)?; - params.metrics_params - }, - futures::future::pending(), - ) - .await - .map_err(Into::into) -} - -/// Different ways of building `receive_messages_proof` calls. -pub trait ReceiveMessagesProofCallBuilder { - /// Given messages proof, build call of `receive_messages_proof` function of bridge - /// messages module at the target chain. - fn build_receive_messages_proof_call( - relayer_id_at_source: AccountIdOf, - proof: SubstrateMessagesProof, - messages_count: u32, - dispatch_weight: Weight, - trace_call: bool, - ) -> CallOf; -} - -/// Building `receive_messages_proof` call when you have direct access to the target -/// chain runtime. -pub struct DirectReceiveMessagesProofCallBuilder { - _phantom: PhantomData<(P, R, I)>, -} - -impl ReceiveMessagesProofCallBuilder

for DirectReceiveMessagesProofCallBuilder -where - P: SubstrateMessageLane, - R: BridgeMessagesConfig>, - I: 'static, - R::SourceHeaderChain: bp_messages::target_chain::SourceHeaderChain< - MessagesProof = FromBridgedChainMessagesProof>, - >, - CallOf: From> + GetDispatchInfo, -{ - fn build_receive_messages_proof_call( - relayer_id_at_source: AccountIdOf, - proof: SubstrateMessagesProof, - messages_count: u32, - dispatch_weight: Weight, - trace_call: bool, - ) -> CallOf { - let call: CallOf = BridgeMessagesCall::::receive_messages_proof { - relayer_id_at_bridged_chain: relayer_id_at_source, - proof: proof.1, - messages_count, - dispatch_weight, - } - .into(); - if trace_call { - // this trace isn't super-accurate, because limits are for transactions and we - // have a call here, but it provides required information - log::trace!( - target: "bridge", - "Prepared {} -> {} messages delivery call. Weight: {}/{}, size: {}/{}", - P::SourceChain::NAME, - P::TargetChain::NAME, - call.get_dispatch_info().weight, - P::TargetChain::max_extrinsic_weight(), - call.encode().len(), - P::TargetChain::max_extrinsic_size(), - ); - } - call - } -} - -/// Macro that generates `ReceiveMessagesProofCallBuilder` implementation for the case when -/// you only have an access to the mocked version of target chain runtime. In this case you -/// should provide "name" of the call variant for the bridge messages calls and the "name" of -/// the variant for the `receive_messages_proof` call within that first option. -#[rustfmt::skip] -#[macro_export] -macro_rules! 
generate_receive_message_proof_call_builder { - ($pipeline:ident, $mocked_builder:ident, $bridge_messages:path, $receive_messages_proof:path) => { - pub struct $mocked_builder; - - impl $crate::messages_lane::ReceiveMessagesProofCallBuilder<$pipeline> - for $mocked_builder - { - fn build_receive_messages_proof_call( - relayer_id_at_source: relay_substrate_client::AccountIdOf< - <$pipeline as $crate::messages_lane::SubstrateMessageLane>::SourceChain - >, - proof: $crate::messages_source::SubstrateMessagesProof< - <$pipeline as $crate::messages_lane::SubstrateMessageLane>::SourceChain - >, - messages_count: u32, - dispatch_weight: bp_messages::Weight, - _trace_call: bool, - ) -> relay_substrate_client::CallOf< - <$pipeline as $crate::messages_lane::SubstrateMessageLane>::TargetChain - > { - bp_runtime::paste::item! { - $bridge_messages($receive_messages_proof { - relayer_id_at_bridged_chain: relayer_id_at_source, - proof: proof.1, - messages_count: messages_count, - dispatch_weight: dispatch_weight, - }) - } - } - } - }; -} - -/// Different ways of building `receive_messages_delivery_proof` calls. -pub trait ReceiveMessagesDeliveryProofCallBuilder { - /// Given messages delivery proof, build call of `receive_messages_delivery_proof` function of - /// bridge messages module at the source chain. - fn build_receive_messages_delivery_proof_call( - proof: SubstrateMessagesDeliveryProof, - trace_call: bool, - ) -> CallOf; -} - -/// Building `receive_messages_delivery_proof` call when you have direct access to the source -/// chain runtime. -pub struct DirectReceiveMessagesDeliveryProofCallBuilder { - _phantom: PhantomData<(P, R, I)>, -} - -impl ReceiveMessagesDeliveryProofCallBuilder

- for DirectReceiveMessagesDeliveryProofCallBuilder -where - P: SubstrateMessageLane, - R: BridgeMessagesConfig, - I: 'static, - R::TargetHeaderChain: bp_messages::source_chain::TargetHeaderChain< - R::OutboundPayload, - R::AccountId, - MessagesDeliveryProof = FromBridgedChainMessagesDeliveryProof>, - >, - CallOf: From> + GetDispatchInfo, -{ - fn build_receive_messages_delivery_proof_call( - proof: SubstrateMessagesDeliveryProof, - trace_call: bool, - ) -> CallOf { - let call: CallOf = - BridgeMessagesCall::::receive_messages_delivery_proof { - proof: proof.1, - relayers_state: proof.0, - } - .into(); - if trace_call { - // this trace isn't super-accurate, because limits are for transactions and we - // have a call here, but it provides required information - log::trace!( - target: "bridge", - "Prepared {} -> {} delivery confirmation transaction. Weight: {}/{}, size: {}/{}", - P::TargetChain::NAME, - P::SourceChain::NAME, - call.get_dispatch_info().weight, - P::SourceChain::max_extrinsic_weight(), - call.encode().len(), - P::SourceChain::max_extrinsic_size(), - ); - } - call - } -} - -/// Macro that generates `ReceiveMessagesDeliveryProofCallBuilder` implementation for the case when -/// you only have an access to the mocked version of source chain runtime. In this case you -/// should provide "name" of the call variant for the bridge messages calls and the "name" of -/// the variant for the `receive_messages_delivery_proof` call within that first option. -#[rustfmt::skip] -#[macro_export] -macro_rules! 
generate_receive_message_delivery_proof_call_builder { - ($pipeline:ident, $mocked_builder:ident, $bridge_messages:path, $receive_messages_delivery_proof:path) => { - pub struct $mocked_builder; - - impl $crate::messages_lane::ReceiveMessagesDeliveryProofCallBuilder<$pipeline> - for $mocked_builder - { - fn build_receive_messages_delivery_proof_call( - proof: $crate::messages_target::SubstrateMessagesDeliveryProof< - <$pipeline as $crate::messages_lane::SubstrateMessageLane>::TargetChain - >, - _trace_call: bool, - ) -> relay_substrate_client::CallOf< - <$pipeline as $crate::messages_lane::SubstrateMessageLane>::SourceChain - > { - bp_runtime::paste::item! { - $bridge_messages($receive_messages_delivery_proof { - proof: proof.1, - relayers_state: proof.0 - }) - } - } - } - }; -} - -/// Returns maximal number of messages and their maximal cumulative dispatch weight. -async fn select_delivery_transaction_limits_rpc( - params: &MessagesRelayParams

, - max_extrinsic_weight: Weight, - max_unconfirmed_messages_at_inbound_lane: MessageNonce, -) -> anyhow::Result -where - AccountIdOf: From< as Pair>::Public>, -{ - // We may try to guess accurate value, based on maximal number of messages and per-message - // weight overhead, but the relay loop isn't using this info in a super-accurate way anyway. - // So just a rough guess: let's say 1/3 of max tx weight is for tx itself and the rest is - // for messages dispatch. - - // Another thing to keep in mind is that our runtimes (when this code was written) accept - // messages with dispatch weight <= max_extrinsic_weight/2. So we can't reserve less than - // that for dispatch. - - let weight_for_delivery_tx = max_extrinsic_weight / 3; - let weight_for_messages_dispatch = max_extrinsic_weight - weight_for_delivery_tx; - - // weight of empty message delivery with outbound lane state - let delivery_tx_with_zero_messages = dummy_messages_delivery_transaction::

(params, 0)?; - let delivery_tx_with_zero_messages_weight = params - .target_client - .extimate_extrinsic_weight(delivery_tx_with_zero_messages) - .await - .map_err(|e| { - anyhow::format_err!("Failed to estimate delivery extrinsic weight: {:?}", e) - })?; - - // weight of single message delivery with outbound lane state - let delivery_tx_with_one_message = dummy_messages_delivery_transaction::

(params, 1)?; - let delivery_tx_with_one_message_weight = params - .target_client - .extimate_extrinsic_weight(delivery_tx_with_one_message) - .await - .map_err(|e| { - anyhow::format_err!("Failed to estimate delivery extrinsic weight: {:?}", e) - })?; - - // message overhead is roughly `delivery_tx_with_one_message_weight - - // delivery_tx_with_zero_messages_weight` - let delivery_tx_weight_rest = weight_for_delivery_tx - delivery_tx_with_zero_messages_weight; - let delivery_tx_message_overhead = - delivery_tx_with_one_message_weight.saturating_sub(delivery_tx_with_zero_messages_weight); - - let max_number_of_messages = std::cmp::min( - delivery_tx_weight_rest - .min_components_checked_div(delivery_tx_message_overhead) - .unwrap_or(u64::MAX), - max_unconfirmed_messages_at_inbound_lane, - ); - - assert!( - max_number_of_messages > 0, - "Relay should fit at least one message in every delivery transaction", - ); - assert!( - weight_for_messages_dispatch.ref_time() >= max_extrinsic_weight.ref_time() / 2, - "Relay shall be able to deliver messages with dispatch weight = max_extrinsic_weight / 2", - ); - - Ok(MessagesRelayLimits { - max_messages_in_single_batch: max_number_of_messages, - max_messages_weight_in_single_batch: weight_for_messages_dispatch, - }) -} - -/// Returns dummy message delivery transaction with zero messages and `1kb` proof. -fn dummy_messages_delivery_transaction( - params: &MessagesRelayParams

, - messages: u32, -) -> anyhow::Result<::SignedTransaction> -where - AccountIdOf: From< as Pair>::Public>, -{ - // we don't care about any call values here, because all that the estimation RPC does - // is calls `GetDispatchInfo::get_dispatch_info` for the wrapped call. So we only are - // interested in values that affect call weight - e.g. number of messages and the - // storage proof size - - let dummy_messages_delivery_call = - P::ReceiveMessagesProofCallBuilder::build_receive_messages_proof_call( - params.source_transaction_params.signer.public().into(), - ( - Weight::zero(), - FromBridgedChainMessagesProof { - bridged_header_hash: Default::default(), - // we may use per-chain `EXTRA_STORAGE_PROOF_SIZE`, but since we don't need - // exact values, this global estimation is fine - storage_proof: vec![vec![ - 42u8; - pallet_bridge_messages::EXTRA_STORAGE_PROOF_SIZE - as usize - ]], - lane: Default::default(), - nonces_start: 1, - nonces_end: messages as u64, - }, - ), - messages, - Weight::zero(), - false, - ); - P::TargetChain::sign_transaction( - SignParam { - spec_version: 0, - transaction_version: 0, - genesis_hash: Default::default(), - signer: params.target_transaction_params.signer.clone(), - }, - UnsignedTransaction { - call: EncodedOrDecodedCall::Decoded(dummy_messages_delivery_call), - nonce: Zero::zero(), - tip: Zero::zero(), - era: TransactionEra::Immortal, - }, - ) - .map_err(Into::into) -} diff --git a/relays/lib-substrate-relay/src/messages_metrics.rs b/relays/lib-substrate-relay/src/messages_metrics.rs deleted file mode 100644 index 27bf6186c3ba0d0db6552128574c8759d45d220c..0000000000000000000000000000000000000000 --- a/relays/lib-substrate-relay/src/messages_metrics.rs +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. 
- -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Tools for supporting message lanes between two Substrate-based chains. - -use crate::TaggedAccount; - -use bp_messages::LaneId; -use bp_relayers::{RewardsAccountOwner, RewardsAccountParams}; -use bp_runtime::StorageDoubleMapKeyProvider; -use codec::Decode; -use frame_system::AccountInfo; -use pallet_balances::AccountData; -use relay_substrate_client::{ - metrics::{FloatStorageValue, FloatStorageValueMetric}, - AccountIdOf, BalanceOf, Chain, ChainWithBalances, ChainWithMessages, Client, - Error as SubstrateError, NonceOf, -}; -use relay_utils::metrics::{MetricsParams, StandaloneMetric}; -use sp_core::storage::StorageData; -use sp_runtime::{FixedPointNumber, FixedU128}; -use std::{convert::TryFrom, fmt::Debug, marker::PhantomData}; - -/// Add relay accounts balance metrics. -pub async fn add_relay_balances_metrics( - client: Client, - metrics: &MetricsParams, - relay_accounts: &Vec>>, - lanes: &[LaneId], -) -> anyhow::Result<()> -where - BalanceOf: Into + std::fmt::Debug, -{ - if relay_accounts.is_empty() { - return Ok(()) - } - - // if `tokenDecimals` is missing from system properties, we'll be using - let token_decimals = client - .token_decimals() - .await? 
- .map(|token_decimals| { - log::info!(target: "bridge", "Read `tokenDecimals` for {}: {}", C::NAME, token_decimals); - token_decimals - }) - .unwrap_or_else(|| { - // turns out it is normal not to have this property - e.g. when polkadot binary is - // started using `polkadot-local` chain. Let's use minimal nominal here - log::info!(target: "bridge", "Using default (zero) `tokenDecimals` value for {}", C::NAME); - 0 - }); - let token_decimals = u32::try_from(token_decimals).map_err(|e| { - anyhow::format_err!( - "Token decimals value ({}) of {} doesn't fit into u32: {:?}", - token_decimals, - C::NAME, - e, - ) - })?; - - for account in relay_accounts { - let relay_account_balance_metric = FloatStorageValueMetric::new( - AccountBalanceFromAccountInfo:: { token_decimals, _phantom: Default::default() }, - client.clone(), - C::account_info_storage_key(account.id()), - format!("at_{}_relay_{}_balance", C::NAME, account.tag()), - format!("Balance of the {} relay account at the {}", account.tag(), C::NAME), - )?; - relay_account_balance_metric.register_and_spawn(&metrics.registry)?; - - if let Some(relayers_pallet_name) = BC::WITH_CHAIN_RELAYERS_PALLET_NAME { - for lane in lanes { - FloatStorageValueMetric::new( - AccountBalance:: { token_decimals, _phantom: Default::default() }, - client.clone(), - bp_relayers::RelayerRewardsKeyProvider::, BalanceOf>::final_key( - relayers_pallet_name, - account.id(), - &RewardsAccountParams::new(*lane, BC::ID, RewardsAccountOwner::ThisChain), - ), - format!("at_{}_relay_{}_reward_for_msgs_from_{}_on_lane_{}", C::NAME, account.tag(), BC::NAME, hex::encode(lane.as_ref())), - format!("Reward of the {} relay account at {} for delivering messages from {} on lane {:?}", account.tag(), C::NAME, BC::NAME, lane), - )?.register_and_spawn(&metrics.registry)?; - - FloatStorageValueMetric::new( - AccountBalance:: { token_decimals, _phantom: Default::default() }, - client.clone(), - bp_relayers::RelayerRewardsKeyProvider::, BalanceOf>::final_key( - 
relayers_pallet_name, - account.id(), - &RewardsAccountParams::new(*lane, BC::ID, RewardsAccountOwner::BridgedChain), - ), - format!("at_{}_relay_{}_reward_for_msgs_to_{}_on_lane_{}", C::NAME, account.tag(), BC::NAME, hex::encode(lane.as_ref())), - format!("Reward of the {} relay account at {} for delivering messages confirmations from {} on lane {:?}", account.tag(), C::NAME, BC::NAME, lane), - )?.register_and_spawn(&metrics.registry)?; - } - } - } - - Ok(()) -} - -/// Adapter for `FloatStorageValueMetric` to decode account free balance. -#[derive(Clone, Debug)] -struct AccountBalanceFromAccountInfo { - token_decimals: u32, - _phantom: PhantomData, -} - -impl FloatStorageValue for AccountBalanceFromAccountInfo -where - C: Chain, - BalanceOf: Into, -{ - type Value = FixedU128; - - fn decode( - &self, - maybe_raw_value: Option, - ) -> Result, SubstrateError> { - maybe_raw_value - .map(|raw_value| { - AccountInfo::, AccountData>>::decode(&mut &raw_value.0[..]) - .map_err(SubstrateError::ResponseParseFailed) - .map(|account_data| { - convert_to_token_balance(account_data.data.free.into(), self.token_decimals) - }) - }) - .transpose() - } -} - -/// Adapter for `FloatStorageValueMetric` to decode account free balance. -#[derive(Clone, Debug)] -struct AccountBalance { - token_decimals: u32, - _phantom: PhantomData, -} - -impl FloatStorageValue for AccountBalance -where - C: Chain, - BalanceOf: Into, -{ - type Value = FixedU128; - - fn decode( - &self, - maybe_raw_value: Option, - ) -> Result, SubstrateError> { - maybe_raw_value - .map(|raw_value| { - BalanceOf::::decode(&mut &raw_value.0[..]) - .map_err(SubstrateError::ResponseParseFailed) - .map(|balance| convert_to_token_balance(balance.into(), self.token_decimals)) - }) - .transpose() - } -} - -/// Convert from raw `u128` balance (nominated in smallest chain token units) to the float regular -/// tokens value. 
-fn convert_to_token_balance(balance: u128, token_decimals: u32) -> FixedU128 { - FixedU128::from_inner(balance.saturating_mul(FixedU128::DIV / 10u128.pow(token_decimals))) -} - -#[cfg(test)] -mod tests { - use super::*; - #[test] - fn token_decimals_used_properly() { - let plancks = 425_000_000_000; - let token_decimals = 10; - let dots = convert_to_token_balance(plancks, token_decimals); - assert_eq!(dots, FixedU128::saturating_from_rational(425, 10)); - } -} diff --git a/relays/lib-substrate-relay/src/messages_source.rs b/relays/lib-substrate-relay/src/messages_source.rs deleted file mode 100644 index 26e10f8868ceafa67426673fa43f59e89755d842..0000000000000000000000000000000000000000 --- a/relays/lib-substrate-relay/src/messages_source.rs +++ /dev/null @@ -1,723 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate client as Substrate messages source. The chain we connect to should have -//! runtime that implements `HeaderApi` to allow bridging with -//! `` chain. 
- -use crate::{ - finality_base::best_synced_header_id, - messages_lane::{ - BatchProofTransaction, MessageLaneAdapter, ReceiveMessagesDeliveryProofCallBuilder, - SubstrateMessageLane, - }, - on_demand::OnDemandRelay, - TransactionParams, -}; - -use async_std::sync::Arc; -use async_trait::async_trait; -use bp_messages::{ - storage_keys::{operating_mode_key, outbound_lane_data_key}, - ChainWithMessages as _, InboundMessageDetails, LaneId, MessageNonce, MessagePayload, - MessagesOperatingMode, OutboundLaneData, OutboundMessageDetails, -}; -use bp_runtime::{BasicOperatingMode, HeaderIdProvider}; -use bridge_runtime_common::messages::target::FromBridgedChainMessagesProof; -use codec::Encode; -use frame_support::weights::Weight; -use messages_relay::{ - message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}, - message_lane_loop::{ - ClientState, MessageDetails, MessageDetailsMap, MessageProofParameters, SourceClient, - SourceClientState, - }, -}; -use num_traits::Zero; -use relay_substrate_client::{ - AccountIdOf, AccountKeyPairOf, BalanceOf, Chain, ChainWithMessages, Client, - Error as SubstrateError, HashOf, HeaderIdOf, TransactionEra, TransactionTracker, - UnsignedTransaction, -}; -use relay_utils::relay_loop::Client as RelayClient; -use sp_core::Pair; -use std::ops::RangeInclusive; - -/// Intermediate message proof returned by the source Substrate node. Includes everything -/// required to submit to the target node: cumulative dispatch weight of bundled messages and -/// the proof itself. -pub type SubstrateMessagesProof = (Weight, FromBridgedChainMessagesProof>); -type MessagesToRefine<'a> = Vec<(MessagePayload, &'a mut OutboundMessageDetails)>; - -/// Substrate client as Substrate messages source. -pub struct SubstrateMessagesSource { - source_client: Client, - target_client: Client, - lane_id: LaneId, - transaction_params: TransactionParams>, - target_to_source_headers_relay: Option>>, -} - -impl SubstrateMessagesSource

{ - /// Create new Substrate headers source. - pub fn new( - source_client: Client, - target_client: Client, - lane_id: LaneId, - transaction_params: TransactionParams>, - target_to_source_headers_relay: Option< - Arc>, - >, - ) -> Self { - SubstrateMessagesSource { - source_client, - target_client, - lane_id, - transaction_params, - target_to_source_headers_relay, - } - } - - /// Read outbound lane state from the on-chain storage at given block. - async fn outbound_lane_data( - &self, - id: SourceHeaderIdOf>, - ) -> Result, SubstrateError> { - self.source_client - .storage_value( - outbound_lane_data_key( - P::TargetChain::WITH_CHAIN_MESSAGES_PALLET_NAME, - &self.lane_id, - ), - Some(id.1), - ) - .await - } - - /// Ensure that the messages pallet at source chain is active. - async fn ensure_pallet_active(&self) -> Result<(), SubstrateError> { - ensure_messages_pallet_active::(&self.source_client).await - } -} - -impl Clone for SubstrateMessagesSource

{ - fn clone(&self) -> Self { - Self { - source_client: self.source_client.clone(), - target_client: self.target_client.clone(), - lane_id: self.lane_id, - transaction_params: self.transaction_params.clone(), - target_to_source_headers_relay: self.target_to_source_headers_relay.clone(), - } - } -} - -#[async_trait] -impl RelayClient for SubstrateMessagesSource

{ - type Error = SubstrateError; - - async fn reconnect(&mut self) -> Result<(), SubstrateError> { - // since the client calls RPC methods on both sides, we need to reconnect both - self.source_client.reconnect().await?; - self.target_client.reconnect().await?; - - // call reconnect on on-demand headers relay, because we may use different chains there - // and the error that has lead to reconnect may have came from those other chains - // (see `require_target_header_on_source`) - // - // this may lead to multiple reconnects to the same node during the same call and it - // needs to be addressed in the future - // TODO: https://github.com/paritytech/parity-bridges-common/issues/1928 - if let Some(ref mut target_to_source_headers_relay) = self.target_to_source_headers_relay { - target_to_source_headers_relay.reconnect().await?; - } - - Ok(()) - } -} - -#[async_trait] -impl SourceClient> for SubstrateMessagesSource

-where - AccountIdOf: From< as Pair>::Public>, -{ - type BatchTransaction = - BatchProofTransaction; - type TransactionTracker = TransactionTracker>; - - async fn state(&self) -> Result>, SubstrateError> { - // we can't continue to deliver confirmations if source node is out of sync, because - // it may have already received confirmations that we're going to deliver - // - // we can't continue to deliver messages if target node is out of sync, because - // it may have already received (some of) messages that we're going to deliver - self.source_client.ensure_synced().await?; - self.target_client.ensure_synced().await?; - // we can't relay confirmations if messages pallet at source chain is halted - self.ensure_pallet_active().await?; - - read_client_state(&self.source_client, Some(&self.target_client)).await - } - - async fn latest_generated_nonce( - &self, - id: SourceHeaderIdOf>, - ) -> Result<(SourceHeaderIdOf>, MessageNonce), SubstrateError> { - // lane data missing from the storage is fine until first message is sent - let latest_generated_nonce = self - .outbound_lane_data(id) - .await? - .map(|data| data.latest_generated_nonce) - .unwrap_or(0); - Ok((id, latest_generated_nonce)) - } - - async fn latest_confirmed_received_nonce( - &self, - id: SourceHeaderIdOf>, - ) -> Result<(SourceHeaderIdOf>, MessageNonce), SubstrateError> { - // lane data missing from the storage is fine until first message is sent - let latest_received_nonce = self - .outbound_lane_data(id) - .await? 
- .map(|data| data.latest_received_nonce) - .unwrap_or(0); - Ok((id, latest_received_nonce)) - } - - async fn generated_message_details( - &self, - id: SourceHeaderIdOf>, - nonces: RangeInclusive, - ) -> Result>, SubstrateError> { - let mut out_msgs_details = self - .source_client - .typed_state_call::<_, Vec<_>>( - P::TargetChain::TO_CHAIN_MESSAGE_DETAILS_METHOD.into(), - (self.lane_id, *nonces.start(), *nonces.end()), - Some(id.1), - ) - .await?; - validate_out_msgs_details::(&out_msgs_details, nonces)?; - - // prepare arguments of the inbound message details call (if we need it) - let mut msgs_to_refine = vec![]; - for out_msg_details in out_msgs_details.iter_mut() { - // in our current strategy all messages are supposed to be paid at the target chain - - // for pay-at-target messages we may want to ask target chain for - // refined dispatch weight - let msg_key = bp_messages::storage_keys::message_key( - P::TargetChain::WITH_CHAIN_MESSAGES_PALLET_NAME, - &self.lane_id, - out_msg_details.nonce, - ); - let msg_payload: MessagePayload = - self.source_client.storage_value(msg_key, Some(id.1)).await?.ok_or_else(|| { - SubstrateError::Custom(format!( - "Message to {} {:?}/{} is missing from runtime the storage of {} at {:?}", - P::TargetChain::NAME, - self.lane_id, - out_msg_details.nonce, - P::SourceChain::NAME, - id, - )) - })?; - - msgs_to_refine.push((msg_payload, out_msg_details)); - } - - for mut msgs_to_refine_batch in - split_msgs_to_refine::(self.lane_id, msgs_to_refine)? 
- { - let in_msgs_details = self - .target_client - .typed_state_call::<_, Vec>( - P::SourceChain::FROM_CHAIN_MESSAGE_DETAILS_METHOD.into(), - (self.lane_id, &msgs_to_refine_batch), - None, - ) - .await?; - if in_msgs_details.len() != msgs_to_refine_batch.len() { - return Err(SubstrateError::Custom(format!( - "Call of {} at {} has returned {} entries instead of expected {}", - P::SourceChain::FROM_CHAIN_MESSAGE_DETAILS_METHOD, - P::TargetChain::NAME, - in_msgs_details.len(), - msgs_to_refine_batch.len(), - ))) - } - for ((_, out_msg_details), in_msg_details) in - msgs_to_refine_batch.iter_mut().zip(in_msgs_details) - { - log::trace!( - target: "bridge", - "Refined weight of {}->{} message {:?}/{}: at-source: {}, at-target: {}", - P::SourceChain::NAME, - P::TargetChain::NAME, - self.lane_id, - out_msg_details.nonce, - out_msg_details.dispatch_weight, - in_msg_details.dispatch_weight, - ); - out_msg_details.dispatch_weight = in_msg_details.dispatch_weight; - } - } - - let mut msgs_details_map = MessageDetailsMap::new(); - for out_msg_details in out_msgs_details { - msgs_details_map.insert( - out_msg_details.nonce, - MessageDetails { - dispatch_weight: out_msg_details.dispatch_weight, - size: out_msg_details.size as _, - reward: Zero::zero(), - }, - ); - } - - Ok(msgs_details_map) - } - - async fn prove_messages( - &self, - id: SourceHeaderIdOf>, - nonces: RangeInclusive, - proof_parameters: MessageProofParameters, - ) -> Result< - ( - SourceHeaderIdOf>, - RangeInclusive, - as MessageLane>::MessagesProof, - ), - SubstrateError, - > { - let mut storage_keys = - Vec::with_capacity(nonces.end().saturating_sub(*nonces.start()) as usize + 1); - let mut message_nonce = *nonces.start(); - while message_nonce <= *nonces.end() { - let message_key = bp_messages::storage_keys::message_key( - P::TargetChain::WITH_CHAIN_MESSAGES_PALLET_NAME, - &self.lane_id, - message_nonce, - ); - storage_keys.push(message_key); - message_nonce += 1; - } - if 
proof_parameters.outbound_state_proof_required { - storage_keys.push(bp_messages::storage_keys::outbound_lane_data_key( - P::TargetChain::WITH_CHAIN_MESSAGES_PALLET_NAME, - &self.lane_id, - )); - } - - let proof = self - .source_client - .prove_storage(storage_keys, id.1) - .await? - .into_iter_nodes() - .collect(); - let proof = FromBridgedChainMessagesProof { - bridged_header_hash: id.1, - storage_proof: proof, - lane: self.lane_id, - nonces_start: *nonces.start(), - nonces_end: *nonces.end(), - }; - Ok((id, nonces, (proof_parameters.dispatch_weight, proof))) - } - - async fn submit_messages_receiving_proof( - &self, - maybe_batch_tx: Option, - _generated_at_block: TargetHeaderIdOf>, - proof: as MessageLane>::MessagesReceivingProof, - ) -> Result { - let messages_proof_call = - P::ReceiveMessagesDeliveryProofCallBuilder::build_receive_messages_delivery_proof_call( - proof, - maybe_batch_tx.is_none(), - ); - let final_call = match maybe_batch_tx { - Some(batch_tx) => batch_tx.append_call_and_build(messages_proof_call), - None => messages_proof_call, - }; - - let transaction_params = self.transaction_params.clone(); - self.source_client - .submit_and_watch_signed_extrinsic( - &self.transaction_params.signer, - move |best_block_id, transaction_nonce| { - Ok(UnsignedTransaction::new(final_call.into(), transaction_nonce) - .era(TransactionEra::new(best_block_id, transaction_params.mortality))) - }, - ) - .await - } - - async fn require_target_header_on_source( - &self, - id: TargetHeaderIdOf>, - ) -> Result, SubstrateError> { - if let Some(ref target_to_source_headers_relay) = self.target_to_source_headers_relay { - if let Some(batch_tx) = - BatchProofTransaction::new(target_to_source_headers_relay.clone(), id.0).await? - { - return Ok(Some(batch_tx)) - } - - target_to_source_headers_relay.require_more_headers(id.0).await; - } - - Ok(None) - } -} - -/// Ensure that the messages pallet at source chain is active. 
-pub(crate) async fn ensure_messages_pallet_active( - client: &Client, -) -> Result<(), SubstrateError> -where - AtChain: ChainWithMessages, - WithChain: ChainWithMessages, -{ - let operating_mode = client - .storage_value(operating_mode_key(WithChain::WITH_CHAIN_MESSAGES_PALLET_NAME), None) - .await?; - let is_halted = - operating_mode == Some(MessagesOperatingMode::Basic(BasicOperatingMode::Halted)); - if is_halted { - Err(SubstrateError::BridgePalletIsHalted) - } else { - Ok(()) - } -} - -/// Read best blocks from given client. -/// -/// This function assumes that the chain that is followed by the `self_client` has -/// bridge GRANDPA pallet deployed and it provides `best_finalized_header_id_method_name` -/// runtime API to read the best finalized Bridged chain header. -/// -/// If `peer_client` is `None`, the value of `actual_best_finalized_peer_at_best_self` will -/// always match the `best_finalized_peer_at_best_self`. -pub async fn read_client_state( - self_client: &Client, - peer_client: Option<&Client>, -) -> Result, HeaderIdOf>, SubstrateError> -where - SelfChain: Chain, - PeerChain: Chain, -{ - // let's read our state first: we need best finalized header hash on **this** chain - let self_best_finalized_id = self_client.best_finalized_header().await?.id(); - // now let's read our best header on **this** chain - let self_best_id = self_client.best_header().await?.id(); - - // now let's read id of best finalized peer header at our best finalized block - let peer_on_self_best_finalized_id = - best_synced_header_id::(self_client, self_best_id.hash()).await?; - - // read actual header, matching the `peer_on_self_best_finalized_id` from the peer chain - let actual_peer_on_self_best_finalized_id = - match (peer_client, peer_on_self_best_finalized_id.as_ref()) { - (Some(peer_client), Some(peer_on_self_best_finalized_id)) => { - let actual_peer_on_self_best_finalized = - peer_client.header_by_number(peer_on_self_best_finalized_id.number()).await?; - 
Some(actual_peer_on_self_best_finalized.id()) - }, - _ => peer_on_self_best_finalized_id, - }; - - Ok(ClientState { - best_self: self_best_id, - best_finalized_self: self_best_finalized_id, - best_finalized_peer_at_best_self: peer_on_self_best_finalized_id, - actual_best_finalized_peer_at_best_self: actual_peer_on_self_best_finalized_id, - }) -} - -/// Reads best `PeerChain` header known to the `SelfChain` using provided runtime API method. -/// -/// Method is supposed to be the `FinalityApi::best_finalized()` method. -pub async fn best_finalized_peer_header_at_self( - self_client: &Client, - at_self_hash: HashOf, -) -> Result>, SubstrateError> -where - SelfChain: Chain, - PeerChain: Chain, -{ - // now let's read id of best finalized peer header at our best finalized block - self_client - .typed_state_call::<_, Option<_>>( - PeerChain::BEST_FINALIZED_HEADER_ID_METHOD.into(), - (), - Some(at_self_hash), - ) - .await -} - -fn validate_out_msgs_details( - out_msgs_details: &[OutboundMessageDetails], - nonces: RangeInclusive, -) -> Result<(), SubstrateError> { - let make_missing_nonce_error = |expected_nonce| { - Err(SubstrateError::Custom(format!( - "Missing nonce {expected_nonce} in message_details call result. Expected all nonces from {nonces:?}", - ))) - }; - - if out_msgs_details.len() > nonces.clone().count() { - return Err(SubstrateError::Custom( - "More messages than requested returned by the message_details call.".into(), - )) - } - - // Check if last nonce is missing. The loop below is not checking this. - if out_msgs_details.is_empty() && !nonces.is_empty() { - return make_missing_nonce_error(*nonces.end()) - } - - let mut nonces_iter = nonces.clone().rev().peekable(); - let mut out_msgs_details_iter = out_msgs_details.iter().rev(); - while let Some((out_msg_details, &nonce)) = out_msgs_details_iter.next().zip(nonces_iter.peek()) - { - nonces_iter.next(); - if out_msg_details.nonce != nonce { - // Some nonces are missing from the middle/tail of the range. 
This is critical error. - return make_missing_nonce_error(nonce) - } - } - - // Check if some nonces from the beginning of the range are missing. This may happen if - // some messages were already pruned from the source node. This is not a critical error - // and will be auto-resolved by messages lane (and target node). - if nonces_iter.peek().is_some() { - log::info!( - target: "bridge", - "Some messages are missing from the {} node: {:?}. Target node may be out of sync?", - C::NAME, - nonces_iter.rev().collect::>(), - ); - } - - Ok(()) -} - -fn split_msgs_to_refine( - lane_id: LaneId, - msgs_to_refine: MessagesToRefine, -) -> Result, SubstrateError> { - let max_batch_size = Target::max_extrinsic_size() as usize; - let mut batches = vec![]; - - let mut current_msgs_batch = msgs_to_refine; - while !current_msgs_batch.is_empty() { - let mut next_msgs_batch = vec![]; - while (lane_id, ¤t_msgs_batch).encoded_size() > max_batch_size { - if current_msgs_batch.len() <= 1 { - return Err(SubstrateError::Custom(format!( - "Call of {} at {} can't be executed even if only one message is supplied. 
\ - max_extrinsic_size(): {}", - Source::FROM_CHAIN_MESSAGE_DETAILS_METHOD, - Target::NAME, - Target::max_extrinsic_size(), - ))) - } - - if let Some(msg) = current_msgs_batch.pop() { - next_msgs_batch.insert(0, msg); - } - } - - batches.push(current_msgs_batch); - current_msgs_batch = next_msgs_batch; - } - - Ok(batches) -} - -#[cfg(test)] -mod tests { - use super::*; - use bp_runtime::Chain as ChainBase; - use relay_bridge_hub_rococo_client::BridgeHubRococo; - use relay_bridge_hub_westend_client::BridgeHubWestend; - - fn message_details_from_rpc( - nonces: RangeInclusive, - ) -> Vec { - nonces - .into_iter() - .map(|nonce| bp_messages::OutboundMessageDetails { - nonce, - dispatch_weight: Weight::zero(), - size: 0, - }) - .collect() - } - - #[test] - fn validate_out_msgs_details_succeeds_if_no_messages_are_missing() { - assert!(validate_out_msgs_details::( - &message_details_from_rpc(1..=3), - 1..=3, - ) - .is_ok()); - } - - #[test] - fn validate_out_msgs_details_succeeds_if_head_messages_are_missing() { - assert!(validate_out_msgs_details::( - &message_details_from_rpc(2..=3), - 1..=3, - ) - .is_ok()) - } - - #[test] - fn validate_out_msgs_details_fails_if_mid_messages_are_missing() { - let mut message_details_from_rpc = message_details_from_rpc(1..=3); - message_details_from_rpc.remove(1); - assert!(matches!( - validate_out_msgs_details::(&message_details_from_rpc, 1..=3,), - Err(SubstrateError::Custom(_)) - )); - } - - #[test] - fn validate_out_msgs_details_map_fails_if_tail_messages_are_missing() { - assert!(matches!( - validate_out_msgs_details::(&message_details_from_rpc(1..=2), 1..=3,), - Err(SubstrateError::Custom(_)) - )); - } - - #[test] - fn validate_out_msgs_details_fails_if_all_messages_are_missing() { - assert!(matches!( - validate_out_msgs_details::(&[], 1..=3), - Err(SubstrateError::Custom(_)) - )); - } - - #[test] - fn validate_out_msgs_details_fails_if_more_messages_than_nonces() { - assert!(matches!( - 
validate_out_msgs_details::(&message_details_from_rpc(1..=5), 2..=5,), - Err(SubstrateError::Custom(_)) - )); - } - - fn check_split_msgs_to_refine( - payload_sizes: Vec, - expected_batches: Result, ()>, - ) { - let mut out_msgs_details = vec![]; - for (idx, _) in payload_sizes.iter().enumerate() { - out_msgs_details.push(OutboundMessageDetails { - nonce: idx as MessageNonce, - dispatch_weight: Weight::zero(), - size: 0, - }); - } - - let mut msgs_to_refine = vec![]; - for (&payload_size, out_msg_details) in - payload_sizes.iter().zip(out_msgs_details.iter_mut()) - { - let payload = vec![1u8; payload_size]; - msgs_to_refine.push((payload, out_msg_details)); - } - - let maybe_batches = split_msgs_to_refine::( - Default::default(), - msgs_to_refine, - ); - match expected_batches { - Ok(expected_batches) => { - let batches = maybe_batches.unwrap(); - let mut idx = 0; - assert_eq!(batches.len(), expected_batches.len()); - for (batch, &expected_batch_size) in batches.iter().zip(expected_batches.iter()) { - assert_eq!(batch.len(), expected_batch_size); - for msg_to_refine in batch { - assert_eq!(msg_to_refine.0.len(), payload_sizes[idx]); - idx += 1; - } - } - }, - Err(_) => { - matches!(maybe_batches, Err(SubstrateError::Custom(_))); - }, - } - } - - #[test] - fn test_split_msgs_to_refine() { - let max_extrinsic_size = BridgeHubRococo::max_extrinsic_size() as usize; - - // Check that an error is returned when one of the messages is too big. - check_split_msgs_to_refine(vec![max_extrinsic_size], Err(())); - check_split_msgs_to_refine(vec![50, 100, max_extrinsic_size, 200], Err(())); - - // Otherwise check that the split is valid. 
- check_split_msgs_to_refine(vec![100, 200, 300, 400], Ok(vec![4])); - check_split_msgs_to_refine( - vec![ - 50, - 100, - max_extrinsic_size - 500, - 500, - 1000, - 1500, - max_extrinsic_size - 3500, - 5000, - 10000, - ], - Ok(vec![3, 4, 2]), - ); - check_split_msgs_to_refine( - vec![ - 50, - 100, - max_extrinsic_size - 150, - 500, - 1000, - 1500, - max_extrinsic_size - 3000, - 5000, - 10000, - ], - Ok(vec![2, 1, 3, 1, 2]), - ); - check_split_msgs_to_refine( - vec![ - 5000, - 10000, - max_extrinsic_size - 3500, - 500, - 1000, - 1500, - max_extrinsic_size - 500, - 50, - 100, - ], - Ok(vec![2, 4, 3]), - ); - } -} diff --git a/relays/lib-substrate-relay/src/messages_target.rs b/relays/lib-substrate-relay/src/messages_target.rs deleted file mode 100644 index 9396e785530d2ec5855e332e8db3dd7836938f25..0000000000000000000000000000000000000000 --- a/relays/lib-substrate-relay/src/messages_target.rs +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Substrate client as Substrate messages target. The chain we connect to should have -//! runtime that implements `HeaderApi` to allow bridging with -//! `` chain. 
- -use crate::{ - messages_lane::{ - BatchProofTransaction, MessageLaneAdapter, ReceiveMessagesProofCallBuilder, - SubstrateMessageLane, - }, - messages_source::{ensure_messages_pallet_active, read_client_state, SubstrateMessagesProof}, - on_demand::OnDemandRelay, - TransactionParams, -}; - -use async_std::sync::Arc; -use async_trait::async_trait; -use bp_messages::{ - storage_keys::inbound_lane_data_key, ChainWithMessages as _, InboundLaneData, LaneId, - MessageNonce, UnrewardedRelayersState, -}; -use bridge_runtime_common::messages::source::FromBridgedChainMessagesDeliveryProof; -use messages_relay::{ - message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}, - message_lane_loop::{NoncesSubmitArtifacts, TargetClient, TargetClientState}, -}; -use relay_substrate_client::{ - AccountIdOf, AccountKeyPairOf, BalanceOf, CallOf, Client, Error as SubstrateError, HashOf, - TransactionEra, TransactionTracker, UnsignedTransaction, -}; -use relay_utils::relay_loop::Client as RelayClient; -use sp_core::Pair; -use std::{convert::TryFrom, ops::RangeInclusive}; - -/// Message receiving proof returned by the target Substrate node. -pub type SubstrateMessagesDeliveryProof = - (UnrewardedRelayersState, FromBridgedChainMessagesDeliveryProof>); - -/// Substrate client as Substrate messages target. -pub struct SubstrateMessagesTarget { - target_client: Client, - source_client: Client, - lane_id: LaneId, - relayer_id_at_source: AccountIdOf, - transaction_params: TransactionParams>, - source_to_target_headers_relay: Option>>, -} - -impl SubstrateMessagesTarget

{ - /// Create new Substrate headers target. - pub fn new( - target_client: Client, - source_client: Client, - lane_id: LaneId, - relayer_id_at_source: AccountIdOf, - transaction_params: TransactionParams>, - source_to_target_headers_relay: Option< - Arc>, - >, - ) -> Self { - SubstrateMessagesTarget { - target_client, - source_client, - lane_id, - relayer_id_at_source, - transaction_params, - source_to_target_headers_relay, - } - } - - /// Read inbound lane state from the on-chain storage at given block. - async fn inbound_lane_data( - &self, - id: TargetHeaderIdOf>, - ) -> Result>>, SubstrateError> { - self.target_client - .storage_value( - inbound_lane_data_key( - P::SourceChain::WITH_CHAIN_MESSAGES_PALLET_NAME, - &self.lane_id, - ), - Some(id.1), - ) - .await - } - - /// Ensure that the messages pallet at target chain is active. - async fn ensure_pallet_active(&self) -> Result<(), SubstrateError> { - ensure_messages_pallet_active::(&self.target_client).await - } -} - -impl Clone for SubstrateMessagesTarget

{ - fn clone(&self) -> Self { - Self { - target_client: self.target_client.clone(), - source_client: self.source_client.clone(), - lane_id: self.lane_id, - relayer_id_at_source: self.relayer_id_at_source.clone(), - transaction_params: self.transaction_params.clone(), - source_to_target_headers_relay: self.source_to_target_headers_relay.clone(), - } - } -} - -#[async_trait] -impl RelayClient for SubstrateMessagesTarget

{ - type Error = SubstrateError; - - async fn reconnect(&mut self) -> Result<(), SubstrateError> { - // since the client calls RPC methods on both sides, we need to reconnect both - self.target_client.reconnect().await?; - self.source_client.reconnect().await?; - - // call reconnect on on-demand headers relay, because we may use different chains there - // and the error that has lead to reconnect may have came from those other chains - // (see `require_source_header_on_target`) - // - // this may lead to multiple reconnects to the same node during the same call and it - // needs to be addressed in the future - // TODO: https://github.com/paritytech/parity-bridges-common/issues/1928 - if let Some(ref mut source_to_target_headers_relay) = self.source_to_target_headers_relay { - source_to_target_headers_relay.reconnect().await?; - } - - Ok(()) - } -} - -#[async_trait] -impl TargetClient> for SubstrateMessagesTarget

-where - AccountIdOf: From< as Pair>::Public>, - BalanceOf: TryFrom>, -{ - type BatchTransaction = - BatchProofTransaction; - type TransactionTracker = TransactionTracker>; - - async fn state(&self) -> Result>, SubstrateError> { - // we can't continue to deliver confirmations if source node is out of sync, because - // it may have already received confirmations that we're going to deliver - // - // we can't continue to deliver messages if target node is out of sync, because - // it may have already received (some of) messages that we're going to deliver - self.source_client.ensure_synced().await?; - self.target_client.ensure_synced().await?; - // we can't relay messages if messages pallet at target chain is halted - self.ensure_pallet_active().await?; - - read_client_state(&self.target_client, Some(&self.source_client)).await - } - - async fn latest_received_nonce( - &self, - id: TargetHeaderIdOf>, - ) -> Result<(TargetHeaderIdOf>, MessageNonce), SubstrateError> { - // lane data missing from the storage is fine until first message is received - let latest_received_nonce = self - .inbound_lane_data(id) - .await? - .map(|data| data.last_delivered_nonce()) - .unwrap_or(0); - Ok((id, latest_received_nonce)) - } - - async fn latest_confirmed_received_nonce( - &self, - id: TargetHeaderIdOf>, - ) -> Result<(TargetHeaderIdOf>, MessageNonce), SubstrateError> { - // lane data missing from the storage is fine until first message is received - let last_confirmed_nonce = self - .inbound_lane_data(id) - .await? 
- .map(|data| data.last_confirmed_nonce) - .unwrap_or(0); - Ok((id, last_confirmed_nonce)) - } - - async fn unrewarded_relayers_state( - &self, - id: TargetHeaderIdOf>, - ) -> Result<(TargetHeaderIdOf>, UnrewardedRelayersState), SubstrateError> - { - let inbound_lane_data = - self.inbound_lane_data(id).await?.unwrap_or(InboundLaneData::default()); - Ok((id, (&inbound_lane_data).into())) - } - - async fn prove_messages_receiving( - &self, - id: TargetHeaderIdOf>, - ) -> Result< - ( - TargetHeaderIdOf>, - as MessageLane>::MessagesReceivingProof, - ), - SubstrateError, - > { - let (id, relayers_state) = self.unrewarded_relayers_state(id).await?; - let inbound_data_key = bp_messages::storage_keys::inbound_lane_data_key( - P::SourceChain::WITH_CHAIN_MESSAGES_PALLET_NAME, - &self.lane_id, - ); - let proof = self - .target_client - .prove_storage(vec![inbound_data_key], id.1) - .await? - .into_iter_nodes() - .collect(); - let proof = FromBridgedChainMessagesDeliveryProof { - bridged_header_hash: id.1, - storage_proof: proof, - lane: self.lane_id, - }; - Ok((id, (relayers_state, proof))) - } - - async fn submit_messages_proof( - &self, - maybe_batch_tx: Option, - _generated_at_header: SourceHeaderIdOf>, - nonces: RangeInclusive, - proof: as MessageLane>::MessagesProof, - ) -> Result, SubstrateError> { - let messages_proof_call = make_messages_delivery_call::

( - self.relayer_id_at_source.clone(), - proof.1.nonces_start..=proof.1.nonces_end, - proof, - maybe_batch_tx.is_none(), - ); - let final_call = match maybe_batch_tx { - Some(batch_tx) => batch_tx.append_call_and_build(messages_proof_call), - None => messages_proof_call, - }; - - let transaction_params = self.transaction_params.clone(); - let tx_tracker = self - .target_client - .submit_and_watch_signed_extrinsic( - &self.transaction_params.signer, - move |best_block_id, transaction_nonce| { - Ok(UnsignedTransaction::new(final_call.into(), transaction_nonce) - .era(TransactionEra::new(best_block_id, transaction_params.mortality))) - }, - ) - .await?; - Ok(NoncesSubmitArtifacts { nonces, tx_tracker }) - } - - async fn require_source_header_on_target( - &self, - id: SourceHeaderIdOf>, - ) -> Result, SubstrateError> { - if let Some(ref source_to_target_headers_relay) = self.source_to_target_headers_relay { - if let Some(batch_tx) = - BatchProofTransaction::new(source_to_target_headers_relay.clone(), id.0).await? - { - return Ok(Some(batch_tx)) - } - - source_to_target_headers_relay.require_more_headers(id.0).await; - } - - Ok(None) - } -} - -/// Make messages delivery call from given proof. 
-fn make_messages_delivery_call( - relayer_id_at_source: AccountIdOf, - nonces: RangeInclusive, - proof: SubstrateMessagesProof, - trace_call: bool, -) -> CallOf { - let messages_count = nonces.end() - nonces.start() + 1; - let dispatch_weight = proof.0; - P::ReceiveMessagesProofCallBuilder::build_receive_messages_proof_call( - relayer_id_at_source, - proof, - messages_count as _, - dispatch_weight, - trace_call, - ) -} diff --git a/relays/lib-substrate-relay/src/on_demand/headers.rs b/relays/lib-substrate-relay/src/on_demand/headers.rs deleted file mode 100644 index 9488a46a56a2c1d280640195fe1aa07085d68ba5..0000000000000000000000000000000000000000 --- a/relays/lib-substrate-relay/src/on_demand/headers.rs +++ /dev/null @@ -1,551 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! On-demand Substrate -> Substrate header finality relay. 
- -use crate::{ - finality::SubmitFinalityProofCallBuilder, finality_base::engine::MaxExpectedCallSizeCheck, -}; - -use async_std::sync::{Arc, Mutex}; -use async_trait::async_trait; -use bp_header_chain::ConsensusLogReader; -use bp_runtime::HeaderIdProvider; -use futures::{select, FutureExt}; -use num_traits::{One, Saturating, Zero}; -use sp_runtime::traits::Header; - -use finality_relay::{FinalitySyncParams, HeadersToRelay, TargetClient as FinalityTargetClient}; -use relay_substrate_client::{ - AccountIdOf, AccountKeyPairOf, BlockNumberOf, CallOf, Chain, Client, Error as SubstrateError, - HeaderIdOf, -}; -use relay_utils::{ - metrics::MetricsParams, relay_loop::Client as RelayClient, FailedClient, MaybeConnectionError, - STALL_TIMEOUT, -}; - -use crate::{ - finality::{ - source::{RequiredHeaderNumberRef, SubstrateFinalitySource}, - target::SubstrateFinalityTarget, - SubstrateFinalitySyncPipeline, RECENT_FINALITY_PROOFS_LIMIT, - }, - finality_base::engine::Engine, - on_demand::OnDemandRelay, - TransactionParams, -}; - -/// On-demand Substrate <-> Substrate header finality relay. -/// -/// This relay may be requested to sync more headers, whenever some other relay (e.g. messages -/// relay) needs it to continue its regular work. When enough headers are relayed, on-demand stops -/// syncing headers. -#[derive(Clone)] -pub struct OnDemandHeadersRelay { - /// Relay task name. - relay_task_name: String, - /// Shared reference to maximal required finalized header number. - required_header_number: RequiredHeaderNumberRef, - /// Client of the source chain. - source_client: Client, - /// Client of the target chain. - target_client: Client, -} - -impl OnDemandHeadersRelay

{ - /// Create new on-demand headers relay. - /// - /// If `metrics_params` is `Some(_)`, the metrics of the finality relay are registered. - /// Otherwise, all required metrics must be exposed outside of this method. - pub fn new( - source_client: Client, - target_client: Client, - target_transaction_params: TransactionParams>, - headers_to_relay: HeadersToRelay, - metrics_params: Option, - ) -> Self - where - AccountIdOf: - From< as sp_core::Pair>::Public>, - { - let required_header_number = Arc::new(Mutex::new(Zero::zero())); - let this = OnDemandHeadersRelay { - relay_task_name: on_demand_headers_relay_name::(), - required_header_number: required_header_number.clone(), - source_client: source_client.clone(), - target_client: target_client.clone(), - }; - async_std::task::spawn(async move { - background_task::

( - source_client, - target_client, - target_transaction_params, - headers_to_relay, - required_header_number, - metrics_params, - ) - .await; - }); - - this - } -} - -#[async_trait] -impl OnDemandRelay - for OnDemandHeadersRelay

-{ - async fn reconnect(&self) -> Result<(), SubstrateError> { - // using clone is fine here (to avoid mut requirement), because clone on Client clones - // internal references - self.source_client.clone().reconnect().await?; - self.target_client.clone().reconnect().await - } - - async fn require_more_headers(&self, required_header: BlockNumberOf) { - let mut required_header_number = self.required_header_number.lock().await; - if required_header > *required_header_number { - log::trace!( - target: "bridge", - "[{}] More {} headers required. Going to sync up to the {}", - self.relay_task_name, - P::SourceChain::NAME, - required_header, - ); - - *required_header_number = required_header; - } - } - - async fn prove_header( - &self, - required_header: BlockNumberOf, - ) -> Result<(HeaderIdOf, Vec>), SubstrateError> { - const MAX_ITERATIONS: u32 = 4; - let mut iterations = 0; - let mut current_required_header = required_header; - loop { - // first find proper header (either `current_required_header`) or its descendant - let finality_source = - SubstrateFinalitySource::

::new(self.source_client.clone(), None); - let (header, mut proof) = - finality_source.prove_block_finality(current_required_header).await?; - let header_id = header.id(); - - // verify and optimize justification before including it into the call - let context = P::FinalityEngine::verify_and_optimize_proof( - &self.target_client, - &header, - &mut proof, - ) - .await?; - - // now we have the header and its proof, but we want to minimize our losses, so let's - // check if we'll get the full refund for submitting this header - let check_result = P::FinalityEngine::check_max_expected_call_size(&header, &proof); - if let MaxExpectedCallSizeCheck::Exceeds { call_size, max_call_size } = check_result { - iterations += 1; - current_required_header = header_id.number().saturating_add(One::one()); - if iterations < MAX_ITERATIONS { - log::debug!( - target: "bridge", - "[{}] Requested to prove {} head {:?}. Selected to prove {} head {:?}. But it is too large: {} vs {}. \ - Going to select next header", - self.relay_task_name, - P::SourceChain::NAME, - required_header, - P::SourceChain::NAME, - header_id, - call_size, - max_call_size, - ); - - continue; - } - } - - log::debug!( - target: "bridge", - "[{}] Requested to prove {} head {:?}. Selected to prove {} head {:?} (after {} iterations)", - self.relay_task_name, - P::SourceChain::NAME, - required_header, - P::SourceChain::NAME, - header_id, - iterations, - ); - - // and then craft the submit-proof call - let call = P::SubmitFinalityProofCallBuilder::build_submit_finality_proof_call( - header, proof, false, context, - ); - - return Ok((header_id, vec![call])); - } - } -} - -/// Background task that is responsible for starting headers relay. 
-async fn background_task( - source_client: Client, - target_client: Client, - target_transaction_params: TransactionParams>, - headers_to_relay: HeadersToRelay, - required_header_number: RequiredHeaderNumberRef, - metrics_params: Option, -) where - AccountIdOf: From< as sp_core::Pair>::Public>, -{ - let relay_task_name = on_demand_headers_relay_name::(); - let target_transactions_mortality = target_transaction_params.mortality; - let mut finality_source = SubstrateFinalitySource::

::new( - source_client.clone(), - Some(required_header_number.clone()), - ); - let mut finality_target = - SubstrateFinalityTarget::new(target_client.clone(), target_transaction_params); - let mut latest_non_mandatory_at_source = Zero::zero(); - - let mut restart_relay = true; - let finality_relay_task = futures::future::Fuse::terminated(); - futures::pin_mut!(finality_relay_task); - - loop { - select! { - _ = async_std::task::sleep(P::TargetChain::AVERAGE_BLOCK_INTERVAL).fuse() => {}, - _ = finality_relay_task => { - // this should never happen in practice given the current code - restart_relay = true; - }, - } - - // read best finalized source header number from source - let best_finalized_source_header_at_source = - best_finalized_source_header_at_source(&finality_source, &relay_task_name).await; - if matches!(best_finalized_source_header_at_source, Err(ref e) if e.is_connection_error()) { - relay_utils::relay_loop::reconnect_failed_client( - FailedClient::Source, - relay_utils::relay_loop::RECONNECT_DELAY, - &mut finality_source, - &mut finality_target, - ) - .await; - continue - } - - // read best finalized source header number from target - let best_finalized_source_header_at_target = - best_finalized_source_header_at_target::

(&finality_target, &relay_task_name).await; - if matches!(best_finalized_source_header_at_target, Err(ref e) if e.is_connection_error()) { - relay_utils::relay_loop::reconnect_failed_client( - FailedClient::Target, - relay_utils::relay_loop::RECONNECT_DELAY, - &mut finality_source, - &mut finality_target, - ) - .await; - continue - } - - // submit mandatory header if some headers are missing - let best_finalized_source_header_at_source_fmt = - format!("{best_finalized_source_header_at_source:?}"); - let best_finalized_source_header_at_target_fmt = - format!("{best_finalized_source_header_at_target:?}"); - let required_header_number_value = *required_header_number.lock().await; - let mandatory_scan_range = mandatory_headers_scan_range::( - best_finalized_source_header_at_source.ok(), - best_finalized_source_header_at_target.ok(), - required_header_number_value, - ) - .await; - - log::trace!( - target: "bridge", - "[{}] Mandatory headers scan range: ({:?}, {:?}, {:?}) -> {:?}", - relay_task_name, - required_header_number_value, - best_finalized_source_header_at_source_fmt, - best_finalized_source_header_at_target_fmt, - mandatory_scan_range, - ); - - if let Some(mandatory_scan_range) = mandatory_scan_range { - let relay_mandatory_header_result = relay_mandatory_header_from_range( - &finality_source, - &required_header_number, - best_finalized_source_header_at_target_fmt, - ( - std::cmp::max(mandatory_scan_range.0, latest_non_mandatory_at_source), - mandatory_scan_range.1, - ), - &relay_task_name, - ) - .await; - match relay_mandatory_header_result { - Ok(true) => (), - Ok(false) => { - // there are no (or we don't need to relay them) mandatory headers in the range - // => to avoid scanning the same headers over and over again, remember that - latest_non_mandatory_at_source = mandatory_scan_range.1; - - log::trace!( - target: "bridge", - "[{}] No mandatory {} headers in the range {:?}", - relay_task_name, - P::SourceChain::NAME, - mandatory_scan_range, - ); - }, - 
Err(e) => { - log::warn!( - target: "bridge", - "[{}] Failed to scan mandatory {} headers range ({:?}): {:?}", - relay_task_name, - P::SourceChain::NAME, - mandatory_scan_range, - e, - ); - - if e.is_connection_error() { - relay_utils::relay_loop::reconnect_failed_client( - FailedClient::Source, - relay_utils::relay_loop::RECONNECT_DELAY, - &mut finality_source, - &mut finality_target, - ) - .await; - continue - } - }, - } - } - - // start/restart relay - if restart_relay { - let stall_timeout = relay_substrate_client::transaction_stall_timeout( - target_transactions_mortality, - P::TargetChain::AVERAGE_BLOCK_INTERVAL, - STALL_TIMEOUT, - ); - - log::info!( - target: "bridge", - "[{}] Starting on-demand headers relay task\n\t\ - Headers to relay: {:?}\n\t\ - Tx mortality: {:?} (~{}m)\n\t\ - Stall timeout: {:?}", - relay_task_name, - headers_to_relay, - target_transactions_mortality, - stall_timeout.as_secs_f64() / 60.0f64, - stall_timeout, - ); - - finality_relay_task.set( - finality_relay::run( - finality_source.clone(), - finality_target.clone(), - FinalitySyncParams { - tick: std::cmp::max( - P::SourceChain::AVERAGE_BLOCK_INTERVAL, - P::TargetChain::AVERAGE_BLOCK_INTERVAL, - ), - recent_finality_proofs_limit: RECENT_FINALITY_PROOFS_LIMIT, - stall_timeout, - headers_to_relay, - }, - metrics_params.clone().unwrap_or_else(MetricsParams::disabled), - futures::future::pending(), - ) - .fuse(), - ); - - restart_relay = false; - } - } -} - -/// Returns `Some()` with inclusive range of headers which must be scanned for mandatory headers -/// and the first of such headers must be submitted to the target node. -async fn mandatory_headers_scan_range( - best_finalized_source_header_at_source: Option, - best_finalized_source_header_at_target: Option, - required_header_number: BlockNumberOf, -) -> Option<(C::BlockNumber, C::BlockNumber)> { - // if we have been unable to read header number from the target, then let's assume - // that it is the same as required header number. 
Otherwise we risk submitting - // unneeded transactions - let best_finalized_source_header_at_target = - best_finalized_source_header_at_target.unwrap_or(required_header_number); - - // if we have been unable to read header number from the source, then let's assume - // that it is the same as at the target - let best_finalized_source_header_at_source = - best_finalized_source_header_at_source.unwrap_or(best_finalized_source_header_at_target); - - // if relay is already asked to sync more headers than we have at source, don't do anything yet - if required_header_number >= best_finalized_source_header_at_source { - return None - } - - Some(( - best_finalized_source_header_at_target + One::one(), - best_finalized_source_header_at_source, - )) -} - -/// Try to find mandatory header in the inclusive headers range and, if one is found, ask to relay -/// it. -/// -/// Returns `true` if header was found and (asked to be) relayed and `false` otherwise. -async fn relay_mandatory_header_from_range( - finality_source: &SubstrateFinalitySource

, - required_header_number: &RequiredHeaderNumberRef, - best_finalized_source_header_at_target: String, - range: (BlockNumberOf, BlockNumberOf), - relay_task_name: &str, -) -> Result { - // search for mandatory header first - let mandatory_source_header_number = - find_mandatory_header_in_range(finality_source, range).await?; - - // if there are no mandatory headers - we have nothing to do - let mandatory_source_header_number = match mandatory_source_header_number { - Some(mandatory_source_header_number) => mandatory_source_header_number, - None => return Ok(false), - }; - - // `find_mandatory_header` call may take a while => check if `required_header_number` is still - // less than our `mandatory_source_header_number` before logging anything - let mut required_header_number = required_header_number.lock().await; - if *required_header_number >= mandatory_source_header_number { - return Ok(false) - } - - log::trace!( - target: "bridge", - "[{}] Too many {} headers missing at target ({} vs {}). Going to sync up to the mandatory {}", - relay_task_name, - P::SourceChain::NAME, - best_finalized_source_header_at_target, - range.1, - mandatory_source_header_number, - ); - - *required_header_number = mandatory_source_header_number; - Ok(true) -} - -/// Read best finalized source block number from source client. -/// -/// Returns `None` if we have failed to read the number. -async fn best_finalized_source_header_at_source( - finality_source: &SubstrateFinalitySource

, - relay_task_name: &str, -) -> Result, relay_substrate_client::Error> { - finality_source.on_chain_best_finalized_block_number().await.map_err(|error| { - log::error!( - target: "bridge", - "[{}] Failed to read best finalized source header from source: {:?}", - relay_task_name, - error, - ); - - error - }) -} - -/// Read best finalized source block number from target client. -/// -/// Returns `None` if we have failed to read the number. -async fn best_finalized_source_header_at_target( - finality_target: &SubstrateFinalityTarget

, - relay_task_name: &str, -) -> Result, as RelayClient>::Error> -where - AccountIdOf: From< as sp_core::Pair>::Public>, -{ - finality_target - .best_finalized_source_block_id() - .await - .map_err(|error| { - log::error!( - target: "bridge", - "[{}] Failed to read best finalized source header from target: {:?}", - relay_task_name, - error, - ); - - error - }) - .map(|id| id.0) -} - -/// Read first mandatory header in given inclusive range. -/// -/// Returns `Ok(None)` if there were no mandatory headers in the range. -async fn find_mandatory_header_in_range( - finality_source: &SubstrateFinalitySource

, - range: (BlockNumberOf, BlockNumberOf), -) -> Result>, relay_substrate_client::Error> { - let mut current = range.0; - while current <= range.1 { - let header = finality_source.client().header_by_number(current).await?; - if >::ConsensusLogReader::schedules_authorities_change( - header.digest(), - ) { - return Ok(Some(current)) - } - - current += One::one(); - } - - Ok(None) -} - -/// On-demand headers relay task name. -fn on_demand_headers_relay_name() -> String { - format!("{}-to-{}-on-demand-headers", SourceChain::NAME, TargetChain::NAME) -} - -#[cfg(test)] -mod tests { - use super::*; - - type TestChain = relay_rococo_client::Rococo; - - const AT_SOURCE: Option = Some(10); - const AT_TARGET: Option = Some(1); - - #[async_std::test] - async fn mandatory_headers_scan_range_selects_range_if_some_headers_are_missing() { - assert_eq!( - mandatory_headers_scan_range::(AT_SOURCE, AT_TARGET, 0,).await, - Some((AT_TARGET.unwrap() + 1, AT_SOURCE.unwrap())), - ); - } - - #[async_std::test] - async fn mandatory_headers_scan_range_selects_nothing_if_already_queued() { - assert_eq!( - mandatory_headers_scan_range::(AT_SOURCE, AT_TARGET, AT_SOURCE.unwrap(),) - .await, - None, - ); - } -} diff --git a/relays/lib-substrate-relay/src/on_demand/mod.rs b/relays/lib-substrate-relay/src/on_demand/mod.rs deleted file mode 100644 index 00bb33d6740937ea7dde9066d9caad3ced2ca655..0000000000000000000000000000000000000000 --- a/relays/lib-substrate-relay/src/on_demand/mod.rs +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Types and functions intended to ease adding of new Substrate -> Substrate -//! on-demand pipelines. - -use async_trait::async_trait; -use relay_substrate_client::{BlockNumberOf, CallOf, Chain, Error as SubstrateError, HeaderIdOf}; - -pub mod headers; -pub mod parachains; - -/// On-demand headers relay that is relaying finalizing headers only when requested. -#[async_trait] -pub trait OnDemandRelay: Send + Sync { - /// Reconnect to source and target nodes. - async fn reconnect(&self) -> Result<(), SubstrateError>; - - /// Ask relay to relay source header with given number to the target chain. - /// - /// Depending on implementation, on-demand relay may also relay `required_header` ancestors - /// (e.g. if they're mandatory), or its descendants. The request is considered complete if - /// the best avbailable header at the target chain has number that is larger than or equal - /// to the `required_header`. - async fn require_more_headers(&self, required_header: BlockNumberOf); - - /// Ask relay to prove source `required_header` to the `TargetChain`. - /// - /// Returns number of header that is proved (it may be the `required_header` or one of its - /// descendants) and calls for delivering the proof. 
- async fn prove_header( - &self, - required_header: BlockNumberOf, - ) -> Result<(HeaderIdOf, Vec>), SubstrateError>; -} diff --git a/relays/lib-substrate-relay/src/on_demand/parachains.rs b/relays/lib-substrate-relay/src/on_demand/parachains.rs deleted file mode 100644 index 966bdc3107203a61cf405adba2cf09124330954e..0000000000000000000000000000000000000000 --- a/relays/lib-substrate-relay/src/on_demand/parachains.rs +++ /dev/null @@ -1,1039 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! On-demand Substrate -> Substrate parachain finality relay. 
- -use crate::{ - messages_source::best_finalized_peer_header_at_self, - on_demand::OnDemandRelay, - parachains::{ - source::ParachainsSource, target::ParachainsTarget, ParachainsPipelineAdapter, - SubmitParachainHeadsCallBuilder, SubstrateParachainsPipeline, - }, - TransactionParams, -}; - -use async_std::{ - channel::{unbounded, Receiver, Sender}, - sync::{Arc, Mutex}, -}; -use async_trait::async_trait; -use bp_polkadot_core::parachains::{ParaHash, ParaId}; -use bp_runtime::HeaderIdProvider; -use futures::{select, FutureExt}; -use num_traits::Zero; -use pallet_bridge_parachains::{RelayBlockHash, RelayBlockHasher, RelayBlockNumber}; -use parachains_relay::parachains_loop::{AvailableHeader, SourceClient, TargetClient}; -use relay_substrate_client::{ - is_ancient_block, AccountIdOf, AccountKeyPairOf, BlockNumberOf, CallOf, Chain, Client, - Error as SubstrateError, HashOf, HeaderIdOf, ParachainBase, -}; -use relay_utils::{ - metrics::MetricsParams, relay_loop::Client as RelayClient, BlockNumberBase, FailedClient, - HeaderId, UniqueSaturatedInto, -}; -use std::fmt::Debug; - -/// On-demand Substrate <-> Substrate parachain finality relay. -/// -/// This relay may be requested to sync more parachain headers, whenever some other relay -/// (e.g. messages relay) needs it to continue its regular work. When enough parachain headers -/// are relayed, on-demand stops syncing headers. -#[derive(Clone)] -pub struct OnDemandParachainsRelay { - /// Relay task name. - relay_task_name: String, - /// Channel used to communicate with background task and ask for relay of parachain heads. - required_header_number_sender: Sender>, - /// Source relay chain client. - source_relay_client: Client, - /// Target chain client. - target_client: Client, - /// On-demand relay chain relay. - on_demand_source_relay_to_target_headers: - Arc>, -} - -impl OnDemandParachainsRelay

{ - /// Create new on-demand parachains relay. - /// - /// Note that the argument is the source relay chain client, not the parachain client. - /// That's because parachain finality is determined by the relay chain and we don't - /// need to connect to the parachain itself here. - pub fn new( - source_relay_client: Client, - target_client: Client, - target_transaction_params: TransactionParams>, - on_demand_source_relay_to_target_headers: Arc< - dyn OnDemandRelay, - >, - ) -> Self - where - P::SourceParachain: Chain, - P::SourceRelayChain: - Chain, - AccountIdOf: - From< as sp_core::Pair>::Public>, - { - let (required_header_number_sender, required_header_number_receiver) = unbounded(); - let this = OnDemandParachainsRelay { - relay_task_name: on_demand_parachains_relay_name::( - ), - required_header_number_sender, - source_relay_client: source_relay_client.clone(), - target_client: target_client.clone(), - on_demand_source_relay_to_target_headers: on_demand_source_relay_to_target_headers - .clone(), - }; - async_std::task::spawn(async move { - background_task::

( - source_relay_client, - target_client, - target_transaction_params, - on_demand_source_relay_to_target_headers, - required_header_number_receiver, - ) - .await; - }); - - this - } -} - -#[async_trait] -impl OnDemandRelay - for OnDemandParachainsRelay

-where - P::SourceParachain: Chain, -{ - async fn reconnect(&self) -> Result<(), SubstrateError> { - // using clone is fine here (to avoid mut requirement), because clone on Client clones - // internal references - self.source_relay_client.clone().reconnect().await?; - self.target_client.clone().reconnect().await?; - // we'll probably need to reconnect relay chain relayer clients also - self.on_demand_source_relay_to_target_headers.reconnect().await - } - - async fn require_more_headers(&self, required_header: BlockNumberOf) { - if let Err(e) = self.required_header_number_sender.send(required_header).await { - log::trace!( - target: "bridge", - "[{}] Failed to request {} header {:?}: {:?}", - self.relay_task_name, - P::SourceParachain::NAME, - required_header, - e, - ); - } - } - - /// Ask relay to prove source `required_header` to the `TargetChain`. - async fn prove_header( - &self, - required_parachain_header: BlockNumberOf, - ) -> Result<(HeaderIdOf, Vec>), SubstrateError> { - // select headers to prove - let parachains_source = ParachainsSource::

::new( - self.source_relay_client.clone(), - Arc::new(Mutex::new(AvailableHeader::Missing)), - ); - let env = (self, ¶chains_source); - let (need_to_prove_relay_block, selected_relay_block, selected_parachain_block) = - select_headers_to_prove(env, required_parachain_header).await?; - - log::debug!( - target: "bridge", - "[{}] Requested to prove {} head {:?}. Selected to prove {} head {:?} and {} head {:?}", - self.relay_task_name, - P::SourceParachain::NAME, - required_parachain_header, - P::SourceParachain::NAME, - selected_parachain_block, - P::SourceRelayChain::NAME, - if need_to_prove_relay_block { - Some(selected_relay_block) - } else { - None - }, - ); - - // now let's prove relay chain block (if needed) - let mut calls = Vec::new(); - let mut proved_relay_block = selected_relay_block; - if need_to_prove_relay_block { - let (relay_block, relay_prove_call) = self - .on_demand_source_relay_to_target_headers - .prove_header(selected_relay_block.number()) - .await?; - proved_relay_block = relay_block; - calls.extend(relay_prove_call); - } - - // despite what we've selected before (in `select_headers_to_prove` call), if headers relay - // have chose the different header (e.g. because there's no GRANDPA jusstification for it), - // we need to prove parachain head available at this header - let para_id = ParaId(P::SourceParachain::PARACHAIN_ID); - let mut proved_parachain_block = selected_parachain_block; - if proved_relay_block != selected_relay_block { - proved_parachain_block = parachains_source - .on_chain_para_head_id(proved_relay_block) - .await? - // this could happen e.g. if parachain has been offboarded? - .ok_or_else(|| { - SubstrateError::MissingRequiredParachainHead( - para_id, - proved_relay_block.number().unique_saturated_into(), - ) - })?; - - log::debug!( - target: "bridge", - "[{}] Selected to prove {} head {:?} and {} head {:?}. 
Instead proved {} head {:?} and {} head {:?}", - self.relay_task_name, - P::SourceParachain::NAME, - selected_parachain_block, - P::SourceRelayChain::NAME, - selected_relay_block, - P::SourceParachain::NAME, - proved_parachain_block, - P::SourceRelayChain::NAME, - proved_relay_block, - ); - } - - // and finally - prove parachain head - let (para_proof, para_hash) = - parachains_source.prove_parachain_head(proved_relay_block).await?; - calls.push(P::SubmitParachainHeadsCallBuilder::build_submit_parachain_heads_call( - proved_relay_block, - vec![(para_id, para_hash)], - para_proof, - false, - )); - - Ok((proved_parachain_block, calls)) - } -} - -/// Background task that is responsible for starting parachain headers relay. -async fn background_task( - source_relay_client: Client, - target_client: Client, - target_transaction_params: TransactionParams>, - on_demand_source_relay_to_target_headers: Arc< - dyn OnDemandRelay, - >, - required_parachain_header_number_receiver: Receiver>, -) where - P::SourceParachain: Chain, - P::SourceRelayChain: - Chain, - AccountIdOf: From< as sp_core::Pair>::Public>, -{ - let relay_task_name = on_demand_parachains_relay_name::(); - let target_transactions_mortality = target_transaction_params.mortality; - - let mut relay_state = RelayState::Idle; - let mut required_parachain_header_number = Zero::zero(); - let required_para_header_ref = Arc::new(Mutex::new(AvailableHeader::Unavailable)); - - let mut restart_relay = true; - let parachains_relay_task = futures::future::Fuse::terminated(); - futures::pin_mut!(parachains_relay_task); - - let mut parachains_source = - ParachainsSource::

::new(source_relay_client.clone(), required_para_header_ref.clone()); - let mut parachains_target = ParachainsTarget::

::new( - source_relay_client.clone(), - target_client.clone(), - target_transaction_params.clone(), - ); - - loop { - select! { - new_required_parachain_header_number = required_parachain_header_number_receiver.recv().fuse() => { - let new_required_parachain_header_number = match new_required_parachain_header_number { - Ok(new_required_parachain_header_number) => new_required_parachain_header_number, - Err(e) => { - log::error!( - target: "bridge", - "[{}] Background task has exited with error: {:?}", - relay_task_name, - e, - ); - - return; - }, - }; - - // keep in mind that we are not updating `required_para_header_ref` here, because - // then we'll be submitting all previous headers as well (while required relay headers are - // delivered) and we want to avoid that (to reduce cost) - if new_required_parachain_header_number > required_parachain_header_number { - log::trace!( - target: "bridge", - "[{}] More {} headers required. Going to sync up to the {}", - relay_task_name, - P::SourceParachain::NAME, - new_required_parachain_header_number, - ); - - required_parachain_header_number = new_required_parachain_header_number; - } - }, - _ = async_std::task::sleep(P::TargetChain::AVERAGE_BLOCK_INTERVAL).fuse() => {}, - _ = parachains_relay_task => { - // this should never happen in practice given the current code - restart_relay = true; - }, - } - - // the workflow of the on-demand parachains relay is: - // - // 1) message relay (or any other dependent relay) sees new message at parachain header - // `PH`; - // - // 2) it sees that the target chain does not know `PH`; - // - // 3) it asks on-demand parachains relay to relay `PH` to the target chain; - // - // Phase#1: relaying relay chain header - // - // 4) on-demand parachains relay waits for GRANDPA-finalized block of the source relay chain - // `RH` that is storing `PH` or its descendant. 
Let it be `PH'`; - // 5) it asks on-demand headers relay to relay `RH` to the target chain; - // 6) it waits until `RH` (or its descendant) is relayed to the target chain; - // - // Phase#2: relaying parachain header - // - // 7) on-demand parachains relay sets `ParachainsSource::maximal_header_number` to the - // `PH'.number()`. - // 8) parachains finality relay sees that the parachain head has been updated and relays - // `PH'` to the target chain. - - // select headers to relay - let relay_data = read_relay_data( - ¶chains_source, - ¶chains_target, - required_parachain_header_number, - ) - .await; - match relay_data { - Ok(relay_data) => { - let prev_relay_state = relay_state; - relay_state = select_headers_to_relay(&relay_data, relay_state); - log::trace!( - target: "bridge", - "[{}] Selected new relay state: {:?} using old state {:?} and data {:?}", - relay_task_name, - relay_state, - prev_relay_state, - relay_data, - ); - }, - Err(failed_client) => { - relay_utils::relay_loop::reconnect_failed_client( - failed_client, - relay_utils::relay_loop::RECONNECT_DELAY, - &mut parachains_source, - &mut parachains_target, - ) - .await; - continue - }, - } - - // we have selected our new 'state' => let's notify our source clients about our new - // requirements - match relay_state { - RelayState::Idle => (), - RelayState::RelayingRelayHeader(required_relay_header) => { - on_demand_source_relay_to_target_headers - .require_more_headers(required_relay_header) - .await; - }, - RelayState::RelayingParaHeader(required_para_header) => { - *required_para_header_ref.lock().await = - AvailableHeader::Available(required_para_header); - }, - } - - // start/restart relay - if restart_relay { - let stall_timeout = relay_substrate_client::transaction_stall_timeout( - target_transactions_mortality, - P::TargetChain::AVERAGE_BLOCK_INTERVAL, - relay_utils::STALL_TIMEOUT, - ); - - log::info!( - target: "bridge", - "[{}] Starting on-demand-parachains relay task\n\t\ - Tx mortality: {:?} 
(~{}m)\n\t\ - Stall timeout: {:?}", - relay_task_name, - target_transactions_mortality, - stall_timeout.as_secs_f64() / 60.0f64, - stall_timeout, - ); - - parachains_relay_task.set( - parachains_relay::parachains_loop::run( - parachains_source.clone(), - parachains_target.clone(), - MetricsParams::disabled(), - // we do not support free parachain headers relay in on-demand relays - false, - futures::future::pending(), - ) - .fuse(), - ); - - restart_relay = false; - } - } -} - -/// On-demand parachains relay task name. -fn on_demand_parachains_relay_name() -> String { - format!("{}-to-{}-on-demand-parachain", SourceChain::NAME, TargetChain::NAME) -} - -/// On-demand relay state. -#[derive(Clone, Copy, Debug, PartialEq)] -enum RelayState { - /// On-demand relay is not doing anything. - Idle, - /// Relaying given relay header to relay given parachain header later. - RelayingRelayHeader(RelayNumber), - /// Relaying given parachain header. - RelayingParaHeader(HeaderId), -} - -/// Data gathered from source and target clients, used by on-demand relay. -#[derive(Debug)] -struct RelayData { - /// Parachain header number that is required at the target chain. - pub required_para_header: ParaNumber, - /// Parachain header number, known to the target chain. - pub para_header_at_target: Option, - /// Parachain header id, known to the source (relay) chain. - pub para_header_at_source: Option>, - /// Parachain header, that is available at the source relay chain at `relay_header_at_target` - /// block. - /// - /// May be `None` if there's no `relay_header_at_target` yet, or if the - /// `relay_header_at_target` is too old and we think its state has been pruned. - pub para_header_at_relay_header_at_target: Option>, - /// Relay header number at the source chain. - pub relay_header_at_source: RelayNumber, - /// Relay header number at the target chain. - pub relay_header_at_target: Option, -} - -/// Read required data from source and target clients. 
-async fn read_relay_data( - source: &ParachainsSource

, - target: &ParachainsTarget

, - required_header_number: BlockNumberOf, -) -> Result< - RelayData< - HashOf, - BlockNumberOf, - BlockNumberOf, - >, - FailedClient, -> -where - ParachainsTarget

: - TargetClient> + RelayClient, -{ - let map_target_err = |e| { - log::error!( - target: "bridge", - "[{}] Failed to read relay data from {} client: {:?}", - on_demand_parachains_relay_name::(), - P::TargetChain::NAME, - e, - ); - FailedClient::Target - }; - let map_source_err = |e| { - log::error!( - target: "bridge", - "[{}] Failed to read relay data from {} client: {:?}", - on_demand_parachains_relay_name::(), - P::SourceRelayChain::NAME, - e, - ); - FailedClient::Source - }; - - let best_target_block_hash = target.best_block().await.map_err(map_target_err)?.1; - let para_header_at_target = best_finalized_peer_header_at_self::< - P::TargetChain, - P::SourceParachain, - >(target.target_client(), best_target_block_hash) - .await; - // if there are no parachain heads at the target (`NoParachainHeadAtTarget`), we'll need to - // submit at least one. Otherwise the pallet will be treated as uninitialized and messages - // sync will stall. - let para_header_at_target = match para_header_at_target { - Ok(Some(para_header_at_target)) => Some(para_header_at_target.0), - Ok(None) => None, - Err(e) => return Err(map_target_err(e)), - }; - - let best_finalized_relay_header = - source.client().best_finalized_header().await.map_err(map_source_err)?; - let best_finalized_relay_block_id = best_finalized_relay_header.id(); - let para_header_at_source = source - .on_chain_para_head_id(best_finalized_relay_block_id) - .await - .map_err(map_source_err)?; - - let relay_header_at_source = best_finalized_relay_block_id.0; - let relay_header_at_target = best_finalized_peer_header_at_self::< - P::TargetChain, - P::SourceRelayChain, - >(target.target_client(), best_target_block_hash) - .await - .map_err(map_target_err)?; - - // if relay header at target is too old then its state may already be discarded at the source - // => just use `None` in this case - // - // the same is for case when there's no relay header at target at all - let available_relay_header_at_target = - 
relay_header_at_target.filter(|relay_header_at_target| { - !is_ancient_block(relay_header_at_target.number(), relay_header_at_source) - }); - let para_header_at_relay_header_at_target = - if let Some(available_relay_header_at_target) = available_relay_header_at_target { - source - .on_chain_para_head_id(available_relay_header_at_target) - .await - .map_err(map_source_err)? - } else { - None - }; - - Ok(RelayData { - required_para_header: required_header_number, - para_header_at_target, - para_header_at_source, - relay_header_at_source, - relay_header_at_target: relay_header_at_target - .map(|relay_header_at_target| relay_header_at_target.0), - para_header_at_relay_header_at_target, - }) -} - -/// Select relay and parachain headers that need to be relayed. -fn select_headers_to_relay( - data: &RelayData, - state: RelayState, -) -> RelayState -where - ParaHash: Clone, - ParaNumber: Copy + PartialOrd + Zero, - RelayNumber: Copy + Debug + Ord, -{ - // we can't do anything until **relay chain** bridge GRANDPA pallet is not initialized at the - // target chain - let relay_header_at_target = match data.relay_header_at_target { - Some(relay_header_at_target) => relay_header_at_target, - None => return RelayState::Idle, - }; - - // Process the `RelayingRelayHeader` state. - if let &RelayState::RelayingRelayHeader(relay_header_number) = &state { - if relay_header_at_target < relay_header_number { - // The required relay header hasn't yet been relayed. Ask / wait for it. - return state - } - - // We may switch to `RelayingParaHeader` if parachain head is available. - if let Some(para_header_at_relay_header_at_target) = - data.para_header_at_relay_header_at_target.as_ref() - { - return RelayState::RelayingParaHeader(para_header_at_relay_header_at_target.clone()) - } - - // else use the regular process - e.g. we may require to deliver new relay header first - } - - // Process the `RelayingParaHeader` state. 
- if let RelayState::RelayingParaHeader(para_header_id) = &state { - let para_header_at_target_or_zero = data.para_header_at_target.unwrap_or_else(Zero::zero); - if para_header_at_target_or_zero < para_header_id.0 { - // The required parachain header hasn't yet been relayed. Ask / wait for it. - return state - } - } - - // if we haven't read para head from the source, we can't yet do anything - let para_header_at_source = match data.para_header_at_source { - Some(ref para_header_at_source) => para_header_at_source.clone(), - None => return RelayState::Idle, - }; - - // if we have parachain head at the source, but no parachain heads at the target, we'll need - // to deliver at least one parachain head - let (required_para_header, para_header_at_target) = match data.para_header_at_target { - Some(para_header_at_target) => (data.required_para_header, para_header_at_target), - None => (para_header_at_source.0, Zero::zero()), - }; - - // if we have already satisfied our "customer", do nothing - if required_para_header <= para_header_at_target { - return RelayState::Idle - } - - // if required header is not available even at the source chain, let's wait - if required_para_header > para_header_at_source.0 { - return RelayState::Idle - } - - // we will always try to sync latest parachain/relay header, even if we've been asked for some - // its ancestor - - // we need relay chain header first - if relay_header_at_target < data.relay_header_at_source { - return RelayState::RelayingRelayHeader(data.relay_header_at_source) - } - - // if all relay headers synced, we may start directly with parachain header - RelayState::RelayingParaHeader(para_header_at_source) -} - -/// Environment for the `select_headers_to_prove` call. -#[async_trait] -trait SelectHeadersToProveEnvironment { - /// Returns associated parachain id. - fn parachain_id(&self) -> ParaId; - /// Returns best finalized relay block. 
- async fn best_finalized_relay_block_at_source( - &self, - ) -> Result, SubstrateError>; - /// Returns best finalized relay block that is known at `P::TargetChain`. - async fn best_finalized_relay_block_at_target( - &self, - ) -> Result, SubstrateError>; - /// Returns best finalized parachain block at given source relay chain block. - async fn best_finalized_para_block_at_source( - &self, - at_relay_block: HeaderId, - ) -> Result>, SubstrateError>; -} - -#[async_trait] -impl<'a, P: SubstrateParachainsPipeline> - SelectHeadersToProveEnvironment< - BlockNumberOf, - HashOf, - BlockNumberOf, - HashOf, - > for (&'a OnDemandParachainsRelay

, &'a ParachainsSource

) -{ - fn parachain_id(&self) -> ParaId { - ParaId(P::SourceParachain::PARACHAIN_ID) - } - - async fn best_finalized_relay_block_at_source( - &self, - ) -> Result, SubstrateError> { - Ok(self.0.source_relay_client.best_finalized_header().await?.id()) - } - - async fn best_finalized_relay_block_at_target( - &self, - ) -> Result, SubstrateError> { - Ok(crate::messages_source::read_client_state::( - &self.0.target_client, - None, - ) - .await? - .best_finalized_peer_at_best_self - .ok_or(SubstrateError::BridgePalletIsNotInitialized)?) - } - - async fn best_finalized_para_block_at_source( - &self, - at_relay_block: HeaderIdOf, - ) -> Result>, SubstrateError> { - self.1.on_chain_para_head_id(at_relay_block).await - } -} - -/// Given request to prove `required_parachain_header`, select actual headers that need to be -/// proved. -async fn select_headers_to_prove( - env: impl SelectHeadersToProveEnvironment, - required_parachain_header: PBN, -) -> Result<(bool, HeaderId, HeaderId), SubstrateError> -where - RBH: Copy, - RBN: BlockNumberBase, - PBH: Copy, - PBN: BlockNumberBase, -{ - // parachains proof also requires relay header proof. Let's first select relay block - // number that we'll be dealing with - let best_finalized_relay_block_at_source = env.best_finalized_relay_block_at_source().await?; - let best_finalized_relay_block_at_target = env.best_finalized_relay_block_at_target().await?; - - // if we can't prove `required_header` even using `best_finalized_relay_block_at_source`, we - // can't do anything here - // (this shall not actually happen, given current code, because we only require finalized - // headers) - let best_possible_parachain_block = env - .best_finalized_para_block_at_source(best_finalized_relay_block_at_source) - .await? 
- .filter(|best_possible_parachain_block| { - best_possible_parachain_block.number() >= required_parachain_header - }) - .ok_or(SubstrateError::MissingRequiredParachainHead( - env.parachain_id(), - required_parachain_header.unique_saturated_into(), - ))?; - - // we don't require source node to be archive, so we can't craft storage proofs using - // ancient headers. So if the `best_finalized_relay_block_at_target` is too ancient, we - // can't craft storage proofs using it - let may_use_state_at_best_finalized_relay_block_at_target = !is_ancient_block( - best_finalized_relay_block_at_target.number(), - best_finalized_relay_block_at_source.number(), - ); - - // now let's check if `required_header` may be proved using - // `best_finalized_relay_block_at_target` - let selection = if may_use_state_at_best_finalized_relay_block_at_target { - env.best_finalized_para_block_at_source(best_finalized_relay_block_at_target) - .await? - .filter(|best_finalized_para_block_at_target| { - best_finalized_para_block_at_target.number() >= required_parachain_header - }) - .map(|best_finalized_para_block_at_target| { - (false, best_finalized_relay_block_at_target, best_finalized_para_block_at_target) - }) - } else { - None - }; - - Ok(selection.unwrap_or(( - true, - best_finalized_relay_block_at_source, - best_possible_parachain_block, - ))) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn relay_waits_for_relay_header_to_be_delivered() { - assert_eq!( - select_headers_to_relay( - &RelayData { - required_para_header: 90, - para_header_at_target: Some(50), - para_header_at_source: Some(HeaderId(110, 110)), - relay_header_at_source: 800, - relay_header_at_target: Some(700), - para_header_at_relay_header_at_target: Some(HeaderId(100, 100)), - }, - RelayState::RelayingRelayHeader(750), - ), - RelayState::RelayingRelayHeader(750), - ); - } - - #[test] - fn relay_starts_relaying_requested_para_header_after_relay_header_is_delivered() { - assert_eq!( - select_headers_to_relay( 
- &RelayData { - required_para_header: 90, - para_header_at_target: Some(50), - para_header_at_source: Some(HeaderId(110, 110)), - relay_header_at_source: 800, - relay_header_at_target: Some(750), - para_header_at_relay_header_at_target: Some(HeaderId(100, 100)), - }, - RelayState::RelayingRelayHeader(750), - ), - RelayState::RelayingParaHeader(HeaderId(100, 100)), - ); - } - - #[test] - fn relay_selects_better_para_header_after_better_relay_header_is_delivered() { - assert_eq!( - select_headers_to_relay( - &RelayData { - required_para_header: 90, - para_header_at_target: Some(50), - para_header_at_source: Some(HeaderId(110, 110)), - relay_header_at_source: 800, - relay_header_at_target: Some(780), - para_header_at_relay_header_at_target: Some(HeaderId(105, 105)), - }, - RelayState::RelayingRelayHeader(750), - ), - RelayState::RelayingParaHeader(HeaderId(105, 105)), - ); - } - #[test] - fn relay_waits_for_para_header_to_be_delivered() { - assert_eq!( - select_headers_to_relay( - &RelayData { - required_para_header: 90, - para_header_at_target: Some(50), - para_header_at_source: Some(HeaderId(110, 110)), - relay_header_at_source: 800, - relay_header_at_target: Some(780), - para_header_at_relay_header_at_target: Some(HeaderId(105, 105)), - }, - RelayState::RelayingParaHeader(HeaderId(105, 105)), - ), - RelayState::RelayingParaHeader(HeaderId(105, 105)), - ); - } - - #[test] - fn relay_stays_idle_if_required_para_header_is_already_delivered() { - assert_eq!( - select_headers_to_relay( - &RelayData { - required_para_header: 90, - para_header_at_target: Some(105), - para_header_at_source: Some(HeaderId(110, 110)), - relay_header_at_source: 800, - relay_header_at_target: Some(780), - para_header_at_relay_header_at_target: Some(HeaderId(105, 105)), - }, - RelayState::Idle, - ), - RelayState::Idle, - ); - } - - #[test] - fn relay_waits_for_required_para_header_to_appear_at_source_1() { - assert_eq!( - select_headers_to_relay( - &RelayData { - required_para_header: 120, - 
para_header_at_target: Some(105), - para_header_at_source: None, - relay_header_at_source: 800, - relay_header_at_target: Some(780), - para_header_at_relay_header_at_target: Some(HeaderId(105, 105)), - }, - RelayState::Idle, - ), - RelayState::Idle, - ); - } - - #[test] - fn relay_waits_for_required_para_header_to_appear_at_source_2() { - assert_eq!( - select_headers_to_relay( - &RelayData { - required_para_header: 120, - para_header_at_target: Some(105), - para_header_at_source: Some(HeaderId(110, 110)), - relay_header_at_source: 800, - relay_header_at_target: Some(780), - para_header_at_relay_header_at_target: Some(HeaderId(105, 105)), - }, - RelayState::Idle, - ), - RelayState::Idle, - ); - } - - #[test] - fn relay_starts_relaying_relay_header_when_new_para_header_is_requested() { - assert_eq!( - select_headers_to_relay( - &RelayData { - required_para_header: 120, - para_header_at_target: Some(105), - para_header_at_source: Some(HeaderId(125, 125)), - relay_header_at_source: 800, - relay_header_at_target: Some(780), - para_header_at_relay_header_at_target: Some(HeaderId(105, 105)), - }, - RelayState::Idle, - ), - RelayState::RelayingRelayHeader(800), - ); - } - - #[test] - fn relay_starts_relaying_para_header_when_new_para_header_is_requested() { - assert_eq!( - select_headers_to_relay( - &RelayData { - required_para_header: 120, - para_header_at_target: Some(105), - para_header_at_source: Some(HeaderId(125, 125)), - relay_header_at_source: 800, - relay_header_at_target: Some(800), - para_header_at_relay_header_at_target: Some(HeaderId(125, 125)), - }, - RelayState::Idle, - ), - RelayState::RelayingParaHeader(HeaderId(125, 125)), - ); - } - - #[test] - fn relay_goes_idle_when_parachain_is_deregistered() { - assert_eq!( - select_headers_to_relay::( - &RelayData { - required_para_header: 120, - para_header_at_target: Some(105), - para_header_at_source: None, - relay_header_at_source: 800, - relay_header_at_target: Some(800), - 
para_header_at_relay_header_at_target: None, - }, - RelayState::RelayingRelayHeader(800), - ), - RelayState::Idle, - ); - } - - #[test] - fn relay_starts_relaying_first_parachain_header() { - assert_eq!( - select_headers_to_relay::( - &RelayData { - required_para_header: 0, - para_header_at_target: None, - para_header_at_source: Some(HeaderId(125, 125)), - relay_header_at_source: 800, - relay_header_at_target: Some(800), - para_header_at_relay_header_at_target: Some(HeaderId(125, 125)), - }, - RelayState::Idle, - ), - RelayState::RelayingParaHeader(HeaderId(125, 125)), - ); - } - - #[test] - fn relay_starts_relaying_relay_header_to_relay_first_parachain_header() { - assert_eq!( - select_headers_to_relay::( - &RelayData { - required_para_header: 0, - para_header_at_target: None, - para_header_at_source: Some(HeaderId(125, 125)), - relay_header_at_source: 800, - relay_header_at_target: Some(700), - para_header_at_relay_header_at_target: Some(HeaderId(125, 125)), - }, - RelayState::Idle, - ), - RelayState::RelayingRelayHeader(800), - ); - } - - // tuple is: - // - // - best_finalized_relay_block_at_source - // - best_finalized_relay_block_at_target - // - best_finalized_para_block_at_source at best_finalized_relay_block_at_source - // - best_finalized_para_block_at_source at best_finalized_relay_block_at_target - #[async_trait] - impl SelectHeadersToProveEnvironment for (u32, u32, u32, u32) { - fn parachain_id(&self) -> ParaId { - ParaId(0) - } - - async fn best_finalized_relay_block_at_source( - &self, - ) -> Result, SubstrateError> { - Ok(HeaderId(self.0, self.0)) - } - - async fn best_finalized_relay_block_at_target( - &self, - ) -> Result, SubstrateError> { - Ok(HeaderId(self.1, self.1)) - } - - async fn best_finalized_para_block_at_source( - &self, - at_relay_block: HeaderId, - ) -> Result>, SubstrateError> { - if at_relay_block.0 == self.0 { - Ok(Some(HeaderId(self.2, self.2))) - } else if at_relay_block.0 == self.1 { - Ok(Some(HeaderId(self.3, self.3))) - } 
else { - Ok(None) - } - } - } - - #[async_std::test] - async fn select_headers_to_prove_returns_err_if_required_para_block_is_missing_at_source() { - assert!(matches!( - select_headers_to_prove((20_u32, 10_u32, 200_u32, 100_u32), 300_u32,).await, - Err(SubstrateError::MissingRequiredParachainHead(ParaId(0), 300_u64)), - )); - } - - #[async_std::test] - async fn select_headers_to_prove_fails_to_use_existing_ancient_relay_block() { - assert_eq!( - select_headers_to_prove((220_u32, 10_u32, 200_u32, 100_u32), 100_u32,) - .await - .map_err(drop), - Ok((true, HeaderId(220, 220), HeaderId(200, 200))), - ); - } - - #[async_std::test] - async fn select_headers_to_prove_is_able_to_use_existing_recent_relay_block() { - assert_eq!( - select_headers_to_prove((40_u32, 10_u32, 200_u32, 100_u32), 100_u32,) - .await - .map_err(drop), - Ok((false, HeaderId(10, 10), HeaderId(100, 100))), - ); - } - - #[async_std::test] - async fn select_headers_to_prove_uses_new_relay_block() { - assert_eq!( - select_headers_to_prove((20_u32, 10_u32, 200_u32, 100_u32), 200_u32,) - .await - .map_err(drop), - Ok((true, HeaderId(20, 20), HeaderId(200, 200))), - ); - } -} diff --git a/relays/lib-substrate-relay/src/parachains/mod.rs b/relays/lib-substrate-relay/src/parachains/mod.rs deleted file mode 100644 index 8b128bb770dd7a05d28ad46d4561f4d859b1deb6..0000000000000000000000000000000000000000 --- a/relays/lib-substrate-relay/src/parachains/mod.rs +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Types and functions intended to ease adding of new Substrate -> Substrate -//! parachain finality proofs synchronization pipelines. - -use async_trait::async_trait; -use bp_polkadot_core::parachains::{ParaHash, ParaHeadsProof, ParaId}; -use pallet_bridge_parachains::{ - Call as BridgeParachainsCall, Config as BridgeParachainsConfig, RelayBlockHash, - RelayBlockHasher, RelayBlockNumber, -}; -use parachains_relay::ParachainsPipeline; -use relay_substrate_client::{ - CallOf, Chain, ChainWithTransactions, HeaderIdOf, Parachain, RelayChain, -}; -use std::{fmt::Debug, marker::PhantomData}; - -pub mod source; -pub mod target; - -/// Substrate -> Substrate parachain finality proofs synchronization pipeline. -/// -/// This is currently restricted to the single parachain, because it is how it -/// will be used (at least) initially. -#[async_trait] -pub trait SubstrateParachainsPipeline: 'static + Clone + Debug + Send + Sync { - /// Headers of this parachain are submitted to the `Self::TargetChain`. - type SourceParachain: Parachain; - /// Relay chain that is storing headers of `Self::SourceParachain`. - type SourceRelayChain: RelayChain; - /// Target chain where `Self::SourceParachain` headers are submitted. - type TargetChain: ChainWithTransactions; - - /// How submit parachains heads call is built? - type SubmitParachainHeadsCallBuilder: SubmitParachainHeadsCallBuilder; -} - -/// Adapter that allows all `SubstrateParachainsPipeline` to act as `ParachainsPipeline`. -#[derive(Clone, Debug)] -pub struct ParachainsPipelineAdapter { - _phantom: PhantomData

, -} - -impl ParachainsPipeline for ParachainsPipelineAdapter

{ - type SourceParachain = P::SourceParachain; - type SourceRelayChain = P::SourceRelayChain; - type TargetChain = P::TargetChain; -} - -/// Different ways of building `submit_parachain_heads` calls. -pub trait SubmitParachainHeadsCallBuilder: - 'static + Send + Sync -{ - /// Given parachains and their heads proof, build call of `submit_parachain_heads` - /// function of bridge parachains module at the target chain. - fn build_submit_parachain_heads_call( - at_relay_block: HeaderIdOf, - parachains: Vec<(ParaId, ParaHash)>, - parachain_heads_proof: ParaHeadsProof, - is_free_execution_expected: bool, - ) -> CallOf; -} - -/// Building `submit_parachain_heads` call when you have direct access to the target -/// chain runtime. -pub struct DirectSubmitParachainHeadsCallBuilder { - _phantom: PhantomData<(P, R, I)>, -} - -impl SubmitParachainHeadsCallBuilder

for DirectSubmitParachainHeadsCallBuilder -where - P: SubstrateParachainsPipeline, - P::SourceRelayChain: Chain, - R: BridgeParachainsConfig + Send + Sync, - I: 'static + Send + Sync, - R::BridgedChain: bp_runtime::Chain< - BlockNumber = RelayBlockNumber, - Hash = RelayBlockHash, - Hasher = RelayBlockHasher, - >, - CallOf: From>, -{ - fn build_submit_parachain_heads_call( - at_relay_block: HeaderIdOf, - parachains: Vec<(ParaId, ParaHash)>, - parachain_heads_proof: ParaHeadsProof, - _is_free_execution_expected: bool, - ) -> CallOf { - BridgeParachainsCall::::submit_parachain_heads { - at_relay_block: (at_relay_block.0, at_relay_block.1), - parachains, - parachain_heads_proof, - } - .into() - } -} diff --git a/relays/lib-substrate-relay/src/parachains/source.rs b/relays/lib-substrate-relay/src/parachains/source.rs deleted file mode 100644 index 32d70cf425f0ba6fe88b3685d2d730a6eacd28da..0000000000000000000000000000000000000000 --- a/relays/lib-substrate-relay/src/parachains/source.rs +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Parachain heads source. 
- -use crate::parachains::{ParachainsPipelineAdapter, SubstrateParachainsPipeline}; - -use async_std::sync::{Arc, Mutex}; -use async_trait::async_trait; -use bp_parachains::parachain_head_storage_key_at_source; -use bp_polkadot_core::parachains::{ParaHash, ParaHead, ParaHeadsProof, ParaId}; -use bp_runtime::HeaderIdProvider; -use codec::Decode; -use parachains_relay::parachains_loop::{AvailableHeader, SourceClient}; -use relay_substrate_client::{ - is_ancient_block, Chain, Client, Error as SubstrateError, HeaderIdOf, HeaderOf, ParachainBase, - RelayChain, -}; -use relay_utils::relay_loop::Client as RelayClient; - -/// Shared updatable reference to the maximal parachain header id that we want to sync from the -/// source. -pub type RequiredHeaderIdRef = Arc>>>; - -/// Substrate client as parachain heads source. -#[derive(Clone)] -pub struct ParachainsSource { - client: Client, - max_head_id: RequiredHeaderIdRef, -} - -impl ParachainsSource

{ - /// Creates new parachains source client. - pub fn new( - client: Client, - max_head_id: RequiredHeaderIdRef, - ) -> Self { - ParachainsSource { client, max_head_id } - } - - /// Returns reference to the underlying RPC client. - pub fn client(&self) -> &Client { - &self.client - } - - /// Return decoded head of given parachain. - pub async fn on_chain_para_head_id( - &self, - at_block: HeaderIdOf, - ) -> Result>, SubstrateError> { - let para_id = ParaId(P::SourceParachain::PARACHAIN_ID); - let storage_key = - parachain_head_storage_key_at_source(P::SourceRelayChain::PARAS_PALLET_NAME, para_id); - let para_head = self.client.raw_storage_value(storage_key, Some(at_block.1)).await?; - let para_head = para_head.map(|h| ParaHead::decode(&mut &h.0[..])).transpose()?; - let para_head = match para_head { - Some(para_head) => para_head, - None => return Ok(None), - }; - let para_head: HeaderOf = Decode::decode(&mut ¶_head.0[..])?; - Ok(Some(para_head.id())) - } -} - -#[async_trait] -impl RelayClient for ParachainsSource

{ - type Error = SubstrateError; - - async fn reconnect(&mut self) -> Result<(), SubstrateError> { - self.client.reconnect().await - } -} - -#[async_trait] -impl SourceClient> - for ParachainsSource

-where - P::SourceParachain: Chain, -{ - async fn ensure_synced(&self) -> Result { - match self.client.ensure_synced().await { - Ok(_) => Ok(true), - Err(SubstrateError::ClientNotSynced(_)) => Ok(false), - Err(e) => Err(e), - } - } - - async fn parachain_head( - &self, - at_block: HeaderIdOf, - ) -> Result>, Self::Error> { - // if requested relay header is ancient, then we don't even want to try to read the - // parachain head - we simply return `Unavailable` - let best_block_number = self.client.best_finalized_header_number().await?; - if is_ancient_block(at_block.number(), best_block_number) { - return Ok(AvailableHeader::Unavailable) - } - - // else - try to read head from the source client - let mut para_head_id = AvailableHeader::Missing; - if let Some(on_chain_para_head_id) = self.on_chain_para_head_id(at_block).await? { - // Never return head that is larger than requested. This way we'll never sync - // headers past `max_header_id`. - para_head_id = match *self.max_head_id.lock().await { - AvailableHeader::Unavailable => AvailableHeader::Unavailable, - AvailableHeader::Missing => { - // `max_header_id` is not set. There is no limit. - AvailableHeader::Available(on_chain_para_head_id) - }, - AvailableHeader::Available(max_head_id) if on_chain_para_head_id >= max_head_id => { - // We report at most `max_header_id`. 
- AvailableHeader::Available(std::cmp::min(on_chain_para_head_id, max_head_id)) - }, - AvailableHeader::Available(_) => { - // the `max_head_id` is not yet available at the source chain => wait and avoid - // syncing extra headers - AvailableHeader::Unavailable - }, - } - } - - Ok(para_head_id) - } - - async fn prove_parachain_head( - &self, - at_block: HeaderIdOf, - ) -> Result<(ParaHeadsProof, ParaHash), Self::Error> { - let parachain = ParaId(P::SourceParachain::PARACHAIN_ID); - let storage_key = - parachain_head_storage_key_at_source(P::SourceRelayChain::PARAS_PALLET_NAME, parachain); - let parachain_heads_proof = self - .client - .prove_storage(vec![storage_key.clone()], at_block.1) - .await? - .into_iter_nodes() - .collect(); - - // why we're reading parachain head here once again (it has already been read at the - // `parachain_head`)? that's because `parachain_head` sometimes returns obsolete parachain - // head and loop sometimes asks to prove this obsolete head and gets other (actual) head - // instead - // - // => since we want to provide proper hashes in our `submit_parachain_heads` call, we're - // rereading actual value here - let parachain_head = self - .client - .raw_storage_value(storage_key, Some(at_block.1)) - .await? - .map(|h| ParaHead::decode(&mut &h.0[..])) - .transpose()? - .ok_or_else(|| { - SubstrateError::Custom(format!( - "Failed to read expected parachain {parachain:?} head at {at_block:?}" - )) - })?; - let parachain_head_hash = parachain_head.hash(); - - Ok((ParaHeadsProof { storage_proof: parachain_heads_proof }, parachain_head_hash)) - } -} diff --git a/relays/lib-substrate-relay/src/parachains/target.rs b/relays/lib-substrate-relay/src/parachains/target.rs deleted file mode 100644 index e10d15b6edf6c75773e2e10bc9caf2e406632284..0000000000000000000000000000000000000000 --- a/relays/lib-substrate-relay/src/parachains/target.rs +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. 
-// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Parachain heads target. - -use crate::{ - parachains::{ - ParachainsPipelineAdapter, SubmitParachainHeadsCallBuilder, SubstrateParachainsPipeline, - }, - TransactionParams, -}; - -use async_trait::async_trait; -use bp_parachains::{ - ImportedParaHeadsKeyProvider, ParaInfo, ParaStoredHeaderData, ParasInfoKeyProvider, -}; -use bp_polkadot_core::{ - parachains::{ParaHash, ParaHeadsProof, ParaId}, - BlockNumber as RelayBlockNumber, -}; -use bp_runtime::{ - Chain as ChainBase, HeaderId, HeaderIdProvider, StorageDoubleMapKeyProvider, - StorageMapKeyProvider, -}; -use parachains_relay::parachains_loop::TargetClient; -use relay_substrate_client::{ - AccountIdOf, AccountKeyPairOf, BlockNumberOf, Chain, Client, Error as SubstrateError, - HeaderIdOf, ParachainBase, RelayChain, TransactionEra, TransactionTracker, UnsignedTransaction, -}; -use relay_utils::relay_loop::Client as RelayClient; -use sp_core::Pair; - -/// Substrate client as parachain heads source. -pub struct ParachainsTarget { - source_client: Client, - target_client: Client, - transaction_params: TransactionParams>, -} - -impl ParachainsTarget

{ - /// Creates new parachains target client. - pub fn new( - source_client: Client, - target_client: Client, - transaction_params: TransactionParams>, - ) -> Self { - ParachainsTarget { source_client, target_client, transaction_params } - } - - /// Returns reference to the underlying RPC client. - pub fn target_client(&self) -> &Client { - &self.target_client - } -} - -impl Clone for ParachainsTarget

{ - fn clone(&self) -> Self { - ParachainsTarget { - source_client: self.source_client.clone(), - target_client: self.target_client.clone(), - transaction_params: self.transaction_params.clone(), - } - } -} - -#[async_trait] -impl RelayClient for ParachainsTarget

{ - type Error = SubstrateError; - - async fn reconnect(&mut self) -> Result<(), SubstrateError> { - self.target_client.reconnect().await?; - self.source_client.reconnect().await?; - Ok(()) - } -} - -#[async_trait] -impl

TargetClient> for ParachainsTarget

-where - P: SubstrateParachainsPipeline, - AccountIdOf: From< as Pair>::Public>, - P::SourceParachain: ChainBase, - P::SourceRelayChain: ChainBase, -{ - type TransactionTracker = TransactionTracker>; - - async fn best_block(&self) -> Result, Self::Error> { - let best_header = self.target_client.best_header().await?; - let best_id = best_header.id(); - - Ok(best_id) - } - - async fn best_finalized_source_relay_chain_block( - &self, - at_block: &HeaderIdOf, - ) -> Result, Self::Error> { - self.target_client - .typed_state_call::<_, Option>>( - P::SourceRelayChain::BEST_FINALIZED_HEADER_ID_METHOD.into(), - (), - Some(at_block.1), - ) - .await? - .map(Ok) - .unwrap_or(Err(SubstrateError::BridgePalletIsNotInitialized)) - } - - async fn free_source_relay_headers_interval( - &self, - ) -> Result>, Self::Error> { - self.target_client - .typed_state_call(P::SourceRelayChain::FREE_HEADERS_INTERVAL_METHOD.into(), (), None) - .await - } - - async fn parachain_head( - &self, - at_block: HeaderIdOf, - ) -> Result< - Option<(HeaderIdOf, HeaderIdOf)>, - Self::Error, - > { - // read best parachain head from the target bridge-parachains pallet - let storage_key = ParasInfoKeyProvider::final_key( - P::SourceRelayChain::WITH_CHAIN_BRIDGE_PARACHAINS_PALLET_NAME, - &P::SourceParachain::PARACHAIN_ID.into(), - ); - let storage_value: Option = - self.target_client.storage_value(storage_key, Some(at_block.hash())).await?; - let para_info = match storage_value { - Some(para_info) => para_info, - None => return Ok(None), - }; - - // now we need to get full header ids. For source relay chain it is simple, because we - // are connected - let relay_header_id = self - .source_client - .header_by_number(para_info.best_head_hash.at_relay_block_number) - .await? 
- .id(); - - // for parachain, we need to read from the target chain runtime storage - let storage_key = ImportedParaHeadsKeyProvider::final_key( - P::SourceRelayChain::WITH_CHAIN_BRIDGE_PARACHAINS_PALLET_NAME, - &P::SourceParachain::PARACHAIN_ID.into(), - ¶_info.best_head_hash.head_hash, - ); - let storage_value: Option = - self.target_client.storage_value(storage_key, Some(at_block.hash())).await?; - let para_head_number = match storage_value { - Some(para_head_data) => - para_head_data.decode_parachain_head_data::()?.number, - None => return Ok(None), - }; - - let para_head_id = HeaderId(para_head_number, para_info.best_head_hash.head_hash); - Ok(Some((relay_header_id, para_head_id))) - } - - async fn submit_parachain_head_proof( - &self, - at_relay_block: HeaderIdOf, - updated_head_hash: ParaHash, - proof: ParaHeadsProof, - is_free_execution_expected: bool, - ) -> Result { - let transaction_params = self.transaction_params.clone(); - let call = P::SubmitParachainHeadsCallBuilder::build_submit_parachain_heads_call( - at_relay_block, - vec![(ParaId(P::SourceParachain::PARACHAIN_ID), updated_head_hash)], - proof, - is_free_execution_expected, - ); - self.target_client - .submit_and_watch_signed_extrinsic( - &transaction_params.signer, - move |best_block_id, transaction_nonce| { - Ok(UnsignedTransaction::new(call.into(), transaction_nonce) - .era(TransactionEra::new(best_block_id, transaction_params.mortality))) - }, - ) - .await - } -} diff --git a/relays/messages/Cargo.toml b/relays/messages/Cargo.toml deleted file mode 100644 index 3367e4bbd443900298aae2e87bab5ed0ef50fa1a..0000000000000000000000000000000000000000 --- a/relays/messages/Cargo.toml +++ /dev/null @@ -1,27 +0,0 @@ -[package] -name = "messages-relay" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] -async-std = { version = "1.6.5", features = ["attributes"] } -async-trait = 
"0.1" -env_logger = "0.11" -futures = "0.3.30" -hex = "0.4" -log = { workspace = true } -num-traits = "0.2" -parking_lot = "0.12.1" - -# Bridge Dependencies - -bp-messages = { path = "../../primitives/messages" } -finality-relay = { path = "../finality" } -relay-utils = { path = "../utils" } - -sp-arithmetic = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } diff --git a/relays/messages/src/lib.rs b/relays/messages/src/lib.rs deleted file mode 100644 index 9c62cee5ee3db1e0b8825d2893d53422bf44a33c..0000000000000000000000000000000000000000 --- a/relays/messages/src/lib.rs +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Relaying [`pallet-bridge-messages`](../pallet_bridge_messages/index.html) application specific -//! data. Message lane allows sending arbitrary messages between bridged chains. This -//! module provides entrypoint that starts reading messages from given message lane -//! of source chain and submits proof-of-message-at-source-chain transactions to the -//! target chain. Additionally, proofs-of-messages-delivery are sent back from the -//! target chain to the source chain. - -// required for futures::select! 
-#![recursion_limit = "1024"] -#![warn(missing_docs)] - -mod metrics; - -pub mod message_lane; -pub mod message_lane_loop; - -mod message_race_delivery; -mod message_race_limits; -mod message_race_loop; -mod message_race_receiving; -mod message_race_strategy; diff --git a/relays/messages/src/message_lane.rs b/relays/messages/src/message_lane.rs deleted file mode 100644 index 5c9728ad93abd5aa1ea9b2fc77b2a6f9968539f6..0000000000000000000000000000000000000000 --- a/relays/messages/src/message_lane.rs +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! One-way message lane types. Within single one-way lane we have three 'races' where we try to: -//! -//! 1) relay new messages from source to target node; -//! 2) relay proof-of-delivery from target to source node. - -use num_traits::{SaturatingAdd, Zero}; -use relay_utils::{BlockNumberBase, HeaderId}; -use sp_arithmetic::traits::AtLeast32BitUnsigned; -use std::{fmt::Debug, ops::Sub}; - -/// One-way message lane. -pub trait MessageLane: 'static + Clone + Send + Sync { - /// Name of the messages source. - const SOURCE_NAME: &'static str; - /// Name of the messages target. - const TARGET_NAME: &'static str; - - /// Messages proof. 
- type MessagesProof: Clone + Debug + Send + Sync; - /// Messages receiving proof. - type MessagesReceivingProof: Clone + Debug + Send + Sync; - - /// The type of the source chain token balance, that is used to: - /// - /// 1) pay transaction fees; - /// 2) pay message delivery and dispatch fee; - /// 3) pay relayer rewards. - type SourceChainBalance: AtLeast32BitUnsigned - + Clone - + Copy - + Debug - + PartialOrd - + Sub - + SaturatingAdd - + Zero - + Send - + Sync; - /// Number of the source header. - type SourceHeaderNumber: BlockNumberBase; - /// Hash of the source header. - type SourceHeaderHash: Clone + Debug + Default + PartialEq + Send + Sync; - - /// Number of the target header. - type TargetHeaderNumber: BlockNumberBase; - /// Hash of the target header. - type TargetHeaderHash: Clone + Debug + Default + PartialEq + Send + Sync; -} - -/// Source header id within given one-way message lane. -pub type SourceHeaderIdOf

= - HeaderId<

::SourceHeaderHash,

::SourceHeaderNumber>; - -/// Target header id within given one-way message lane. -pub type TargetHeaderIdOf

= - HeaderId<

::TargetHeaderHash,

::TargetHeaderNumber>; diff --git a/relays/messages/src/message_lane_loop.rs b/relays/messages/src/message_lane_loop.rs deleted file mode 100644 index b681d86d2ae8fadb6b0d5b5277de5a1285533d36..0000000000000000000000000000000000000000 --- a/relays/messages/src/message_lane_loop.rs +++ /dev/null @@ -1,1277 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Message delivery loop. Designed to work with messages pallet. -//! -//! Single relay instance delivers messages of single lane in single direction. -//! To serve two-way lane, you would need two instances of relay. -//! To serve N two-way lanes, you would need N*2 instances of relay. -//! -//! Please keep in mind that the best header in this file is actually best -//! finalized header. I.e. when talking about headers in lane context, we -//! only care about finalized headers. 
- -use std::{collections::BTreeMap, fmt::Debug, future::Future, ops::RangeInclusive, time::Duration}; - -use async_trait::async_trait; -use futures::{channel::mpsc::unbounded, future::FutureExt, stream::StreamExt}; - -use bp_messages::{LaneId, MessageNonce, UnrewardedRelayersState, Weight}; -use relay_utils::{ - interval, metrics::MetricsParams, process_future_result, relay_loop::Client as RelayClient, - retry_backoff, FailedClient, TransactionTracker, -}; - -use crate::{ - message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}, - message_race_delivery::run as run_message_delivery_race, - message_race_receiving::run as run_message_receiving_race, - metrics::MessageLaneLoopMetrics, -}; - -/// Message lane loop configuration params. -#[derive(Debug, Clone)] -pub struct Params { - /// Id of lane this loop is servicing. - pub lane: LaneId, - /// Interval at which we ask target node about its updates. - pub source_tick: Duration, - /// Interval at which we ask target node about its updates. - pub target_tick: Duration, - /// Delay between moments when connection error happens and our reconnect attempt. - pub reconnect_delay: Duration, - /// Message delivery race parameters. - pub delivery_params: MessageDeliveryParams, -} - -/// Message delivery race parameters. -#[derive(Debug, Clone)] -pub struct MessageDeliveryParams { - /// Maximal number of unconfirmed relayer entries at the inbound lane. If there's that number - /// of entries in the `InboundLaneData::relayers` set, all new messages will be rejected until - /// reward payment will be proved (by including outbound lane state to the message delivery - /// transaction). - pub max_unrewarded_relayer_entries_at_target: MessageNonce, - /// Message delivery race will stop delivering messages if there are - /// `max_unconfirmed_nonces_at_target` unconfirmed nonces on the target node. The race would - /// continue once they're confirmed by the receiving race. 
- pub max_unconfirmed_nonces_at_target: MessageNonce, - /// Maximal number of relayed messages in single delivery transaction. - pub max_messages_in_single_batch: MessageNonce, - /// Maximal cumulative dispatch weight of relayed messages in single delivery transaction. - pub max_messages_weight_in_single_batch: Weight, - /// Maximal cumulative size of relayed messages in single delivery transaction. - pub max_messages_size_in_single_batch: u32, -} - -/// Message details. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct MessageDetails { - /// Message dispatch weight. - pub dispatch_weight: Weight, - /// Message size (number of bytes in encoded payload). - pub size: u32, - /// The relayer reward paid in the source chain tokens. - pub reward: SourceChainBalance, -} - -/// Messages details map. -pub type MessageDetailsMap = - BTreeMap>; - -/// Message delivery race proof parameters. -#[derive(Debug, PartialEq, Eq)] -pub struct MessageProofParameters { - /// Include outbound lane state proof? - pub outbound_state_proof_required: bool, - /// Cumulative dispatch weight of messages that we're building proof for. - pub dispatch_weight: Weight, -} - -/// Artifacts of submitting nonces proof. -pub struct NoncesSubmitArtifacts { - /// Submitted nonces range. - pub nonces: RangeInclusive, - /// Submitted transaction tracker. - pub tx_tracker: T, -} - -/// Batch transaction that already submit some headers and needs to be extended with -/// messages/delivery proof before sending. -pub trait BatchTransaction: Debug + Send + Sync { - /// Header that was required in the original call and which is bundled within this - /// batch transaction. - fn required_header_id(&self) -> HeaderId; -} - -/// Source client trait. -#[async_trait] -pub trait SourceClient: RelayClient { - /// Type of batch transaction that submits finality and message receiving proof. - type BatchTransaction: BatchTransaction> + Clone; - /// Transaction tracker to track submitted transactions. 
- type TransactionTracker: TransactionTracker>; - - /// Returns state of the client. - async fn state(&self) -> Result, Self::Error>; - - /// Get nonce of instance of latest generated message. - async fn latest_generated_nonce( - &self, - id: SourceHeaderIdOf

, - ) -> Result<(SourceHeaderIdOf

, MessageNonce), Self::Error>; - - /// Get nonce of the latest message, which receiving has been confirmed by the target chain. - async fn latest_confirmed_received_nonce( - &self, - id: SourceHeaderIdOf

, - ) -> Result<(SourceHeaderIdOf

, MessageNonce), Self::Error>; - - /// Returns mapping of message nonces, generated on this client, to their weights. - /// - /// Some messages may be missing from returned map, if corresponding messages were pruned at - /// the source chain. - async fn generated_message_details( - &self, - id: SourceHeaderIdOf

, - nonces: RangeInclusive, - ) -> Result, Self::Error>; - - /// Prove messages in inclusive range [begin; end]. - async fn prove_messages( - &self, - id: SourceHeaderIdOf

, - nonces: RangeInclusive, - proof_parameters: MessageProofParameters, - ) -> Result<(SourceHeaderIdOf

, RangeInclusive, P::MessagesProof), Self::Error>; - - /// Submit messages receiving proof. - async fn submit_messages_receiving_proof( - &self, - maybe_batch_tx: Option, - generated_at_block: TargetHeaderIdOf

, - proof: P::MessagesReceivingProof, - ) -> Result; - - /// We need given finalized target header on source to continue synchronization. - /// - /// We assume that the absence of header `id` has already been checked by caller. - /// - /// The client may return `Some(_)`, which means that nothing has happened yet and - /// the caller must generate and append message receiving proof to the batch transaction - /// to actually send it (along with required header) to the node. - /// - /// If function has returned `None`, it means that the caller now must wait for the - /// appearance of the target header `id` at the source client. - async fn require_target_header_on_source( - &self, - id: TargetHeaderIdOf

, - ) -> Result, Self::Error>; -} - -/// Target client trait. -#[async_trait] -pub trait TargetClient: RelayClient { - /// Type of batch transaction that submits finality and messages proof. - type BatchTransaction: BatchTransaction> + Clone; - /// Transaction tracker to track submitted transactions. - type TransactionTracker: TransactionTracker>; - - /// Returns state of the client. - async fn state(&self) -> Result, Self::Error>; - - /// Get nonce of latest received message. - async fn latest_received_nonce( - &self, - id: TargetHeaderIdOf

, - ) -> Result<(TargetHeaderIdOf

, MessageNonce), Self::Error>; - - /// Get nonce of the latest confirmed message. - async fn latest_confirmed_received_nonce( - &self, - id: TargetHeaderIdOf

, - ) -> Result<(TargetHeaderIdOf

, MessageNonce), Self::Error>; - - /// Get state of unrewarded relayers set at the inbound lane. - async fn unrewarded_relayers_state( - &self, - id: TargetHeaderIdOf

, - ) -> Result<(TargetHeaderIdOf

, UnrewardedRelayersState), Self::Error>; - - /// Prove messages receiving at given block. - async fn prove_messages_receiving( - &self, - id: TargetHeaderIdOf

, - ) -> Result<(TargetHeaderIdOf

, P::MessagesReceivingProof), Self::Error>; - - /// Submit messages proof. - async fn submit_messages_proof( - &self, - maybe_batch_tx: Option, - generated_at_header: SourceHeaderIdOf

, - nonces: RangeInclusive, - proof: P::MessagesProof, - ) -> Result, Self::Error>; - - /// We need given finalized source header on target to continue synchronization. - /// - /// The client may return `Some(_)`, which means that nothing has happened yet and - /// the caller must generate and append messages proof to the batch transaction - /// to actually send it (along with required header) to the node. - /// - /// If function has returned `None`, it means that the caller now must wait for the - /// appearance of the source header `id` at the target client. - async fn require_source_header_on_target( - &self, - id: SourceHeaderIdOf

, - ) -> Result, Self::Error>; -} - -/// State of the client. -#[derive(Clone, Debug, Default, PartialEq, Eq)] -pub struct ClientState { - /// The best header id of this chain. - pub best_self: SelfHeaderId, - /// Best finalized header id of this chain. - pub best_finalized_self: SelfHeaderId, - /// Best finalized header id of the peer chain read at the best block of this chain (at - /// `best_finalized_self`). - /// - /// It may be `None` e,g. if peer is a parachain and we haven't yet relayed any parachain - /// heads. - pub best_finalized_peer_at_best_self: Option, - /// Header id of the peer chain with the number, matching the - /// `best_finalized_peer_at_best_self`. - pub actual_best_finalized_peer_at_best_self: Option, -} - -/// State of source client in one-way message lane. -pub type SourceClientState

= ClientState, TargetHeaderIdOf

>; - -/// State of target client in one-way message lane. -pub type TargetClientState

= ClientState, SourceHeaderIdOf

>; - -/// Both clients state. -#[derive(Debug, Default)] -pub struct ClientsState { - /// Source client state. - pub source: Option>, - /// Target client state. - pub target: Option>, -} - -/// Return prefix that will be used by default to expose Prometheus metrics of the finality proofs -/// sync loop. -pub fn metrics_prefix(lane: &LaneId) -> String { - format!("{}_to_{}_MessageLane_{}", P::SOURCE_NAME, P::TARGET_NAME, hex::encode(lane)) -} - -/// Run message lane service loop. -pub async fn run( - params: Params, - source_client: impl SourceClient

, - target_client: impl TargetClient

, - metrics_params: MetricsParams, - exit_signal: impl Future + Send + 'static, -) -> Result<(), relay_utils::Error> { - let exit_signal = exit_signal.shared(); - relay_utils::relay_loop(source_client, target_client) - .reconnect_delay(params.reconnect_delay) - .with_metrics(metrics_params) - .loop_metric(MessageLaneLoopMetrics::new(Some(&metrics_prefix::

(¶ms.lane)))?)? - .expose() - .await? - .run(metrics_prefix::

(¶ms.lane), move |source_client, target_client, metrics| { - run_until_connection_lost( - params.clone(), - source_client, - target_client, - metrics, - exit_signal.clone(), - ) - }) - .await -} - -/// Run one-way message delivery loop until connection with target or source node is lost, or exit -/// signal is received. -async fn run_until_connection_lost, TC: TargetClient

>( - params: Params, - source_client: SC, - target_client: TC, - metrics_msg: Option, - exit_signal: impl Future, -) -> Result<(), FailedClient> { - let mut source_retry_backoff = retry_backoff(); - let mut source_client_is_online = false; - let mut source_state_required = true; - let source_state = source_client.state().fuse(); - let source_go_offline_future = futures::future::Fuse::terminated(); - let source_tick_stream = interval(params.source_tick).fuse(); - - let mut target_retry_backoff = retry_backoff(); - let mut target_client_is_online = false; - let mut target_state_required = true; - let target_state = target_client.state().fuse(); - let target_go_offline_future = futures::future::Fuse::terminated(); - let target_tick_stream = interval(params.target_tick).fuse(); - - let ( - (delivery_source_state_sender, delivery_source_state_receiver), - (delivery_target_state_sender, delivery_target_state_receiver), - ) = (unbounded(), unbounded()); - let delivery_race_loop = run_message_delivery_race( - source_client.clone(), - delivery_source_state_receiver, - target_client.clone(), - delivery_target_state_receiver, - metrics_msg.clone(), - params.delivery_params, - ) - .fuse(); - - let ( - (receiving_source_state_sender, receiving_source_state_receiver), - (receiving_target_state_sender, receiving_target_state_receiver), - ) = (unbounded(), unbounded()); - let receiving_race_loop = run_message_receiving_race( - source_client.clone(), - receiving_source_state_receiver, - target_client.clone(), - receiving_target_state_receiver, - metrics_msg.clone(), - ) - .fuse(); - - let exit_signal = exit_signal.fuse(); - - futures::pin_mut!( - source_state, - source_go_offline_future, - source_tick_stream, - target_state, - target_go_offline_future, - target_tick_stream, - delivery_race_loop, - receiving_race_loop, - exit_signal - ); - - loop { - futures::select! 
{ - new_source_state = source_state => { - source_state_required = false; - - source_client_is_online = process_future_result( - new_source_state, - &mut source_retry_backoff, - |new_source_state| { - log::debug!( - target: "bridge", - "Received state from {} node: {:?}", - P::SOURCE_NAME, - new_source_state, - ); - let _ = delivery_source_state_sender.unbounded_send(new_source_state.clone()); - let _ = receiving_source_state_sender.unbounded_send(new_source_state.clone()); - - if let Some(metrics_msg) = metrics_msg.as_ref() { - metrics_msg.update_source_state::

(new_source_state); - } - }, - &mut source_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving state from {} node", P::SOURCE_NAME), - ).fail_if_connection_error(FailedClient::Source)?; - }, - _ = source_go_offline_future => { - source_client_is_online = true; - }, - _ = source_tick_stream.next() => { - source_state_required = true; - }, - new_target_state = target_state => { - target_state_required = false; - - target_client_is_online = process_future_result( - new_target_state, - &mut target_retry_backoff, - |new_target_state| { - log::debug!( - target: "bridge", - "Received state from {} node: {:?}", - P::TARGET_NAME, - new_target_state, - ); - let _ = delivery_target_state_sender.unbounded_send(new_target_state.clone()); - let _ = receiving_target_state_sender.unbounded_send(new_target_state.clone()); - - if let Some(metrics_msg) = metrics_msg.as_ref() { - metrics_msg.update_target_state::

(new_target_state); - } - }, - &mut target_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving state from {} node", P::TARGET_NAME), - ).fail_if_connection_error(FailedClient::Target)?; - }, - _ = target_go_offline_future => { - target_client_is_online = true; - }, - _ = target_tick_stream.next() => { - target_state_required = true; - }, - - delivery_error = delivery_race_loop => { - match delivery_error { - Ok(_) => unreachable!("only ends with error; qed"), - Err(err) => return Err(err), - } - }, - receiving_error = receiving_race_loop => { - match receiving_error { - Ok(_) => unreachable!("only ends with error; qed"), - Err(err) => return Err(err), - } - }, - - () = exit_signal => { - return Ok(()); - } - } - - if source_client_is_online && source_state_required { - log::debug!(target: "bridge", "Asking {} node about its state", P::SOURCE_NAME); - source_state.set(source_client.state().fuse()); - source_client_is_online = false; - } - - if target_client_is_online && target_state_required { - log::debug!(target: "bridge", "Asking {} node about its state", P::TARGET_NAME); - target_state.set(target_client.state().fuse()); - target_client_is_online = false; - } - } -} - -#[cfg(test)] -pub(crate) mod tests { - use std::sync::Arc; - - use futures::stream::StreamExt; - use parking_lot::Mutex; - - use relay_utils::{HeaderId, MaybeConnectionError, TrackedTransactionStatus}; - - use super::*; - - pub fn header_id(number: TestSourceHeaderNumber) -> TestSourceHeaderId { - HeaderId(number, number) - } - - pub type TestSourceChainBalance = u64; - pub type TestSourceHeaderId = HeaderId; - pub type TestTargetHeaderId = HeaderId; - - pub type TestMessagesProof = (RangeInclusive, Option); - pub type TestMessagesReceivingProof = MessageNonce; - - pub type TestSourceHeaderNumber = u64; - pub type TestSourceHeaderHash = u64; - - pub type TestTargetHeaderNumber = u64; - pub type TestTargetHeaderHash = u64; - - #[derive(Debug)] - pub struct TestError; - - impl 
MaybeConnectionError for TestError { - fn is_connection_error(&self) -> bool { - true - } - } - - #[derive(Clone)] - pub struct TestMessageLane; - - impl MessageLane for TestMessageLane { - const SOURCE_NAME: &'static str = "TestSource"; - const TARGET_NAME: &'static str = "TestTarget"; - - type MessagesProof = TestMessagesProof; - type MessagesReceivingProof = TestMessagesReceivingProof; - - type SourceChainBalance = TestSourceChainBalance; - type SourceHeaderNumber = TestSourceHeaderNumber; - type SourceHeaderHash = TestSourceHeaderHash; - - type TargetHeaderNumber = TestTargetHeaderNumber; - type TargetHeaderHash = TestTargetHeaderHash; - } - - #[derive(Clone, Debug)] - pub struct TestMessagesBatchTransaction { - required_header_id: TestSourceHeaderId, - } - - #[async_trait] - impl BatchTransaction for TestMessagesBatchTransaction { - fn required_header_id(&self) -> TestSourceHeaderId { - self.required_header_id - } - } - - #[derive(Clone, Debug)] - pub struct TestConfirmationBatchTransaction { - required_header_id: TestTargetHeaderId, - } - - #[async_trait] - impl BatchTransaction for TestConfirmationBatchTransaction { - fn required_header_id(&self) -> TestTargetHeaderId { - self.required_header_id - } - } - - #[derive(Clone, Debug)] - pub struct TestTransactionTracker(TrackedTransactionStatus); - - impl Default for TestTransactionTracker { - fn default() -> TestTransactionTracker { - TestTransactionTracker(TrackedTransactionStatus::Finalized(Default::default())) - } - } - - #[async_trait] - impl TransactionTracker for TestTransactionTracker { - type HeaderId = TestTargetHeaderId; - - async fn wait(self) -> TrackedTransactionStatus { - self.0 - } - } - - #[derive(Debug, Clone)] - pub struct TestClientData { - is_source_fails: bool, - is_source_reconnected: bool, - source_state: SourceClientState, - source_latest_generated_nonce: MessageNonce, - source_latest_confirmed_received_nonce: MessageNonce, - source_tracked_transaction_status: TrackedTransactionStatus, - 
submitted_messages_receiving_proofs: Vec, - is_target_fails: bool, - is_target_reconnected: bool, - target_state: SourceClientState, - target_latest_received_nonce: MessageNonce, - target_latest_confirmed_received_nonce: MessageNonce, - target_tracked_transaction_status: TrackedTransactionStatus, - submitted_messages_proofs: Vec, - target_to_source_batch_transaction: Option, - target_to_source_header_required: Option, - target_to_source_header_requirements: Vec, - source_to_target_batch_transaction: Option, - source_to_target_header_required: Option, - source_to_target_header_requirements: Vec, - } - - impl Default for TestClientData { - fn default() -> TestClientData { - TestClientData { - is_source_fails: false, - is_source_reconnected: false, - source_state: Default::default(), - source_latest_generated_nonce: 0, - source_latest_confirmed_received_nonce: 0, - source_tracked_transaction_status: TrackedTransactionStatus::Finalized(HeaderId( - 0, - Default::default(), - )), - submitted_messages_receiving_proofs: Vec::new(), - is_target_fails: false, - is_target_reconnected: false, - target_state: Default::default(), - target_latest_received_nonce: 0, - target_latest_confirmed_received_nonce: 0, - target_tracked_transaction_status: TrackedTransactionStatus::Finalized(HeaderId( - 0, - Default::default(), - )), - submitted_messages_proofs: Vec::new(), - target_to_source_batch_transaction: None, - target_to_source_header_required: None, - target_to_source_header_requirements: Vec::new(), - source_to_target_batch_transaction: None, - source_to_target_header_required: None, - source_to_target_header_requirements: Vec::new(), - } - } - } - - impl TestClientData { - fn receive_messages( - &mut self, - maybe_batch_tx: Option, - proof: TestMessagesProof, - ) { - self.target_state.best_self = - HeaderId(self.target_state.best_self.0 + 1, self.target_state.best_self.1 + 1); - self.target_state.best_finalized_self = self.target_state.best_self; - 
self.target_latest_received_nonce = *proof.0.end(); - if let Some(maybe_batch_tx) = maybe_batch_tx { - self.target_state.best_finalized_peer_at_best_self = - Some(maybe_batch_tx.required_header_id()); - } - if let Some(target_latest_confirmed_received_nonce) = proof.1 { - self.target_latest_confirmed_received_nonce = - target_latest_confirmed_received_nonce; - } - self.submitted_messages_proofs.push(proof); - } - - fn receive_messages_delivery_proof( - &mut self, - maybe_batch_tx: Option, - proof: TestMessagesReceivingProof, - ) { - self.source_state.best_self = - HeaderId(self.source_state.best_self.0 + 1, self.source_state.best_self.1 + 1); - self.source_state.best_finalized_self = self.source_state.best_self; - if let Some(maybe_batch_tx) = maybe_batch_tx { - self.source_state.best_finalized_peer_at_best_self = - Some(maybe_batch_tx.required_header_id()); - } - self.submitted_messages_receiving_proofs.push(proof); - self.source_latest_confirmed_received_nonce = proof; - } - } - - #[derive(Clone)] - pub struct TestSourceClient { - data: Arc>, - tick: Arc, - post_tick: Arc, - } - - impl Default for TestSourceClient { - fn default() -> Self { - TestSourceClient { - data: Arc::new(Mutex::new(TestClientData::default())), - tick: Arc::new(|_| {}), - post_tick: Arc::new(|_| {}), - } - } - } - - #[async_trait] - impl RelayClient for TestSourceClient { - type Error = TestError; - - async fn reconnect(&mut self) -> Result<(), TestError> { - { - let mut data = self.data.lock(); - (self.tick)(&mut data); - data.is_source_reconnected = true; - (self.post_tick)(&mut data); - } - Ok(()) - } - } - - #[async_trait] - impl SourceClient for TestSourceClient { - type BatchTransaction = TestConfirmationBatchTransaction; - type TransactionTracker = TestTransactionTracker; - - async fn state(&self) -> Result, TestError> { - let mut data = self.data.lock(); - (self.tick)(&mut data); - if data.is_source_fails { - return Err(TestError) - } - (self.post_tick)(&mut data); - 
Ok(data.source_state.clone()) - } - - async fn latest_generated_nonce( - &self, - id: SourceHeaderIdOf, - ) -> Result<(SourceHeaderIdOf, MessageNonce), TestError> { - let mut data = self.data.lock(); - (self.tick)(&mut data); - if data.is_source_fails { - return Err(TestError) - } - (self.post_tick)(&mut data); - Ok((id, data.source_latest_generated_nonce)) - } - - async fn latest_confirmed_received_nonce( - &self, - id: SourceHeaderIdOf, - ) -> Result<(SourceHeaderIdOf, MessageNonce), TestError> { - let mut data = self.data.lock(); - (self.tick)(&mut data); - (self.post_tick)(&mut data); - Ok((id, data.source_latest_confirmed_received_nonce)) - } - - async fn generated_message_details( - &self, - _id: SourceHeaderIdOf, - nonces: RangeInclusive, - ) -> Result, TestError> { - Ok(nonces - .map(|nonce| { - ( - nonce, - MessageDetails { - dispatch_weight: Weight::from_parts(1, 0), - size: 1, - reward: 1, - }, - ) - }) - .collect()) - } - - async fn prove_messages( - &self, - id: SourceHeaderIdOf, - nonces: RangeInclusive, - proof_parameters: MessageProofParameters, - ) -> Result< - (SourceHeaderIdOf, RangeInclusive, TestMessagesProof), - TestError, - > { - let mut data = self.data.lock(); - (self.tick)(&mut data); - (self.post_tick)(&mut data); - Ok(( - id, - nonces.clone(), - ( - nonces, - if proof_parameters.outbound_state_proof_required { - Some(data.source_latest_confirmed_received_nonce) - } else { - None - }, - ), - )) - } - - async fn submit_messages_receiving_proof( - &self, - maybe_batch_tx: Option, - _generated_at_block: TargetHeaderIdOf, - proof: TestMessagesReceivingProof, - ) -> Result { - let mut data = self.data.lock(); - (self.tick)(&mut data); - data.receive_messages_delivery_proof(maybe_batch_tx, proof); - (self.post_tick)(&mut data); - Ok(TestTransactionTracker(data.source_tracked_transaction_status)) - } - - async fn require_target_header_on_source( - &self, - id: TargetHeaderIdOf, - ) -> Result, Self::Error> { - let mut data = self.data.lock(); - 
data.target_to_source_header_required = Some(id); - data.target_to_source_header_requirements.push(id); - (self.tick)(&mut data); - (self.post_tick)(&mut data); - - Ok(data.target_to_source_batch_transaction.take().map(|mut tx| { - tx.required_header_id = id; - tx - })) - } - } - - #[derive(Clone)] - pub struct TestTargetClient { - data: Arc>, - tick: Arc, - post_tick: Arc, - } - - impl Default for TestTargetClient { - fn default() -> Self { - TestTargetClient { - data: Arc::new(Mutex::new(TestClientData::default())), - tick: Arc::new(|_| {}), - post_tick: Arc::new(|_| {}), - } - } - } - - #[async_trait] - impl RelayClient for TestTargetClient { - type Error = TestError; - - async fn reconnect(&mut self) -> Result<(), TestError> { - { - let mut data = self.data.lock(); - (self.tick)(&mut data); - data.is_target_reconnected = true; - (self.post_tick)(&mut data); - } - Ok(()) - } - } - - #[async_trait] - impl TargetClient for TestTargetClient { - type BatchTransaction = TestMessagesBatchTransaction; - type TransactionTracker = TestTransactionTracker; - - async fn state(&self) -> Result, TestError> { - let mut data = self.data.lock(); - (self.tick)(&mut data); - if data.is_target_fails { - return Err(TestError) - } - (self.post_tick)(&mut data); - Ok(data.target_state.clone()) - } - - async fn latest_received_nonce( - &self, - id: TargetHeaderIdOf, - ) -> Result<(TargetHeaderIdOf, MessageNonce), TestError> { - let mut data = self.data.lock(); - (self.tick)(&mut data); - if data.is_target_fails { - return Err(TestError) - } - (self.post_tick)(&mut data); - Ok((id, data.target_latest_received_nonce)) - } - - async fn unrewarded_relayers_state( - &self, - id: TargetHeaderIdOf, - ) -> Result<(TargetHeaderIdOf, UnrewardedRelayersState), TestError> { - Ok(( - id, - UnrewardedRelayersState { - unrewarded_relayer_entries: 0, - messages_in_oldest_entry: 0, - total_messages: 0, - last_delivered_nonce: 0, - }, - )) - } - - async fn latest_confirmed_received_nonce( - &self, - id: 
TargetHeaderIdOf, - ) -> Result<(TargetHeaderIdOf, MessageNonce), TestError> { - let mut data = self.data.lock(); - (self.tick)(&mut data); - if data.is_target_fails { - return Err(TestError) - } - (self.post_tick)(&mut data); - Ok((id, data.target_latest_confirmed_received_nonce)) - } - - async fn prove_messages_receiving( - &self, - id: TargetHeaderIdOf, - ) -> Result<(TargetHeaderIdOf, TestMessagesReceivingProof), TestError> { - Ok((id, self.data.lock().target_latest_received_nonce)) - } - - async fn submit_messages_proof( - &self, - maybe_batch_tx: Option, - _generated_at_header: SourceHeaderIdOf, - nonces: RangeInclusive, - proof: TestMessagesProof, - ) -> Result, TestError> { - let mut data = self.data.lock(); - (self.tick)(&mut data); - if data.is_target_fails { - return Err(TestError) - } - data.receive_messages(maybe_batch_tx, proof); - (self.post_tick)(&mut data); - Ok(NoncesSubmitArtifacts { - nonces, - tx_tracker: TestTransactionTracker(data.target_tracked_transaction_status), - }) - } - - async fn require_source_header_on_target( - &self, - id: SourceHeaderIdOf, - ) -> Result, Self::Error> { - let mut data = self.data.lock(); - data.source_to_target_header_required = Some(id); - data.source_to_target_header_requirements.push(id); - (self.tick)(&mut data); - (self.post_tick)(&mut data); - - Ok(data.source_to_target_batch_transaction.take().map(|mut tx| { - tx.required_header_id = id; - tx - })) - } - } - - fn run_loop_test( - data: Arc>, - source_tick: Arc, - source_post_tick: Arc, - target_tick: Arc, - target_post_tick: Arc, - exit_signal: impl Future + 'static + Send, - ) -> TestClientData { - async_std::task::block_on(async { - let source_client = TestSourceClient { - data: data.clone(), - tick: source_tick, - post_tick: source_post_tick, - }; - let target_client = TestTargetClient { - data: data.clone(), - tick: target_tick, - post_tick: target_post_tick, - }; - let _ = run( - Params { - lane: LaneId([0, 0, 0, 0]), - source_tick: 
Duration::from_millis(100), - target_tick: Duration::from_millis(100), - reconnect_delay: Duration::from_millis(0), - delivery_params: MessageDeliveryParams { - max_unrewarded_relayer_entries_at_target: 4, - max_unconfirmed_nonces_at_target: 4, - max_messages_in_single_batch: 4, - max_messages_weight_in_single_batch: Weight::from_parts(4, 0), - max_messages_size_in_single_batch: 4, - }, - }, - source_client, - target_client, - MetricsParams::disabled(), - exit_signal, - ) - .await; - let result = data.lock().clone(); - result - }) - } - - #[test] - fn message_lane_loop_is_able_to_recover_from_connection_errors() { - // with this configuration, source client will return Err, making source client - // reconnect. Then the target client will fail with Err + reconnect. Then we finally - // able to deliver messages. - let (exit_sender, exit_receiver) = unbounded(); - let result = run_loop_test( - Arc::new(Mutex::new(TestClientData { - is_source_fails: true, - source_state: ClientState { - best_self: HeaderId(0, 0), - best_finalized_self: HeaderId(0, 0), - best_finalized_peer_at_best_self: Some(HeaderId(0, 0)), - actual_best_finalized_peer_at_best_self: Some(HeaderId(0, 0)), - }, - source_latest_generated_nonce: 1, - target_state: ClientState { - best_self: HeaderId(0, 0), - best_finalized_self: HeaderId(0, 0), - best_finalized_peer_at_best_self: Some(HeaderId(0, 0)), - actual_best_finalized_peer_at_best_self: Some(HeaderId(0, 0)), - }, - target_latest_received_nonce: 0, - ..Default::default() - })), - Arc::new(|data: &mut TestClientData| { - if data.is_source_reconnected { - data.is_source_fails = false; - data.is_target_fails = true; - } - }), - Arc::new(|_| {}), - Arc::new(move |data: &mut TestClientData| { - if data.is_target_reconnected { - data.is_target_fails = false; - } - if data.target_state.best_finalized_peer_at_best_self.unwrap().0 < 10 { - data.target_state.best_finalized_peer_at_best_self = Some(HeaderId( - 
data.target_state.best_finalized_peer_at_best_self.unwrap().0 + 1, - data.target_state.best_finalized_peer_at_best_self.unwrap().0 + 1, - )); - } - if !data.submitted_messages_proofs.is_empty() { - exit_sender.unbounded_send(()).unwrap(); - } - }), - Arc::new(|_| {}), - exit_receiver.into_future().map(|(_, _)| ()), - ); - - assert_eq!(result.submitted_messages_proofs, vec![(1..=1, None)],); - } - - #[test] - fn message_lane_loop_is_able_to_recover_from_unsuccessful_transaction() { - // with this configuration, both source and target clients will mine their transactions, but - // their corresponding nonce won't be udpated => reconnect will happen - let (exit_sender, exit_receiver) = unbounded(); - let result = run_loop_test( - Arc::new(Mutex::new(TestClientData { - source_state: ClientState { - best_self: HeaderId(0, 0), - best_finalized_self: HeaderId(0, 0), - best_finalized_peer_at_best_self: Some(HeaderId(0, 0)), - actual_best_finalized_peer_at_best_self: Some(HeaderId(0, 0)), - }, - source_latest_generated_nonce: 1, - target_state: ClientState { - best_self: HeaderId(0, 0), - best_finalized_self: HeaderId(0, 0), - best_finalized_peer_at_best_self: Some(HeaderId(0, 0)), - actual_best_finalized_peer_at_best_self: Some(HeaderId(0, 0)), - }, - target_latest_received_nonce: 0, - ..Default::default() - })), - Arc::new(move |data: &mut TestClientData| { - // blocks are produced on every tick - data.source_state.best_self = - HeaderId(data.source_state.best_self.0 + 1, data.source_state.best_self.1 + 1); - data.source_state.best_finalized_self = data.source_state.best_self; - // syncing target headers -> source chain - if let Some(last_requirement) = data.target_to_source_header_requirements.last() { - if *last_requirement != - data.source_state.best_finalized_peer_at_best_self.unwrap() - { - data.source_state.best_finalized_peer_at_best_self = - Some(*last_requirement); - } - } - }), - Arc::new(move |data: &mut TestClientData| { - // if it is the first time we're 
submitting delivery proof, let's revert changes - // to source status => then the delivery confirmation transaction is "finalized", - // but the state is not altered - if data.submitted_messages_receiving_proofs.len() == 1 { - data.source_latest_confirmed_received_nonce = 0; - } - }), - Arc::new(move |data: &mut TestClientData| { - // blocks are produced on every tick - data.target_state.best_self = - HeaderId(data.target_state.best_self.0 + 1, data.target_state.best_self.1 + 1); - data.target_state.best_finalized_self = data.target_state.best_self; - // syncing source headers -> target chain - if let Some(last_requirement) = data.source_to_target_header_requirements.last() { - if *last_requirement != - data.target_state.best_finalized_peer_at_best_self.unwrap() - { - data.target_state.best_finalized_peer_at_best_self = - Some(*last_requirement); - } - } - // if source has received all messages receiving confirmations => stop - if data.source_latest_confirmed_received_nonce == 1 { - exit_sender.unbounded_send(()).unwrap(); - } - }), - Arc::new(move |data: &mut TestClientData| { - // if it is the first time we're submitting messages proof, let's revert changes - // to target status => then the messages delivery transaction is "finalized", but - // the state is not altered - if data.submitted_messages_proofs.len() == 1 { - data.target_latest_received_nonce = 0; - data.target_latest_confirmed_received_nonce = 0; - } - }), - exit_receiver.into_future().map(|(_, _)| ()), - ); - - assert_eq!(result.submitted_messages_proofs.len(), 2); - assert_eq!(result.submitted_messages_receiving_proofs.len(), 2); - } - - #[test] - fn message_lane_loop_works() { - let (exit_sender, exit_receiver) = unbounded(); - let result = run_loop_test( - Arc::new(Mutex::new(TestClientData { - source_state: ClientState { - best_self: HeaderId(10, 10), - best_finalized_self: HeaderId(10, 10), - best_finalized_peer_at_best_self: Some(HeaderId(0, 0)), - actual_best_finalized_peer_at_best_self: 
Some(HeaderId(0, 0)), - }, - source_latest_generated_nonce: 10, - target_state: ClientState { - best_self: HeaderId(0, 0), - best_finalized_self: HeaderId(0, 0), - best_finalized_peer_at_best_self: Some(HeaderId(0, 0)), - actual_best_finalized_peer_at_best_self: Some(HeaderId(0, 0)), - }, - target_latest_received_nonce: 0, - ..Default::default() - })), - Arc::new(|data: &mut TestClientData| { - // blocks are produced on every tick - data.source_state.best_self = - HeaderId(data.source_state.best_self.0 + 1, data.source_state.best_self.1 + 1); - data.source_state.best_finalized_self = data.source_state.best_self; - // headers relay must only be started when we need new target headers at source node - if data.target_to_source_header_required.is_some() { - assert!( - data.source_state.best_finalized_peer_at_best_self.unwrap().0 < - data.target_state.best_self.0 - ); - data.target_to_source_header_required = None; - } - // syncing target headers -> source chain - if let Some(last_requirement) = data.target_to_source_header_requirements.last() { - if *last_requirement != - data.source_state.best_finalized_peer_at_best_self.unwrap() - { - data.source_state.best_finalized_peer_at_best_self = - Some(*last_requirement); - } - } - }), - Arc::new(|_| {}), - Arc::new(move |data: &mut TestClientData| { - // blocks are produced on every tick - data.target_state.best_self = - HeaderId(data.target_state.best_self.0 + 1, data.target_state.best_self.1 + 1); - data.target_state.best_finalized_self = data.target_state.best_self; - // headers relay must only be started when we need new source headers at target node - if data.source_to_target_header_required.is_some() { - assert!( - data.target_state.best_finalized_peer_at_best_self.unwrap().0 < - data.source_state.best_self.0 - ); - data.source_to_target_header_required = None; - } - // syncing source headers -> target chain - if let Some(last_requirement) = data.source_to_target_header_requirements.last() { - if *last_requirement != - 
data.target_state.best_finalized_peer_at_best_self.unwrap() - { - data.target_state.best_finalized_peer_at_best_self = - Some(*last_requirement); - } - } - // if source has received all messages receiving confirmations => stop - if data.source_latest_confirmed_received_nonce == 10 { - exit_sender.unbounded_send(()).unwrap(); - } - }), - Arc::new(|_| {}), - exit_receiver.into_future().map(|(_, _)| ()), - ); - - // there are no strict restrictions on when reward confirmation should come - // (because `max_unconfirmed_nonces_at_target` is `100` in tests and this confirmation - // depends on the state of both clients) - // => we do not check it here - assert_eq!(result.submitted_messages_proofs[0].0, 1..=4); - assert_eq!(result.submitted_messages_proofs[1].0, 5..=8); - assert_eq!(result.submitted_messages_proofs[2].0, 9..=10); - assert!(!result.submitted_messages_receiving_proofs.is_empty()); - - // check that we have at least once required new source->target or target->source headers - assert!(!result.target_to_source_header_requirements.is_empty()); - assert!(!result.source_to_target_header_requirements.is_empty()); - } - - #[test] - fn message_lane_loop_works_with_batch_transactions() { - let (exit_sender, exit_receiver) = unbounded(); - let original_data = Arc::new(Mutex::new(TestClientData { - source_state: ClientState { - best_self: HeaderId(10, 10), - best_finalized_self: HeaderId(10, 10), - best_finalized_peer_at_best_self: Some(HeaderId(0, 0)), - actual_best_finalized_peer_at_best_self: Some(HeaderId(0, 0)), - }, - source_latest_generated_nonce: 10, - target_state: ClientState { - best_self: HeaderId(0, 0), - best_finalized_self: HeaderId(0, 0), - best_finalized_peer_at_best_self: Some(HeaderId(0, 0)), - actual_best_finalized_peer_at_best_self: Some(HeaderId(0, 0)), - }, - target_latest_received_nonce: 0, - ..Default::default() - })); - let result = run_loop_test( - original_data, - Arc::new(|_| {}), - Arc::new(move |data: &mut TestClientData| { - 
data.source_state.best_self = - HeaderId(data.source_state.best_self.0 + 1, data.source_state.best_self.1 + 1); - data.source_state.best_finalized_self = data.source_state.best_self; - if let Some(target_to_source_header_required) = - data.target_to_source_header_required.take() - { - data.target_to_source_batch_transaction = - Some(TestConfirmationBatchTransaction { - required_header_id: target_to_source_header_required, - }) - } - }), - Arc::new(|_| {}), - Arc::new(move |data: &mut TestClientData| { - data.target_state.best_self = - HeaderId(data.target_state.best_self.0 + 1, data.target_state.best_self.1 + 1); - data.target_state.best_finalized_self = data.target_state.best_self; - - if let Some(source_to_target_header_required) = - data.source_to_target_header_required.take() - { - data.source_to_target_batch_transaction = Some(TestMessagesBatchTransaction { - required_header_id: source_to_target_header_required, - }) - } - - if data.source_latest_confirmed_received_nonce == 10 { - exit_sender.unbounded_send(()).unwrap(); - } - }), - exit_receiver.into_future().map(|(_, _)| ()), - ); - - // there are no strict restrictions on when reward confirmation should come - // (because `max_unconfirmed_nonces_at_target` is `100` in tests and this confirmation - // depends on the state of both clients) - // => we do not check it here - assert_eq!(result.submitted_messages_proofs[0].0, 1..=4); - assert_eq!(result.submitted_messages_proofs[1].0, 5..=8); - assert_eq!(result.submitted_messages_proofs[2].0, 9..=10); - assert!(!result.submitted_messages_receiving_proofs.is_empty()); - - // check that we have at least once required new source->target or target->source headers - assert!(!result.target_to_source_header_requirements.is_empty()); - assert!(!result.source_to_target_header_requirements.is_empty()); - } -} diff --git a/relays/messages/src/message_race_delivery.rs b/relays/messages/src/message_race_delivery.rs deleted file mode 100644 index 
137deb5b74f757aa111d5652cbb251a94979e166..0000000000000000000000000000000000000000 --- a/relays/messages/src/message_race_delivery.rs +++ /dev/null @@ -1,1405 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -//! Message delivery race delivers proof-of-messages from "lane.source" to "lane.target". - -use std::{collections::VecDeque, marker::PhantomData, ops::RangeInclusive}; - -use async_trait::async_trait; -use futures::stream::FusedStream; - -use bp_messages::{MessageNonce, UnrewardedRelayersState, Weight}; -use relay_utils::FailedClient; - -use crate::{ - message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}, - message_lane_loop::{ - MessageDeliveryParams, MessageDetailsMap, MessageProofParameters, NoncesSubmitArtifacts, - SourceClient as MessageLaneSourceClient, SourceClientState, - TargetClient as MessageLaneTargetClient, TargetClientState, - }, - message_race_limits::{MessageRaceLimits, RelayMessagesBatchReference}, - message_race_loop::{ - MessageRace, NoncesRange, RaceState, RaceStrategy, SourceClient, SourceClientNonces, - TargetClient, TargetClientNonces, - }, - message_race_strategy::BasicStrategy, - metrics::MessageLaneLoopMetrics, -}; - -/// Run message delivery race. -pub async fn run( - source_client: impl MessageLaneSourceClient

, - source_state_updates: impl FusedStream>, - target_client: impl MessageLaneTargetClient

, - target_state_updates: impl FusedStream>, - metrics_msg: Option, - params: MessageDeliveryParams, -) -> Result<(), FailedClient> { - crate::message_race_loop::run( - MessageDeliveryRaceSource { - client: source_client.clone(), - metrics_msg: metrics_msg.clone(), - _phantom: Default::default(), - }, - source_state_updates, - MessageDeliveryRaceTarget { - client: target_client.clone(), - metrics_msg: metrics_msg.clone(), - _phantom: Default::default(), - }, - target_state_updates, - MessageDeliveryStrategy:: { - lane_source_client: source_client, - lane_target_client: target_client, - max_unrewarded_relayer_entries_at_target: params - .max_unrewarded_relayer_entries_at_target, - max_unconfirmed_nonces_at_target: params.max_unconfirmed_nonces_at_target, - max_messages_in_single_batch: params.max_messages_in_single_batch, - max_messages_weight_in_single_batch: params.max_messages_weight_in_single_batch, - max_messages_size_in_single_batch: params.max_messages_size_in_single_batch, - latest_confirmed_nonces_at_source: VecDeque::new(), - target_nonces: None, - strategy: BasicStrategy::new(), - metrics_msg, - }, - ) - .await -} - -/// Message delivery race. -struct MessageDeliveryRace

(std::marker::PhantomData

); - -impl MessageRace for MessageDeliveryRace

{ - type SourceHeaderId = SourceHeaderIdOf

; - type TargetHeaderId = TargetHeaderIdOf

; - - type MessageNonce = MessageNonce; - type Proof = P::MessagesProof; - - fn source_name() -> String { - format!("{}::MessagesDelivery", P::SOURCE_NAME) - } - - fn target_name() -> String { - format!("{}::MessagesDelivery", P::TARGET_NAME) - } -} - -/// Message delivery race source, which is a source of the lane. -struct MessageDeliveryRaceSource { - client: C, - metrics_msg: Option, - _phantom: PhantomData

, -} - -#[async_trait] -impl SourceClient> for MessageDeliveryRaceSource -where - P: MessageLane, - C: MessageLaneSourceClient

, -{ - type Error = C::Error; - type NoncesRange = MessageDetailsMap; - type ProofParameters = MessageProofParameters; - - async fn nonces( - &self, - at_block: SourceHeaderIdOf

, - prev_latest_nonce: MessageNonce, - ) -> Result<(SourceHeaderIdOf

, SourceClientNonces), Self::Error> { - let (at_block, latest_generated_nonce) = - self.client.latest_generated_nonce(at_block).await?; - let (at_block, latest_confirmed_nonce) = - self.client.latest_confirmed_received_nonce(at_block).await?; - - if let Some(metrics_msg) = self.metrics_msg.as_ref() { - metrics_msg.update_source_latest_generated_nonce(latest_generated_nonce); - metrics_msg.update_source_latest_confirmed_nonce(latest_confirmed_nonce); - } - - let new_nonces = if latest_generated_nonce > prev_latest_nonce { - self.client - .generated_message_details( - at_block.clone(), - prev_latest_nonce + 1..=latest_generated_nonce, - ) - .await? - } else { - MessageDetailsMap::new() - }; - - Ok(( - at_block, - SourceClientNonces { new_nonces, confirmed_nonce: Some(latest_confirmed_nonce) }, - )) - } - - async fn generate_proof( - &self, - at_block: SourceHeaderIdOf

, - nonces: RangeInclusive, - proof_parameters: Self::ProofParameters, - ) -> Result<(SourceHeaderIdOf

, RangeInclusive, P::MessagesProof), Self::Error> - { - self.client.prove_messages(at_block, nonces, proof_parameters).await - } -} - -/// Message delivery race target, which is a target of the lane. -struct MessageDeliveryRaceTarget { - client: C, - metrics_msg: Option, - _phantom: PhantomData

, -} - -#[async_trait] -impl TargetClient> for MessageDeliveryRaceTarget -where - P: MessageLane, - C: MessageLaneTargetClient

, -{ - type Error = C::Error; - type TargetNoncesData = DeliveryRaceTargetNoncesData; - type BatchTransaction = C::BatchTransaction; - type TransactionTracker = C::TransactionTracker; - - async fn require_source_header( - &self, - id: SourceHeaderIdOf

, - ) -> Result, Self::Error> { - self.client.require_source_header_on_target(id).await - } - - async fn nonces( - &self, - at_block: TargetHeaderIdOf

, - update_metrics: bool, - ) -> Result<(TargetHeaderIdOf

, TargetClientNonces), Self::Error> - { - let (at_block, latest_received_nonce) = self.client.latest_received_nonce(at_block).await?; - let (at_block, latest_confirmed_nonce) = - self.client.latest_confirmed_received_nonce(at_block).await?; - let (at_block, unrewarded_relayers) = - self.client.unrewarded_relayers_state(at_block).await?; - - if update_metrics { - if let Some(metrics_msg) = self.metrics_msg.as_ref() { - metrics_msg.update_target_latest_received_nonce(latest_received_nonce); - metrics_msg.update_target_latest_confirmed_nonce(latest_confirmed_nonce); - } - } - - Ok(( - at_block, - TargetClientNonces { - latest_nonce: latest_received_nonce, - nonces_data: DeliveryRaceTargetNoncesData { - confirmed_nonce: latest_confirmed_nonce, - unrewarded_relayers, - }, - }, - )) - } - - async fn submit_proof( - &self, - maybe_batch_tx: Option, - generated_at_block: SourceHeaderIdOf

, - nonces: RangeInclusive, - proof: P::MessagesProof, - ) -> Result, Self::Error> { - self.client - .submit_messages_proof(maybe_batch_tx, generated_at_block, nonces, proof) - .await - } -} - -/// Additional nonces data from the target client used by message delivery race. -#[derive(Debug, Clone)] -struct DeliveryRaceTargetNoncesData { - /// The latest nonce that we know: (1) has been delivered to us (2) has been confirmed - /// back to the source node (by confirmations race) and (3) relayer has received - /// reward for (and this has been confirmed by the message delivery race). - confirmed_nonce: MessageNonce, - /// State of the unrewarded relayers set at the target node. - unrewarded_relayers: UnrewardedRelayersState, -} - -/// Messages delivery strategy. -struct MessageDeliveryStrategy { - /// The client that is connected to the message lane source node. - lane_source_client: SC, - /// The client that is connected to the message lane target node. - lane_target_client: TC, - /// Maximal unrewarded relayer entries at target client. - max_unrewarded_relayer_entries_at_target: MessageNonce, - /// Maximal unconfirmed nonces at target client. - max_unconfirmed_nonces_at_target: MessageNonce, - /// Maximal number of messages in the single delivery transaction. - max_messages_in_single_batch: MessageNonce, - /// Maximal cumulative messages weight in the single delivery transaction. - max_messages_weight_in_single_batch: Weight, - /// Maximal messages size in the single delivery transaction. - max_messages_size_in_single_batch: u32, - /// Latest confirmed nonces at the source client + the header id where we have first met this - /// nonce. - latest_confirmed_nonces_at_source: VecDeque<(SourceHeaderIdOf

, MessageNonce)>, - /// Target nonces available at the **best** block of the target chain. - target_nonces: Option>, - /// Basic delivery strategy. - strategy: MessageDeliveryStrategyBase

, - /// Message lane metrics. - metrics_msg: Option, -} - -type MessageDeliveryStrategyBase

= BasicStrategy< -

::SourceHeaderNumber, -

::SourceHeaderHash, -

::TargetHeaderNumber, -

::TargetHeaderHash, - MessageDetailsMap<

::SourceChainBalance>, -

::MessagesProof, ->; - -impl std::fmt::Debug for MessageDeliveryStrategy { - fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - fmt.debug_struct("MessageDeliveryStrategy") - .field( - "max_unrewarded_relayer_entries_at_target", - &self.max_unrewarded_relayer_entries_at_target, - ) - .field("max_unconfirmed_nonces_at_target", &self.max_unconfirmed_nonces_at_target) - .field("max_messages_in_single_batch", &self.max_messages_in_single_batch) - .field("max_messages_weight_in_single_batch", &self.max_messages_weight_in_single_batch) - .field("max_messages_size_in_single_batch", &self.max_messages_size_in_single_batch) - .field("latest_confirmed_nonces_at_source", &self.latest_confirmed_nonces_at_source) - .field("target_nonces", &self.target_nonces) - .field("strategy", &self.strategy) - .finish() - } -} - -impl MessageDeliveryStrategy -where - P: MessageLane, - SC: MessageLaneSourceClient

, - TC: MessageLaneTargetClient

, -{ - /// Returns true if some race action can be selected (with `select_race_action`) at given - /// `best_finalized_source_header_id_at_best_target` source header at target. - async fn can_submit_transaction_with< - RS: RaceState, TargetHeaderIdOf

>, - >( - &self, - mut race_state: RS, - maybe_best_finalized_source_header_id_at_best_target: Option>, - ) -> bool { - if let Some(best_finalized_source_header_id_at_best_target) = - maybe_best_finalized_source_header_id_at_best_target - { - race_state.set_best_finalized_source_header_id_at_best_target( - best_finalized_source_header_id_at_best_target, - ); - - return self.select_race_action(race_state).await.is_some() - } - - false - } - - async fn select_race_action, TargetHeaderIdOf

>>( - &self, - race_state: RS, - ) -> Option<(RangeInclusive, MessageProofParameters)> { - // if we have already selected nonces that we want to submit, do nothing - if race_state.nonces_to_submit().is_some() { - return None - } - - // if we already submitted some nonces, do nothing - if race_state.nonces_submitted().is_some() { - return None - } - - let best_target_nonce = self.strategy.best_at_target()?; - let best_finalized_source_header_id_at_best_target = - race_state.best_finalized_source_header_id_at_best_target()?; - let target_nonces = self.target_nonces.as_ref()?; - let latest_confirmed_nonce_at_source = self - .latest_confirmed_nonce_at_source(&best_finalized_source_header_id_at_best_target) - .unwrap_or(target_nonces.nonces_data.confirmed_nonce); - - // There's additional condition in the message delivery race: target would reject messages - // if there are too much unconfirmed messages at the inbound lane. - - // Ok - we may have new nonces to deliver. But target may still reject new messages, because - // we haven't notified it that (some) messages have been confirmed. So we may want to - // include updated `source.latest_confirmed` in the proof. - // - // Important note: we're including outbound state lane proof whenever there are unconfirmed - // nonces on the target chain. Other strategy is to include it only if it's absolutely - // necessary. - let latest_received_nonce_at_target = target_nonces.latest_nonce; - let latest_confirmed_nonce_at_target = target_nonces.nonces_data.confirmed_nonce; - let outbound_state_proof_required = - latest_confirmed_nonce_at_target < latest_confirmed_nonce_at_source; - - // The target node would also reject messages if there are too many entries in the - // "unrewarded relayers" set. If we are unable to prove new rewards to the target node, then - // we should wait for confirmations race. 
- let unrewarded_limit_reached = - target_nonces.nonces_data.unrewarded_relayers.unrewarded_relayer_entries >= - self.max_unrewarded_relayer_entries_at_target || - target_nonces.nonces_data.unrewarded_relayers.total_messages >= - self.max_unconfirmed_nonces_at_target; - if unrewarded_limit_reached { - // so there are already too many unrewarded relayer entries in the set - // - // => check if we can prove enough rewards. If not, we should wait for more rewards to - // be paid - let number_of_rewards_being_proved = - latest_confirmed_nonce_at_source.saturating_sub(latest_confirmed_nonce_at_target); - let enough_rewards_being_proved = number_of_rewards_being_proved >= - target_nonces.nonces_data.unrewarded_relayers.messages_in_oldest_entry; - if !enough_rewards_being_proved { - return None - } - } - - // If we're here, then the confirmations race did its job && sending side now knows that - // messages have been delivered. Now let's select nonces that we want to deliver. - // - // We may deliver at most: - // - // max_unconfirmed_nonces_at_target - (latest_received_nonce_at_target - - // latest_confirmed_nonce_at_target) - // - // messages in the batch. 
But since we're including outbound state proof in the batch, then - // it may be increased to: - // - // max_unconfirmed_nonces_at_target - (latest_received_nonce_at_target - - // latest_confirmed_nonce_at_source) - let future_confirmed_nonce_at_target = if outbound_state_proof_required { - latest_confirmed_nonce_at_source - } else { - latest_confirmed_nonce_at_target - }; - let max_nonces = latest_received_nonce_at_target - .checked_sub(future_confirmed_nonce_at_target) - .and_then(|diff| self.max_unconfirmed_nonces_at_target.checked_sub(diff)) - .unwrap_or_default(); - let max_nonces = std::cmp::min(max_nonces, self.max_messages_in_single_batch); - let max_messages_weight_in_single_batch = self.max_messages_weight_in_single_batch; - let max_messages_size_in_single_batch = self.max_messages_size_in_single_batch; - let lane_source_client = self.lane_source_client.clone(); - let lane_target_client = self.lane_target_client.clone(); - - // select nonces from nonces, available for delivery - let selected_nonces = match self.strategy.available_source_queue_indices(race_state) { - Some(available_source_queue_indices) => { - let source_queue = self.strategy.source_queue(); - let reference = RelayMessagesBatchReference { - max_messages_in_this_batch: max_nonces, - max_messages_weight_in_single_batch, - max_messages_size_in_single_batch, - lane_source_client: lane_source_client.clone(), - lane_target_client: lane_target_client.clone(), - best_target_nonce, - nonces_queue: source_queue.clone(), - nonces_queue_range: available_source_queue_indices, - metrics: self.metrics_msg.clone(), - }; - - MessageRaceLimits::decide(reference).await - }, - None => { - // we still may need to submit delivery transaction with zero messages to - // unblock the lane. But it'll only be accepted if the lane is blocked - // (i.e. 
when `unrewarded_limit_reached` is `true`) - None - }, - }; - - // check if we need unblocking transaction and we may submit it - #[allow(clippy::reversed_empty_ranges)] - let selected_nonces = match selected_nonces { - Some(selected_nonces) => selected_nonces, - None if unrewarded_limit_reached && outbound_state_proof_required => 1..=0, - _ => return None, - }; - - let dispatch_weight = self.dispatch_weight_for_range(&selected_nonces); - Some(( - selected_nonces, - MessageProofParameters { outbound_state_proof_required, dispatch_weight }, - )) - } - - /// Returns lastest confirmed message at source chain, given source block. - fn latest_confirmed_nonce_at_source(&self, at: &SourceHeaderIdOf

) -> Option { - self.latest_confirmed_nonces_at_source - .iter() - .take_while(|(id, _)| id.0 <= at.0) - .last() - .map(|(_, nonce)| *nonce) - } - - /// Returns total weight of all undelivered messages. - fn dispatch_weight_for_range(&self, range: &RangeInclusive) -> Weight { - self.strategy - .source_queue() - .iter() - .flat_map(|(_, subrange)| { - subrange - .iter() - .filter(|(nonce, _)| range.contains(nonce)) - .map(|(_, details)| details.dispatch_weight) - }) - .fold(Weight::zero(), |total, weight| total.saturating_add(weight)) - } -} - -#[async_trait] -impl RaceStrategy, TargetHeaderIdOf

, P::MessagesProof> - for MessageDeliveryStrategy -where - P: MessageLane, - SC: MessageLaneSourceClient

, - TC: MessageLaneTargetClient

, -{ - type SourceNoncesRange = MessageDetailsMap; - type ProofParameters = MessageProofParameters; - type TargetNoncesData = DeliveryRaceTargetNoncesData; - - fn is_empty(&self) -> bool { - self.strategy.is_empty() - } - - async fn required_source_header_at_target< - RS: RaceState, TargetHeaderIdOf

>, - >( - &self, - race_state: RS, - ) -> Option> { - // we have already submitted something - let's wait until it is mined - if race_state.nonces_submitted().is_some() { - return None - } - - // if we can deliver something using current race state, go on - let selected_nonces = self.select_race_action(race_state.clone()).await; - if selected_nonces.is_some() { - return None - } - - // check if we may deliver some messages if we'll relay require source header - // to target first - let maybe_source_header_for_delivery = - self.strategy.source_queue().back().map(|(id, _)| id.clone()); - if self - .can_submit_transaction_with( - race_state.clone(), - maybe_source_header_for_delivery.clone(), - ) - .await - { - return maybe_source_header_for_delivery - } - - // ok, we can't delivery anything even if we relay some source blocks first. But maybe - // the lane is blocked and we need to submit unblock transaction? - let maybe_source_header_for_reward_confirmation = - self.latest_confirmed_nonces_at_source.back().map(|(id, _)| id.clone()); - if self - .can_submit_transaction_with( - race_state.clone(), - maybe_source_header_for_reward_confirmation.clone(), - ) - .await - { - return maybe_source_header_for_reward_confirmation - } - - None - } - - fn best_at_source(&self) -> Option { - self.strategy.best_at_source() - } - - fn best_at_target(&self) -> Option { - self.strategy.best_at_target() - } - - fn source_nonces_updated( - &mut self, - at_block: SourceHeaderIdOf

, - nonces: SourceClientNonces, - ) { - if let Some(confirmed_nonce) = nonces.confirmed_nonce { - let is_confirmed_nonce_updated = self - .latest_confirmed_nonces_at_source - .back() - .map(|(_, prev_nonce)| *prev_nonce != confirmed_nonce) - .unwrap_or(true); - if is_confirmed_nonce_updated { - self.latest_confirmed_nonces_at_source - .push_back((at_block.clone(), confirmed_nonce)); - } - } - self.strategy.source_nonces_updated(at_block, nonces) - } - - fn reset_best_target_nonces(&mut self) { - self.target_nonces = None; - self.strategy.reset_best_target_nonces(); - } - - fn best_target_nonces_updated, TargetHeaderIdOf

>>( - &mut self, - nonces: TargetClientNonces, - race_state: &mut RS, - ) { - // best target nonces must always be ge than finalized target nonces - let latest_nonce = nonces.latest_nonce; - self.target_nonces = Some(nonces); - - self.strategy.best_target_nonces_updated( - TargetClientNonces { latest_nonce, nonces_data: () }, - race_state, - ) - } - - fn finalized_target_nonces_updated, TargetHeaderIdOf

>>( - &mut self, - nonces: TargetClientNonces, - race_state: &mut RS, - ) { - if let Some(ref best_finalized_source_header_id_at_best_target) = - race_state.best_finalized_source_header_id_at_best_target() - { - let oldest_header_number_to_keep = best_finalized_source_header_id_at_best_target.0; - while self - .latest_confirmed_nonces_at_source - .front() - .map(|(id, _)| id.0 < oldest_header_number_to_keep) - .unwrap_or(false) - { - self.latest_confirmed_nonces_at_source.pop_front(); - } - } - - if let Some(ref mut target_nonces) = self.target_nonces { - target_nonces.latest_nonce = - std::cmp::max(target_nonces.latest_nonce, nonces.latest_nonce); - } - - self.strategy.finalized_target_nonces_updated( - TargetClientNonces { latest_nonce: nonces.latest_nonce, nonces_data: () }, - race_state, - ) - } - - async fn select_nonces_to_deliver, TargetHeaderIdOf

>>( - &self, - race_state: RS, - ) -> Option<(RangeInclusive, Self::ProofParameters)> { - self.select_race_action(race_state).await - } -} - -impl NoncesRange for MessageDetailsMap { - fn begin(&self) -> MessageNonce { - self.keys().next().cloned().unwrap_or_default() - } - - fn end(&self) -> MessageNonce { - self.keys().next_back().cloned().unwrap_or_default() - } - - fn greater_than(mut self, nonce: MessageNonce) -> Option { - let gte = self.split_off(&(nonce + 1)); - if gte.is_empty() { - None - } else { - Some(gte) - } - } -} - -#[cfg(test)] -mod tests { - use crate::{ - message_lane_loop::{ - tests::{ - header_id, TestMessageLane, TestMessagesBatchTransaction, TestMessagesProof, - TestSourceChainBalance, TestSourceClient, TestSourceHeaderId, TestTargetClient, - TestTargetHeaderId, - }, - MessageDetails, - }, - message_race_loop::RaceStateImpl, - }; - - use super::*; - - const DEFAULT_DISPATCH_WEIGHT: Weight = Weight::from_parts(1, 0); - const DEFAULT_SIZE: u32 = 1; - - type TestRaceState = RaceStateImpl< - TestSourceHeaderId, - TestTargetHeaderId, - TestMessagesProof, - TestMessagesBatchTransaction, - >; - type TestStrategy = - MessageDeliveryStrategy; - - fn source_nonces( - new_nonces: RangeInclusive, - confirmed_nonce: MessageNonce, - reward: TestSourceChainBalance, - ) -> SourceClientNonces> { - SourceClientNonces { - new_nonces: new_nonces - .into_iter() - .map(|nonce| { - ( - nonce, - MessageDetails { - dispatch_weight: DEFAULT_DISPATCH_WEIGHT, - size: DEFAULT_SIZE, - reward, - }, - ) - }) - .collect(), - confirmed_nonce: Some(confirmed_nonce), - } - } - - fn prepare_strategy() -> (TestRaceState, TestStrategy) { - let mut race_state = RaceStateImpl { - best_finalized_source_header_id_at_source: Some(header_id(1)), - best_finalized_source_header_id_at_best_target: Some(header_id(1)), - best_target_header_id: Some(header_id(1)), - best_finalized_target_header_id: Some(header_id(1)), - nonces_to_submit: None, - nonces_to_submit_batch: None, - 
nonces_submitted: None, - }; - - let mut race_strategy = TestStrategy { - max_unrewarded_relayer_entries_at_target: 4, - max_unconfirmed_nonces_at_target: 4, - max_messages_in_single_batch: 4, - max_messages_weight_in_single_batch: Weight::from_parts(4, 0), - max_messages_size_in_single_batch: 4, - latest_confirmed_nonces_at_source: vec![(header_id(1), 19)].into_iter().collect(), - lane_source_client: TestSourceClient::default(), - lane_target_client: TestTargetClient::default(), - metrics_msg: None, - target_nonces: Some(TargetClientNonces { - latest_nonce: 19, - nonces_data: DeliveryRaceTargetNoncesData { - confirmed_nonce: 19, - unrewarded_relayers: UnrewardedRelayersState { - unrewarded_relayer_entries: 0, - messages_in_oldest_entry: 0, - total_messages: 0, - last_delivered_nonce: 0, - }, - }, - }), - strategy: BasicStrategy::new(), - }; - - race_strategy - .strategy - .source_nonces_updated(header_id(1), source_nonces(20..=23, 19, 0)); - - let target_nonces = TargetClientNonces { latest_nonce: 19, nonces_data: () }; - race_strategy - .strategy - .best_target_nonces_updated(target_nonces.clone(), &mut race_state); - race_strategy - .strategy - .finalized_target_nonces_updated(target_nonces, &mut race_state); - - (race_state, race_strategy) - } - - fn proof_parameters(state_required: bool, weight: u32) -> MessageProofParameters { - MessageProofParameters { - outbound_state_proof_required: state_required, - dispatch_weight: Weight::from_parts(weight as u64, 0), - } - } - - #[test] - fn weights_map_works_as_nonces_range() { - fn build_map( - range: RangeInclusive, - ) -> MessageDetailsMap { - range - .map(|idx| { - ( - idx, - MessageDetails { - dispatch_weight: Weight::from_parts(idx, 0), - size: idx as _, - reward: idx as _, - }, - ) - }) - .collect() - } - - let map = build_map(20..=30); - - assert_eq!(map.begin(), 20); - assert_eq!(map.end(), 30); - assert_eq!(map.clone().greater_than(10), Some(build_map(20..=30))); - assert_eq!(map.clone().greater_than(19), 
Some(build_map(20..=30))); - assert_eq!(map.clone().greater_than(20), Some(build_map(21..=30))); - assert_eq!(map.clone().greater_than(25), Some(build_map(26..=30))); - assert_eq!(map.clone().greater_than(29), Some(build_map(30..=30))); - assert_eq!(map.greater_than(30), None); - } - - #[async_std::test] - async fn message_delivery_strategy_selects_messages_to_deliver() { - let (state, strategy) = prepare_strategy(); - - // both sides are ready to relay new messages - assert_eq!( - strategy.select_nonces_to_deliver(state).await, - Some(((20..=23), proof_parameters(false, 4))) - ); - } - - #[async_std::test] - async fn message_delivery_strategy_includes_outbound_state_proof_when_new_nonces_are_available() - { - let (state, mut strategy) = prepare_strategy(); - - // if there are new confirmed nonces on source, we want to relay this information - // to target to prune rewards queue - let prev_confirmed_nonce_at_source = - strategy.latest_confirmed_nonces_at_source.back().unwrap().1; - strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce = - prev_confirmed_nonce_at_source - 1; - assert_eq!( - strategy.select_nonces_to_deliver(state).await, - Some(((20..=23), proof_parameters(true, 4))) - ); - } - - #[async_std::test] - async fn message_delivery_strategy_selects_nothing_if_there_are_too_many_unrewarded_relayers() { - let (state, mut strategy) = prepare_strategy(); - - // if there are already `max_unrewarded_relayer_entries_at_target` entries at target, - // we need to wait until rewards will be paid - { - let unrewarded_relayers = - &mut strategy.target_nonces.as_mut().unwrap().nonces_data.unrewarded_relayers; - unrewarded_relayers.unrewarded_relayer_entries = - strategy.max_unrewarded_relayer_entries_at_target; - unrewarded_relayers.messages_in_oldest_entry = 4; - } - assert_eq!(strategy.select_nonces_to_deliver(state).await, None); - } - - #[async_std::test] - async fn 
message_delivery_strategy_selects_nothing_if_proved_rewards_is_not_enough_to_remove_oldest_unrewarded_entry( - ) { - let (state, mut strategy) = prepare_strategy(); - - // if there are already `max_unrewarded_relayer_entries_at_target` entries at target, - // we need to prove at least `messages_in_oldest_entry` rewards - let prev_confirmed_nonce_at_source = - strategy.latest_confirmed_nonces_at_source.back().unwrap().1; - { - let nonces_data = &mut strategy.target_nonces.as_mut().unwrap().nonces_data; - nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 1; - let unrewarded_relayers = &mut nonces_data.unrewarded_relayers; - unrewarded_relayers.unrewarded_relayer_entries = - strategy.max_unrewarded_relayer_entries_at_target; - unrewarded_relayers.messages_in_oldest_entry = 4; - } - assert_eq!(strategy.select_nonces_to_deliver(state).await, None); - } - - #[async_std::test] - async fn message_delivery_strategy_includes_outbound_state_proof_if_proved_rewards_is_enough() { - let (state, mut strategy) = prepare_strategy(); - - // if there are already `max_unrewarded_relayer_entries_at_target` entries at target, - // we need to prove at least `messages_in_oldest_entry` rewards - let prev_confirmed_nonce_at_source = - strategy.latest_confirmed_nonces_at_source.back().unwrap().1; - { - let nonces_data = &mut strategy.target_nonces.as_mut().unwrap().nonces_data; - nonces_data.confirmed_nonce = prev_confirmed_nonce_at_source - 3; - let unrewarded_relayers = &mut nonces_data.unrewarded_relayers; - unrewarded_relayers.unrewarded_relayer_entries = - strategy.max_unrewarded_relayer_entries_at_target; - unrewarded_relayers.messages_in_oldest_entry = 3; - } - assert_eq!( - strategy.select_nonces_to_deliver(state).await, - Some(((20..=23), proof_parameters(true, 4))) - ); - } - - #[async_std::test] - async fn message_delivery_strategy_limits_batch_by_messages_weight() { - let (state, mut strategy) = prepare_strategy(); - - // not all queued messages may fit in the batch, 
because batch has max weight - strategy.max_messages_weight_in_single_batch = Weight::from_parts(3, 0); - assert_eq!( - strategy.select_nonces_to_deliver(state).await, - Some(((20..=22), proof_parameters(false, 3))) - ); - } - - #[async_std::test] - async fn message_delivery_strategy_accepts_single_message_even_if_its_weight_overflows_maximal_weight( - ) { - let (state, mut strategy) = prepare_strategy(); - - // first message doesn't fit in the batch, because it has weight (10) that overflows max - // weight (4) - strategy.strategy.source_queue_mut()[0].1.get_mut(&20).unwrap().dispatch_weight = - Weight::from_parts(10, 0); - assert_eq!( - strategy.select_nonces_to_deliver(state).await, - Some(((20..=20), proof_parameters(false, 10))) - ); - } - - #[async_std::test] - async fn message_delivery_strategy_limits_batch_by_messages_size() { - let (state, mut strategy) = prepare_strategy(); - - // not all queued messages may fit in the batch, because batch has max weight - strategy.max_messages_size_in_single_batch = 3; - assert_eq!( - strategy.select_nonces_to_deliver(state).await, - Some(((20..=22), proof_parameters(false, 3))) - ); - } - - #[async_std::test] - async fn message_delivery_strategy_accepts_single_message_even_if_its_weight_overflows_maximal_size( - ) { - let (state, mut strategy) = prepare_strategy(); - - // first message doesn't fit in the batch, because it has weight (10) that overflows max - // weight (4) - strategy.strategy.source_queue_mut()[0].1.get_mut(&20).unwrap().size = 10; - assert_eq!( - strategy.select_nonces_to_deliver(state).await, - Some(((20..=20), proof_parameters(false, 1))) - ); - } - - #[async_std::test] - async fn message_delivery_strategy_limits_batch_by_messages_count_when_there_is_upper_limit() { - let (state, mut strategy) = prepare_strategy(); - - // not all queued messages may fit in the batch, because batch has max number of messages - // limit - strategy.max_messages_in_single_batch = 3; - assert_eq!( - 
strategy.select_nonces_to_deliver(state).await, - Some(((20..=22), proof_parameters(false, 3))) - ); - } - - #[async_std::test] - async fn message_delivery_strategy_limits_batch_by_messages_count_when_there_are_unconfirmed_nonces( - ) { - let (state, mut strategy) = prepare_strategy(); - - // 1 delivery confirmation from target to source is still missing, so we may only - // relay 3 new messages - let prev_confirmed_nonce_at_source = - strategy.latest_confirmed_nonces_at_source.back().unwrap().1; - strategy.latest_confirmed_nonces_at_source = - vec![(header_id(1), prev_confirmed_nonce_at_source - 1)].into_iter().collect(); - strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce = - prev_confirmed_nonce_at_source - 1; - assert_eq!( - strategy.select_nonces_to_deliver(state).await, - Some(((20..=22), proof_parameters(false, 3))) - ); - } - - #[async_std::test] - async fn message_delivery_strategy_waits_for_confirmed_nonce_header_to_appear_on_target() { - // 1 delivery confirmation from target to source is still missing, so we may deliver - // reward confirmation with our message delivery transaction. But the problem is that - // the reward has been paid at header 2 && this header is still unknown to target node. 
- // - // => so we can't deliver more than 3 messages - let (mut state, mut strategy) = prepare_strategy(); - let prev_confirmed_nonce_at_source = - strategy.latest_confirmed_nonces_at_source.back().unwrap().1; - strategy.latest_confirmed_nonces_at_source = vec![ - (header_id(1), prev_confirmed_nonce_at_source - 1), - (header_id(2), prev_confirmed_nonce_at_source), - ] - .into_iter() - .collect(); - strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce = - prev_confirmed_nonce_at_source - 1; - state.best_finalized_source_header_id_at_best_target = Some(header_id(1)); - assert_eq!( - strategy.select_nonces_to_deliver(state).await, - Some(((20..=22), proof_parameters(false, 3))) - ); - - // the same situation, but the header 2 is known to the target node, so we may deliver - // reward confirmation - let (mut state, mut strategy) = prepare_strategy(); - let prev_confirmed_nonce_at_source = - strategy.latest_confirmed_nonces_at_source.back().unwrap().1; - strategy.latest_confirmed_nonces_at_source = vec![ - (header_id(1), prev_confirmed_nonce_at_source - 1), - (header_id(2), prev_confirmed_nonce_at_source), - ] - .into_iter() - .collect(); - strategy.target_nonces.as_mut().unwrap().nonces_data.confirmed_nonce = - prev_confirmed_nonce_at_source - 1; - state.best_finalized_source_header_id_at_source = Some(header_id(2)); - state.best_finalized_source_header_id_at_best_target = Some(header_id(2)); - assert_eq!( - strategy.select_nonces_to_deliver(state).await, - Some(((20..=23), proof_parameters(true, 4))) - ); - } - - #[async_std::test] - async fn source_header_is_required_when_confirmations_are_required() { - // let's prepare situation when: - // - all messages [20; 23] have been generated at source block#1; - let (mut state, mut strategy) = prepare_strategy(); - // - // - messages [20; 23] have been delivered - assert_eq!( - strategy.select_nonces_to_deliver(state.clone()).await, - Some(((20..=23), proof_parameters(false, 4))) - ); - 
strategy.finalized_target_nonces_updated( - TargetClientNonces { - latest_nonce: 23, - nonces_data: DeliveryRaceTargetNoncesData { - confirmed_nonce: 19, - unrewarded_relayers: UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - messages_in_oldest_entry: 4, - total_messages: 4, - last_delivered_nonce: 23, - }, - }, - }, - &mut state, - ); - // nothing needs to be delivered now and we don't need any new headers - assert_eq!(strategy.select_nonces_to_deliver(state.clone()).await, None); - assert_eq!(strategy.required_source_header_at_target(state.clone()).await, None); - - // block#2 is generated - state.best_finalized_source_header_id_at_source = Some(header_id(2)); - state.best_finalized_source_header_id_at_best_target = Some(header_id(2)); - state.best_target_header_id = Some(header_id(2)); - state.best_finalized_target_header_id = Some(header_id(2)); - - // now let's generate two more nonces [24; 25] at the source; - strategy.source_nonces_updated(header_id(2), source_nonces(24..=25, 19, 0)); - // - // we don't need to relay more headers to target, because messages [20; 23] have - // not confirmed to source yet - assert_eq!(strategy.select_nonces_to_deliver(state.clone()).await, None); - assert_eq!(strategy.required_source_header_at_target(state.clone()).await, None); - - // let's relay source block#3 - state.best_finalized_source_header_id_at_source = Some(header_id(3)); - state.best_finalized_source_header_id_at_best_target = Some(header_id(3)); - state.best_target_header_id = Some(header_id(3)); - state.best_finalized_target_header_id = Some(header_id(3)); - - // and ask strategy again => still nothing to deliver, because parallel confirmations - // race need to be pushed further - assert_eq!(strategy.select_nonces_to_deliver(state.clone()).await, None); - assert_eq!(strategy.required_source_header_at_target(state.clone()).await, None); - - // let's relay source block#3 - state.best_finalized_source_header_id_at_source = Some(header_id(4)); - 
state.best_finalized_source_header_id_at_best_target = Some(header_id(4)); - state.best_target_header_id = Some(header_id(4)); - state.best_finalized_target_header_id = Some(header_id(4)); - - // let's confirm messages [20; 23] - strategy.source_nonces_updated(header_id(4), source_nonces(24..=25, 23, 0)); - - // and ask strategy again => now we have everything required to deliver remaining - // [24; 25] nonces and proof of [20; 23] confirmation - assert_eq!( - strategy.select_nonces_to_deliver(state.clone()).await, - Some(((24..=25), proof_parameters(true, 2))), - ); - assert_eq!(strategy.required_source_header_at_target(state).await, None); - } - - #[async_std::test] - async fn relayer_uses_flattened_view_of_the_source_queue_to_select_nonces() { - // Real scenario that has happened on test deployments: - // 1) relayer witnessed M1 at block 1 => it has separate entry in the `source_queue` - // 2) relayer witnessed M2 at block 2 => it has separate entry in the `source_queue` - // 3) if block 2 is known to the target node, then both M1 and M2 are selected for single - // delivery, even though weight(M1+M2) > larger than largest allowed weight - // - // This was happening because selector (`select_nonces_for_delivery_transaction`) has been - // called for every `source_queue` entry separately without preserving any context. 
- let (mut state, mut strategy) = prepare_strategy(); - let nonces = source_nonces(24..=25, 19, 0); - strategy.strategy.source_nonces_updated(header_id(2), nonces); - strategy.max_unrewarded_relayer_entries_at_target = 100; - strategy.max_unconfirmed_nonces_at_target = 100; - strategy.max_messages_in_single_batch = 5; - strategy.max_messages_weight_in_single_batch = Weight::from_parts(100, 0); - strategy.max_messages_size_in_single_batch = 100; - state.best_finalized_source_header_id_at_best_target = Some(header_id(2)); - - assert_eq!( - strategy.select_nonces_to_deliver(state).await, - Some(((20..=24), proof_parameters(false, 5))) - ); - } - - #[async_std::test] - #[allow(clippy::reversed_empty_ranges)] - async fn no_source_headers_required_at_target_if_lanes_are_empty() { - let (state, _) = prepare_strategy(); - let mut strategy = TestStrategy { - max_unrewarded_relayer_entries_at_target: 4, - max_unconfirmed_nonces_at_target: 4, - max_messages_in_single_batch: 4, - max_messages_weight_in_single_batch: Weight::from_parts(4, 0), - max_messages_size_in_single_batch: 4, - latest_confirmed_nonces_at_source: VecDeque::new(), - lane_source_client: TestSourceClient::default(), - lane_target_client: TestTargetClient::default(), - metrics_msg: None, - target_nonces: None, - strategy: BasicStrategy::new(), - }; - - let source_header_id = header_id(10); - strategy.source_nonces_updated( - source_header_id, - // MessageDeliveryRaceSource::nonces returns Some(0), because that's how it is - // represented in memory (there's no Options in OutboundLaneState) - source_nonces(1u64..=0u64, 0, 0), - ); - - // even though `latest_confirmed_nonces_at_source` is not empty, new headers are not - // requested - assert_eq!( - strategy.latest_confirmed_nonces_at_source, - VecDeque::from([(source_header_id, 0)]) - ); - assert_eq!(strategy.required_source_header_at_target(state).await, None); - } - - #[async_std::test] - async fn 
previous_nonces_are_selected_if_reorg_happens_at_target_chain() { - // this is the copy of the similar test in the `mesage_race_strategy.rs`, but it also tests - // that the `MessageDeliveryStrategy` acts properly in the similar scenario - - // tune parameters to allow 5 nonces per delivery transaction - let (mut state, mut strategy) = prepare_strategy(); - strategy.max_unrewarded_relayer_entries_at_target = 5; - strategy.max_unconfirmed_nonces_at_target = 5; - strategy.max_messages_in_single_batch = 5; - strategy.max_messages_weight_in_single_batch = Weight::from_parts(5, 0); - strategy.max_messages_size_in_single_batch = 5; - - // in this state we have 4 available nonces for delivery - assert_eq!( - strategy.select_nonces_to_deliver(state.clone()).await, - Some(( - 20..=23, - MessageProofParameters { - outbound_state_proof_required: false, - dispatch_weight: Weight::from_parts(4, 0), - } - )), - ); - - // let's say we have submitted 20..=23 - state.nonces_submitted = Some(20..=23); - - // then new nonce 24 appear at the source block 2 - let new_nonce_24 = vec![( - 24, - MessageDetails { dispatch_weight: Weight::from_parts(1, 0), size: 0, reward: 0 }, - )] - .into_iter() - .collect(); - let source_header_2 = header_id(2); - state.best_finalized_source_header_id_at_source = Some(source_header_2); - strategy.source_nonces_updated( - source_header_2, - SourceClientNonces { new_nonces: new_nonce_24, confirmed_nonce: None }, - ); - // and nonce 23 appear at the best block of the target node (best finalized still has 0 - // nonces) - let target_nonces_data = DeliveryRaceTargetNoncesData { - confirmed_nonce: 19, - unrewarded_relayers: UnrewardedRelayersState::default(), - }; - let target_header_2 = header_id(2); - state.best_target_header_id = Some(target_header_2); - strategy.best_target_nonces_updated( - TargetClientNonces { latest_nonce: 23, nonces_data: target_nonces_data.clone() }, - &mut state, - ); - - // then best target header is retracted - 
strategy.best_target_nonces_updated( - TargetClientNonces { latest_nonce: 19, nonces_data: target_nonces_data.clone() }, - &mut state, - ); - - // ... and some fork with 19 delivered nonces is finalized - let target_header_2_fork = header_id(2_1); - state.best_finalized_source_header_id_at_source = Some(source_header_2); - state.best_finalized_source_header_id_at_best_target = Some(source_header_2); - state.best_target_header_id = Some(target_header_2_fork); - state.best_finalized_target_header_id = Some(target_header_2_fork); - strategy.finalized_target_nonces_updated( - TargetClientNonces { latest_nonce: 19, nonces_data: target_nonces_data.clone() }, - &mut state, - ); - - // now we have to select nonces 20..=23 for delivery again - assert_eq!( - strategy.select_nonces_to_deliver(state.clone()).await, - Some(( - 20..=24, - MessageProofParameters { - outbound_state_proof_required: false, - dispatch_weight: Weight::from_parts(5, 0), - } - )), - ); - } - - #[async_std::test] - #[allow(clippy::reversed_empty_ranges)] - async fn delivery_race_is_able_to_unblock_lane() { - // step 1: messages 20..=23 are delivered from source to target at target block 2 - fn at_target_block_2_deliver_messages( - strategy: &mut TestStrategy, - state: &mut TestRaceState, - occupied_relayer_slots: MessageNonce, - occupied_message_slots: MessageNonce, - ) { - let nonces_at_target = TargetClientNonces { - latest_nonce: 23, - nonces_data: DeliveryRaceTargetNoncesData { - confirmed_nonce: 19, - unrewarded_relayers: UnrewardedRelayersState { - unrewarded_relayer_entries: occupied_relayer_slots, - total_messages: occupied_message_slots, - ..Default::default() - }, - }, - }; - - state.best_target_header_id = Some(header_id(2)); - state.best_finalized_target_header_id = Some(header_id(2)); - - strategy.best_target_nonces_updated(nonces_at_target.clone(), state); - strategy.finalized_target_nonces_updated(nonces_at_target, state); - } - - // step 2: delivery of messages 20..=23 is confirmed to the 
source node at source block 2 - fn at_source_block_2_deliver_confirmations( - strategy: &mut TestStrategy, - state: &mut TestRaceState, - ) { - state.best_finalized_source_header_id_at_source = Some(header_id(2)); - - strategy.source_nonces_updated( - header_id(2), - SourceClientNonces { new_nonces: Default::default(), confirmed_nonce: Some(23) }, - ); - } - - // step 3: finalize source block 2 at target block 3 and select nonces to deliver - async fn at_target_block_3_select_nonces_to_deliver( - strategy: &TestStrategy, - mut state: TestRaceState, - ) -> Option<(RangeInclusive, MessageProofParameters)> { - state.best_finalized_source_header_id_at_best_target = Some(header_id(2)); - state.best_target_header_id = Some(header_id(3)); - state.best_finalized_target_header_id = Some(header_id(3)); - - strategy.select_nonces_to_deliver(state).await - } - - let max_unrewarded_relayer_entries_at_target = 4; - let max_unconfirmed_nonces_at_target = 4; - let expected_rewards_proof = Some(( - 1..=0, - MessageProofParameters { - outbound_state_proof_required: true, - dispatch_weight: Weight::zero(), - }, - )); - - // when lane is NOT blocked - let (mut state, mut strategy) = prepare_strategy(); - at_target_block_2_deliver_messages( - &mut strategy, - &mut state, - max_unrewarded_relayer_entries_at_target - 1, - max_unconfirmed_nonces_at_target - 1, - ); - at_source_block_2_deliver_confirmations(&mut strategy, &mut state); - assert_eq!(strategy.required_source_header_at_target(state.clone()).await, None); - assert_eq!(at_target_block_3_select_nonces_to_deliver(&strategy, state).await, None); - - // when lane is blocked by no-relayer-slots in unrewarded relayers vector - let (mut state, mut strategy) = prepare_strategy(); - at_target_block_2_deliver_messages( - &mut strategy, - &mut state, - max_unrewarded_relayer_entries_at_target, - max_unconfirmed_nonces_at_target - 1, - ); - at_source_block_2_deliver_confirmations(&mut strategy, &mut state); - assert_eq!( - 
strategy.required_source_header_at_target(state.clone()).await, - Some(header_id(2)) - ); - assert_eq!( - at_target_block_3_select_nonces_to_deliver(&strategy, state).await, - expected_rewards_proof - ); - - // when lane is blocked by no-message-slots in unrewarded relayers vector - let (mut state, mut strategy) = prepare_strategy(); - at_target_block_2_deliver_messages( - &mut strategy, - &mut state, - max_unrewarded_relayer_entries_at_target - 1, - max_unconfirmed_nonces_at_target, - ); - at_source_block_2_deliver_confirmations(&mut strategy, &mut state); - assert_eq!( - strategy.required_source_header_at_target(state.clone()).await, - Some(header_id(2)) - ); - assert_eq!( - at_target_block_3_select_nonces_to_deliver(&strategy, state).await, - expected_rewards_proof - ); - - // when lane is blocked by no-message-slots and no-message-slots in unrewarded relayers - // vector - let (mut state, mut strategy) = prepare_strategy(); - at_target_block_2_deliver_messages( - &mut strategy, - &mut state, - max_unrewarded_relayer_entries_at_target - 1, - max_unconfirmed_nonces_at_target, - ); - at_source_block_2_deliver_confirmations(&mut strategy, &mut state); - assert_eq!( - strategy.required_source_header_at_target(state.clone()).await, - Some(header_id(2)) - ); - assert_eq!( - at_target_block_3_select_nonces_to_deliver(&strategy, state).await, - expected_rewards_proof - ); - - // when we have already selected some nonces to deliver, we don't need to select anything - let (mut state, mut strategy) = prepare_strategy(); - at_target_block_2_deliver_messages( - &mut strategy, - &mut state, - max_unrewarded_relayer_entries_at_target - 1, - max_unconfirmed_nonces_at_target, - ); - at_source_block_2_deliver_confirmations(&mut strategy, &mut state); - state.nonces_to_submit = Some((header_id(2), 1..=0, (1..=0, None))); - assert_eq!(strategy.required_source_header_at_target(state.clone()).await, None); - assert_eq!(at_target_block_3_select_nonces_to_deliver(&strategy, 
state).await, None); - - // when we have already submitted some nonces, we don't need to select anything - let (mut state, mut strategy) = prepare_strategy(); - at_target_block_2_deliver_messages( - &mut strategy, - &mut state, - max_unrewarded_relayer_entries_at_target - 1, - max_unconfirmed_nonces_at_target, - ); - at_source_block_2_deliver_confirmations(&mut strategy, &mut state); - state.nonces_submitted = Some(1..=0); - assert_eq!(strategy.required_source_header_at_target(state.clone()).await, None); - assert_eq!(at_target_block_3_select_nonces_to_deliver(&strategy, state).await, None); - } - - #[async_std::test] - async fn outbound_state_proof_is_not_required_when_we_have_no_new_confirmations() { - let (mut state, mut strategy) = prepare_strategy(); - - // pretend that we haven't seen any confirmations yet (or they're at the future target chain - // blocks) - strategy.latest_confirmed_nonces_at_source.clear(); - - // emulate delivery of some nonces (20..=23 are generated, but we only deliver 20..=21) - let nonces_at_target = TargetClientNonces { - latest_nonce: 21, - nonces_data: DeliveryRaceTargetNoncesData { - confirmed_nonce: 19, - unrewarded_relayers: UnrewardedRelayersState { - unrewarded_relayer_entries: 1, - total_messages: 2, - ..Default::default() - }, - }, - }; - state.best_target_header_id = Some(header_id(2)); - state.best_finalized_target_header_id = Some(header_id(2)); - strategy.best_target_nonces_updated(nonces_at_target.clone(), &mut state); - strategy.finalized_target_nonces_updated(nonces_at_target, &mut state); - - // we won't include outbound lane state proof into 22..=23 delivery transaction - // because it brings no new reward confirmations - assert_eq!( - strategy.select_nonces_to_deliver(state).await, - Some(((22..=23), proof_parameters(false, 2))) - ); - } -} diff --git a/relays/messages/src/message_race_limits.rs b/relays/messages/src/message_race_limits.rs deleted file mode 100644 index 
873bb6aad04253b833c51cea89f2124167e077d1..0000000000000000000000000000000000000000 --- a/relays/messages/src/message_race_limits.rs +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! enforcement strategy - -use num_traits::Zero; -use std::ops::RangeInclusive; - -use bp_messages::{MessageNonce, Weight}; - -use crate::{ - message_lane::MessageLane, - message_lane_loop::{ - MessageDetails, MessageDetailsMap, SourceClient as MessageLaneSourceClient, - TargetClient as MessageLaneTargetClient, - }, - message_race_loop::NoncesRange, - message_race_strategy::SourceRangesQueue, - metrics::MessageLaneLoopMetrics, -}; - -/// Reference data for participating in relay -pub struct RelayReference< - P: MessageLane, - SourceClient: MessageLaneSourceClient

, - TargetClient: MessageLaneTargetClient

, -> { - /// The client that is connected to the message lane source node. - pub lane_source_client: SourceClient, - /// The client that is connected to the message lane target node. - pub lane_target_client: TargetClient, - /// Metrics reference. - pub metrics: Option, - /// Messages size summary - pub selected_size: u32, - - /// Hard check begin nonce - pub hard_selected_begin_nonce: MessageNonce, - - /// Index by all ready nonces - pub index: usize, - /// Current nonce - pub nonce: MessageNonce, - /// Current nonce details - pub details: MessageDetails, -} - -/// Relay reference data -pub struct RelayMessagesBatchReference< - P: MessageLane, - SourceClient: MessageLaneSourceClient

, - TargetClient: MessageLaneTargetClient

, -> { - /// Maximal number of relayed messages in single delivery transaction. - pub max_messages_in_this_batch: MessageNonce, - /// Maximal cumulative dispatch weight of relayed messages in single delivery transaction. - pub max_messages_weight_in_single_batch: Weight, - /// Maximal cumulative size of relayed messages in single delivery transaction. - pub max_messages_size_in_single_batch: u32, - /// The client that is connected to the message lane source node. - pub lane_source_client: SourceClient, - /// The client that is connected to the message lane target node. - pub lane_target_client: TargetClient, - /// Metrics reference. - pub metrics: Option, - /// Best available nonce at the **best** target block. We do not want to deliver nonces - /// less than this nonce, even though the block may be retracted. - pub best_target_nonce: MessageNonce, - /// Source queue. - pub nonces_queue: SourceRangesQueue< - P::SourceHeaderHash, - P::SourceHeaderNumber, - MessageDetailsMap, - >, - /// Range of indices within the `nonces_queue` that are available for selection. - pub nonces_queue_range: RangeInclusive, -} - -/// Limits of the message race transactions. -#[derive(Clone)] -pub struct MessageRaceLimits; - -impl MessageRaceLimits { - pub async fn decide< - P: MessageLane, - SourceClient: MessageLaneSourceClient

, - TargetClient: MessageLaneTargetClient

, - >( - reference: RelayMessagesBatchReference, - ) -> Option> { - let mut hard_selected_count = 0; - - let mut selected_weight = Weight::zero(); - let mut selected_count: MessageNonce = 0; - - let hard_selected_begin_nonce = std::cmp::max( - reference.best_target_nonce + 1, - reference.nonces_queue[*reference.nonces_queue_range.start()].1.begin(), - ); - - // relay reference - let mut relay_reference = RelayReference { - lane_source_client: reference.lane_source_client.clone(), - lane_target_client: reference.lane_target_client.clone(), - metrics: reference.metrics.clone(), - - selected_size: 0, - - hard_selected_begin_nonce, - - index: 0, - nonce: 0, - details: MessageDetails { - dispatch_weight: Weight::zero(), - size: 0, - reward: P::SourceChainBalance::zero(), - }, - }; - - let all_ready_nonces = reference - .nonces_queue - .range(reference.nonces_queue_range.clone()) - .flat_map(|(_, ready_nonces)| ready_nonces.iter()) - .filter(|(nonce, _)| **nonce >= hard_selected_begin_nonce) - .enumerate(); - for (index, (nonce, details)) in all_ready_nonces { - relay_reference.index = index; - relay_reference.nonce = *nonce; - relay_reference.details = *details; - - // Since we (hopefully) have some reserves in `max_messages_weight_in_single_batch` - // and `max_messages_size_in_single_batch`, we may still try to submit transaction - // with single message if message overflows these limits. The worst case would be if - // transaction will be rejected by the target runtime, but at least we have tried. 
- - // limit messages in the batch by weight - let new_selected_weight = match selected_weight.checked_add(&details.dispatch_weight) { - Some(new_selected_weight) - if new_selected_weight - .all_lte(reference.max_messages_weight_in_single_batch) => - new_selected_weight, - new_selected_weight if selected_count == 0 => { - log::warn!( - target: "bridge", - "Going to submit message delivery transaction with declared dispatch \ - weight {:?} that overflows maximal configured weight {}", - new_selected_weight, - reference.max_messages_weight_in_single_batch, - ); - new_selected_weight.unwrap_or(Weight::MAX) - }, - _ => break, - }; - - // limit messages in the batch by size - let new_selected_size = match relay_reference.selected_size.checked_add(details.size) { - Some(new_selected_size) - if new_selected_size <= reference.max_messages_size_in_single_batch => - new_selected_size, - new_selected_size if selected_count == 0 => { - log::warn!( - target: "bridge", - "Going to submit message delivery transaction with message \ - size {:?} that overflows maximal configured size {}", - new_selected_size, - reference.max_messages_size_in_single_batch, - ); - new_selected_size.unwrap_or(u32::MAX) - }, - _ => break, - }; - - // limit number of messages in the batch - let new_selected_count = selected_count + 1; - if new_selected_count > reference.max_messages_in_this_batch { - break - } - relay_reference.selected_size = new_selected_size; - - hard_selected_count = index + 1; - selected_weight = new_selected_weight; - selected_count = new_selected_count; - } - - if hard_selected_count != 0 { - let selected_max_nonce = - hard_selected_begin_nonce + hard_selected_count as MessageNonce - 1; - Some(hard_selected_begin_nonce..=selected_max_nonce) - } else { - None - } - } -} diff --git a/relays/messages/src/message_race_loop.rs b/relays/messages/src/message_race_loop.rs deleted file mode 100644 index f28be78842fc14b019b32c7a8e3c6d35625b77f9..0000000000000000000000000000000000000000 --- 
a/relays/messages/src/message_race_loop.rs +++ /dev/null @@ -1,835 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -//! Loop that is serving single race within message lane. This could be -//! message delivery race, receiving confirmations race or processing -//! confirmations race. -//! -//! The idea of the race is simple - we have `nonce`-s on source and target -//! nodes. We're trying to prove that the source node has this nonce (and -//! associated data - like messages, lane state, etc) to the target node by -//! generating and submitting proof. - -use crate::message_lane_loop::{BatchTransaction, ClientState, NoncesSubmitArtifacts}; - -use async_trait::async_trait; -use bp_messages::MessageNonce; -use futures::{ - future::{FutureExt, TryFutureExt}, - stream::{FusedStream, StreamExt}, -}; -use relay_utils::{ - process_future_result, retry_backoff, FailedClient, MaybeConnectionError, - TrackedTransactionStatus, TransactionTracker, -}; -use std::{ - fmt::Debug, - ops::RangeInclusive, - time::{Duration, Instant}, -}; - -/// One of races within lane. -pub trait MessageRace { - /// Header id of the race source. - type SourceHeaderId: Debug + Clone + PartialEq + Send + Sync; - /// Header id of the race source. - type TargetHeaderId: Debug + Clone + PartialEq + Send + Sync; - - /// Message nonce used in the race. 
- type MessageNonce: Debug + Clone; - /// Proof that is generated and delivered in this race. - type Proof: Debug + Clone + Send + Sync; - - /// Name of the race source. - fn source_name() -> String; - /// Name of the race target. - fn target_name() -> String; -} - -/// State of race source client. -type SourceClientState

= - ClientState<

::SourceHeaderId,

::TargetHeaderId>; - -/// State of race target client. -type TargetClientState

= - ClientState<

::TargetHeaderId,

::SourceHeaderId>; - -/// Inclusive nonces range. -pub trait NoncesRange: Debug + Sized { - /// Get begin of the range. - fn begin(&self) -> MessageNonce; - /// Get end of the range. - fn end(&self) -> MessageNonce; - /// Returns new range with current range nonces that are greater than the passed `nonce`. - /// If there are no such nonces, `None` is returned. - fn greater_than(self, nonce: MessageNonce) -> Option; -} - -/// Nonces on the race source client. -#[derive(Debug, Clone)] -pub struct SourceClientNonces { - /// New nonces range known to the client. `New` here means all nonces generated after - /// `prev_latest_nonce` passed to the `SourceClient::nonces` method. - pub new_nonces: NoncesRange, - /// The latest nonce that is confirmed to the bridged client. This nonce only makes - /// sense in some races. In other races it is `None`. - pub confirmed_nonce: Option, -} - -/// Nonces on the race target client. -#[derive(Debug, Clone)] -pub struct TargetClientNonces { - /// The latest nonce that is known to the target client. - pub latest_nonce: MessageNonce, - /// Additional data from target node that may be used by the race. - pub nonces_data: TargetNoncesData, -} - -/// One of message lane clients, which is source client for the race. -#[async_trait] -pub trait SourceClient { - /// Type of error these clients returns. - type Error: std::fmt::Debug + MaybeConnectionError; - /// Type of nonces range returned by the source client. - type NoncesRange: NoncesRange; - /// Additional proof parameters required to generate proof. - type ProofParameters; - - /// Return nonces that are known to the source client. - async fn nonces( - &self, - at_block: P::SourceHeaderId, - prev_latest_nonce: MessageNonce, - ) -> Result<(P::SourceHeaderId, SourceClientNonces), Self::Error>; - /// Generate proof for delivering to the target client. 
- async fn generate_proof( - &self, - at_block: P::SourceHeaderId, - nonces: RangeInclusive, - proof_parameters: Self::ProofParameters, - ) -> Result<(P::SourceHeaderId, RangeInclusive, P::Proof), Self::Error>; -} - -/// One of message lane clients, which is target client for the race. -#[async_trait] -pub trait TargetClient { - /// Type of error these clients returns. - type Error: std::fmt::Debug + MaybeConnectionError; - /// Type of the additional data from the target client, used by the race. - type TargetNoncesData: std::fmt::Debug; - /// Type of batch transaction that submits finality and proof to the target node. - type BatchTransaction: BatchTransaction + Clone; - /// Transaction tracker to track submitted transactions. - type TransactionTracker: TransactionTracker; - - /// Ask headers relay to relay finalized headers up to (and including) given header - /// from race source to race target. - /// - /// The client may return `Some(_)`, which means that nothing has happened yet and - /// the caller must generate and append proof to the batch transaction - /// to actually send it (along with required header) to the node. - /// - /// If function has returned `None`, it means that the caller now must wait for the - /// appearance of the required header `id` at the target client. - async fn require_source_header( - &self, - id: P::SourceHeaderId, - ) -> Result, Self::Error>; - - /// Return nonces that are known to the target client. - async fn nonces( - &self, - at_block: P::TargetHeaderId, - update_metrics: bool, - ) -> Result<(P::TargetHeaderId, TargetClientNonces), Self::Error>; - /// Submit proof to the target client. - async fn submit_proof( - &self, - maybe_batch_tx: Option, - generated_at_block: P::SourceHeaderId, - nonces: RangeInclusive, - proof: P::Proof, - ) -> Result, Self::Error>; -} - -/// Race strategy. -#[async_trait] -pub trait RaceStrategy: Debug { - /// Type of nonces range expected from the source client. 
- type SourceNoncesRange: NoncesRange; - /// Additional proof parameters required to generate proof. - type ProofParameters; - /// Additional data expected from the target client. - type TargetNoncesData; - - /// Should return true if nothing has to be synced. - fn is_empty(&self) -> bool; - /// Return id of source header that is required to be on target to continue synchronization. - async fn required_source_header_at_target>( - &self, - race_state: RS, - ) -> Option; - /// Return the best nonce at source node. - /// - /// `Some` is returned only if we are sure that the value is greater or equal - /// than the result of `best_at_target`. - fn best_at_source(&self) -> Option; - /// Return the best nonce at target node. - /// - /// May return `None` if value is yet unknown. - fn best_at_target(&self) -> Option; - - /// Called when nonces are updated at source node of the race. - fn source_nonces_updated( - &mut self, - at_block: SourceHeaderId, - nonces: SourceClientNonces, - ); - /// Called when we want to wait until next `best_target_nonces_updated` before selecting - /// any nonces for delivery. - fn reset_best_target_nonces(&mut self); - /// Called when best nonces are updated at target node of the race. - fn best_target_nonces_updated>( - &mut self, - nonces: TargetClientNonces, - race_state: &mut RS, - ); - /// Called when finalized nonces are updated at target node of the race. - fn finalized_target_nonces_updated>( - &mut self, - nonces: TargetClientNonces, - race_state: &mut RS, - ); - /// Should return `Some(nonces)` if we need to deliver proof of `nonces` (and associated - /// data) from source to target node. - /// Additionally, parameters required to generate proof are returned. - async fn select_nonces_to_deliver>( - &self, - race_state: RS, - ) -> Option<(RangeInclusive, Self::ProofParameters)>; -} - -/// State of the race. 
-pub trait RaceState: Clone + Send + Sync { - /// Set best finalized source header id at the best block on the target - /// client (at the `best_finalized_source_header_id_at_best_target`). - fn set_best_finalized_source_header_id_at_best_target(&mut self, id: SourceHeaderId); - - /// Best finalized source header id at the source client. - fn best_finalized_source_header_id_at_source(&self) -> Option; - /// Best finalized source header id at the best block on the target - /// client (at the `best_finalized_source_header_id_at_best_target`). - fn best_finalized_source_header_id_at_best_target(&self) -> Option; - /// The best header id at the target client. - fn best_target_header_id(&self) -> Option; - /// Best finalized header id at the target client. - fn best_finalized_target_header_id(&self) -> Option; - - /// Returns `true` if we have selected nonces to submit to the target node. - fn nonces_to_submit(&self) -> Option>; - /// Reset our nonces selection. - fn reset_nonces_to_submit(&mut self); - - /// Returns `true` if we have submitted some nonces to the target node and are - /// waiting for them to appear there. - fn nonces_submitted(&self) -> Option>; - /// Reset our nonces submission. - fn reset_nonces_submitted(&mut self); -} - -/// State of the race and prepared batch transaction (if available). -#[derive(Debug, Clone)] -pub(crate) struct RaceStateImpl { - /// Best finalized source header id at the source client. - pub best_finalized_source_header_id_at_source: Option, - /// Best finalized source header id at the best block on the target - /// client (at the `best_finalized_source_header_id_at_best_target`). - pub best_finalized_source_header_id_at_best_target: Option, - /// The best header id at the target client. - pub best_target_header_id: Option, - /// Best finalized header id at the target client. - pub best_finalized_target_header_id: Option, - /// Range of nonces that we have selected to submit. 
- pub nonces_to_submit: Option<(SourceHeaderId, RangeInclusive, Proof)>, - /// Batch transaction ready to include and deliver selected `nonces_to_submit` from the - /// `state`. - pub nonces_to_submit_batch: Option, - /// Range of nonces that is currently submitted. - pub nonces_submitted: Option>, -} - -impl Default - for RaceStateImpl -{ - fn default() -> Self { - RaceStateImpl { - best_finalized_source_header_id_at_source: None, - best_finalized_source_header_id_at_best_target: None, - best_target_header_id: None, - best_finalized_target_header_id: None, - nonces_to_submit: None, - nonces_to_submit_batch: None, - nonces_submitted: None, - } - } -} - -impl RaceState - for RaceStateImpl -where - SourceHeaderId: Clone + Send + Sync, - TargetHeaderId: Clone + Send + Sync, - Proof: Clone + Send + Sync, - BatchTx: Clone + Send + Sync, -{ - fn set_best_finalized_source_header_id_at_best_target(&mut self, id: SourceHeaderId) { - self.best_finalized_source_header_id_at_best_target = Some(id); - } - - fn best_finalized_source_header_id_at_source(&self) -> Option { - self.best_finalized_source_header_id_at_source.clone() - } - - fn best_finalized_source_header_id_at_best_target(&self) -> Option { - self.best_finalized_source_header_id_at_best_target.clone() - } - - fn best_target_header_id(&self) -> Option { - self.best_target_header_id.clone() - } - - fn best_finalized_target_header_id(&self) -> Option { - self.best_finalized_target_header_id.clone() - } - - fn nonces_to_submit(&self) -> Option> { - self.nonces_to_submit.as_ref().map(|(_, nonces, _)| nonces.clone()) - } - - fn reset_nonces_to_submit(&mut self) { - self.nonces_to_submit = None; - self.nonces_to_submit_batch = None; - } - - fn nonces_submitted(&self) -> Option> { - self.nonces_submitted.clone() - } - - fn reset_nonces_submitted(&mut self) { - self.nonces_submitted = None; - } -} - -/// Run race loop until connection with target or source node is lost. -pub async fn run, TC: TargetClient

>( - race_source: SC, - race_source_updated: impl FusedStream>, - race_target: TC, - race_target_updated: impl FusedStream>, - mut strategy: impl RaceStrategy< - P::SourceHeaderId, - P::TargetHeaderId, - P::Proof, - SourceNoncesRange = SC::NoncesRange, - ProofParameters = SC::ProofParameters, - TargetNoncesData = TC::TargetNoncesData, - >, -) -> Result<(), FailedClient> { - let mut progress_context = Instant::now(); - let mut race_state = RaceStateImpl::default(); - - let mut source_retry_backoff = retry_backoff(); - let mut source_client_is_online = true; - let mut source_nonces_required = false; - let mut source_required_header = None; - let source_nonces = futures::future::Fuse::terminated(); - let source_generate_proof = futures::future::Fuse::terminated(); - let source_go_offline_future = futures::future::Fuse::terminated(); - - let mut target_retry_backoff = retry_backoff(); - let mut target_client_is_online = true; - let mut target_best_nonces_required = false; - let mut target_finalized_nonces_required = false; - let mut target_batch_transaction = None; - let target_require_source_header = futures::future::Fuse::terminated(); - let target_best_nonces = futures::future::Fuse::terminated(); - let target_finalized_nonces = futures::future::Fuse::terminated(); - let target_submit_proof = futures::future::Fuse::terminated(); - let target_tx_tracker = futures::future::Fuse::terminated(); - let target_go_offline_future = futures::future::Fuse::terminated(); - - futures::pin_mut!( - race_source_updated, - source_nonces, - source_generate_proof, - source_go_offline_future, - race_target_updated, - target_require_source_header, - target_best_nonces, - target_finalized_nonces, - target_submit_proof, - target_tx_tracker, - target_go_offline_future, - ); - - loop { - futures::select! 
{ - // when headers ids are updated - source_state = race_source_updated.next() => { - if let Some(source_state) = source_state { - let is_source_state_updated = race_state.best_finalized_source_header_id_at_source.as_ref() - != Some(&source_state.best_finalized_self); - if is_source_state_updated { - source_nonces_required = true; - race_state.best_finalized_source_header_id_at_source - = Some(source_state.best_finalized_self); - } - } - }, - target_state = race_target_updated.next() => { - if let Some(target_state) = target_state { - let is_target_best_state_updated = race_state.best_target_header_id.as_ref() - != Some(&target_state.best_self); - - if is_target_best_state_updated { - target_best_nonces_required = true; - race_state.best_target_header_id = Some(target_state.best_self); - race_state.best_finalized_source_header_id_at_best_target - = target_state.best_finalized_peer_at_best_self; - } - - let is_target_finalized_state_updated = race_state.best_finalized_target_header_id.as_ref() - != Some(&target_state.best_finalized_self); - if is_target_finalized_state_updated { - target_finalized_nonces_required = true; - race_state.best_finalized_target_header_id = Some(target_state.best_finalized_self); - } - } - }, - - // when nonces are updated - nonces = source_nonces => { - source_nonces_required = false; - - source_client_is_online = process_future_result( - nonces, - &mut source_retry_backoff, - |(at_block, nonces)| { - log::debug!( - target: "bridge", - "Received nonces from {}: {:?}", - P::source_name(), - nonces, - ); - - strategy.source_nonces_updated(at_block, nonces); - }, - &mut source_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving nonces from {}", P::source_name()), - ).fail_if_connection_error(FailedClient::Source)?; - - // ask for more headers if we have nonces to deliver and required headers are missing - source_required_header = strategy - .required_source_header_at_target(race_state.clone()) - .await; - }, - nonces 
= target_best_nonces => { - target_best_nonces_required = false; - - target_client_is_online = process_future_result( - nonces, - &mut target_retry_backoff, - |(_, nonces)| { - log::debug!( - target: "bridge", - "Received best nonces from {}: {:?}", - P::target_name(), - nonces, - ); - - strategy.best_target_nonces_updated(nonces, &mut race_state); - }, - &mut target_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving best nonces from {}", P::target_name()), - ).fail_if_connection_error(FailedClient::Target)?; - }, - nonces = target_finalized_nonces => { - target_finalized_nonces_required = false; - - target_client_is_online = process_future_result( - nonces, - &mut target_retry_backoff, - |(_, nonces)| { - log::debug!( - target: "bridge", - "Received finalized nonces from {}: {:?}", - P::target_name(), - nonces, - ); - - strategy.finalized_target_nonces_updated(nonces, &mut race_state); - }, - &mut target_go_offline_future, - async_std::task::sleep, - || format!("Error retrieving finalized nonces from {}", P::target_name()), - ).fail_if_connection_error(FailedClient::Target)?; - }, - - // proof generation and submission - maybe_batch_transaction = target_require_source_header => { - source_required_header = None; - - target_client_is_online = process_future_result( - maybe_batch_transaction, - &mut target_retry_backoff, - |maybe_batch_transaction: Option| { - log::debug!( - target: "bridge", - "Target {} client has been asked for more {} headers. 
Batch tx: {}", - P::target_name(), - P::source_name(), - maybe_batch_transaction - .as_ref() - .map(|bt| format!("yes ({:?})", bt.required_header_id())) - .unwrap_or_else(|| "no".into()), - ); - - target_batch_transaction = maybe_batch_transaction; - }, - &mut target_go_offline_future, - async_std::task::sleep, - || format!("Error asking for source headers at {}", P::target_name()), - ).fail_if_connection_error(FailedClient::Target)?; - }, - proof = source_generate_proof => { - source_client_is_online = process_future_result( - proof, - &mut source_retry_backoff, - |(at_block, nonces_range, proof, batch_transaction)| { - log::debug!( - target: "bridge", - "Received proof for nonces in range {:?} from {}", - nonces_range, - P::source_name(), - ); - - race_state.nonces_to_submit = Some((at_block, nonces_range, proof)); - race_state.nonces_to_submit_batch = batch_transaction; - }, - &mut source_go_offline_future, - async_std::task::sleep, - || format!("Error generating proof at {}", P::source_name()), - ).fail_if_error(FailedClient::Source).map(|_| true)?; - }, - proof_submit_result = target_submit_proof => { - target_client_is_online = process_future_result( - proof_submit_result, - &mut target_retry_backoff, - |artifacts: NoncesSubmitArtifacts| { - log::debug!( - target: "bridge", - "Successfully submitted proof of nonces {:?} to {}", - artifacts.nonces, - P::target_name(), - ); - - race_state.nonces_submitted = Some(artifacts.nonces); - target_tx_tracker.set(artifacts.tx_tracker.wait().fuse()); - }, - &mut target_go_offline_future, - async_std::task::sleep, - || format!("Error submitting proof {}", P::target_name()), - ).fail_if_connection_error(FailedClient::Target)?; - - // in any case - we don't need to retry submitting the same nonces again until - // we read nonces from the target client - race_state.reset_nonces_to_submit(); - // if we have failed to submit transaction AND that is not the connection issue, - // then we need to read best target nonces before 
selecting nonces again - if !target_client_is_online { - strategy.reset_best_target_nonces(); - } - }, - target_transaction_status = target_tx_tracker => { - match (target_transaction_status, race_state.nonces_submitted.as_ref()) { - (TrackedTransactionStatus::Finalized(at_block), Some(nonces_submitted)) => { - // our transaction has been mined, but was it successful or not? let's check the best - // nonce at the target node. - let _ = race_target.nonces(at_block, false) - .await - .map_err(|e| format!("failed to read nonces from target node: {e:?}")) - .and_then(|(_, nonces_at_target)| { - if nonces_at_target.latest_nonce < *nonces_submitted.end() { - Err(format!( - "best nonce at target after tx is {:?} and we've submitted {:?}", - nonces_at_target.latest_nonce, - nonces_submitted.end(), - )) - } else { - Ok(()) - } - }) - .map_err(|e| { - log::error!( - target: "bridge", - "{} -> {} race transaction failed: {}", - P::source_name(), - P::target_name(), - e, - ); - - race_state.reset_nonces_submitted(); - }); - }, - (TrackedTransactionStatus::Lost, _) => { - log::warn!( - target: "bridge", - "{} -> {} race transaction has been lost. State: {:?}. Strategy: {:?}", - P::source_name(), - P::target_name(), - race_state, - strategy, - ); - - race_state.reset_nonces_submitted(); - }, - _ => (), - } - }, - - // when we're ready to retry request - _ = source_go_offline_future => { - source_client_is_online = true; - }, - _ = target_go_offline_future => { - target_client_is_online = true; - }, - } - - progress_context = print_race_progress::(progress_context, &strategy); - - if source_client_is_online { - source_client_is_online = false; - - // if we've started to submit batch transaction, let's prioritize it - // - // we're using `take` here, because we don't need batch transaction (i.e. some - // underlying finality proof) anymore for our future calls - we were unable to - // use it for our current state, so why would we need to keep an obsolete proof - // for the future? 
- let target_batch_transaction = target_batch_transaction.take(); - let expected_race_state = - if let Some(ref target_batch_transaction) = target_batch_transaction { - // when selecting nonces for the batch transaction, we assume that the required - // source header is already at the target chain - let required_source_header_at_target = - target_batch_transaction.required_header_id(); - let mut expected_race_state = race_state.clone(); - expected_race_state.best_finalized_source_header_id_at_best_target = - Some(required_source_header_at_target); - expected_race_state - } else { - race_state.clone() - }; - - let nonces_to_deliver = select_nonces_to_deliver(expected_race_state, &strategy).await; - let best_at_source = strategy.best_at_source(); - - if let Some((at_block, nonces_range, proof_parameters)) = nonces_to_deliver { - log::debug!( - target: "bridge", - "Asking {} to prove nonces in range {:?} at block {:?}", - P::source_name(), - nonces_range, - at_block, - ); - - source_generate_proof.set( - race_source - .generate_proof(at_block, nonces_range, proof_parameters) - .and_then(|(at_source_block, nonces, proof)| async { - Ok((at_source_block, nonces, proof, target_batch_transaction)) - }) - .fuse(), - ); - } else if let (true, Some(best_at_source)) = (source_nonces_required, best_at_source) { - log::debug!(target: "bridge", "Asking {} about message nonces", P::source_name()); - let at_block = race_state - .best_finalized_source_header_id_at_source - .as_ref() - .expect( - "source_nonces_required is only true when\ - best_finalized_source_header_id_at_source is Some; qed", - ) - .clone(); - source_nonces.set(race_source.nonces(at_block, best_at_source).fuse()); - } else { - source_client_is_online = true; - } - } - - if target_client_is_online { - target_client_is_online = false; - - if let Some((at_block, nonces_range, proof)) = race_state.nonces_to_submit.as_ref() { - log::debug!( - target: "bridge", - "Going to submit proof of messages in range {:?} to {} 
node{}", - nonces_range, - P::target_name(), - race_state.nonces_to_submit_batch.as_ref().map(|tx| format!( - ". This transaction is batched with sending the proof for header {:?}.", - tx.required_header_id()) - ).unwrap_or_default(), - ); - - target_submit_proof.set( - race_target - .submit_proof( - race_state.nonces_to_submit_batch.clone(), - at_block.clone(), - nonces_range.clone(), - proof.clone(), - ) - .fuse(), - ); - } else if let Some(source_required_header) = source_required_header.clone() { - log::debug!( - target: "bridge", - "Going to require {} header {:?} at {}", - P::source_name(), - source_required_header, - P::target_name(), - ); - target_require_source_header - .set(race_target.require_source_header(source_required_header).fuse()); - } else if target_best_nonces_required { - log::debug!(target: "bridge", "Asking {} about best message nonces", P::target_name()); - let at_block = race_state - .best_target_header_id - .as_ref() - .expect("target_best_nonces_required is only true when best_target_header_id is Some; qed") - .clone(); - target_best_nonces.set(race_target.nonces(at_block, false).fuse()); - } else if target_finalized_nonces_required { - log::debug!(target: "bridge", "Asking {} about finalized message nonces", P::target_name()); - let at_block = race_state - .best_finalized_target_header_id - .as_ref() - .expect( - "target_finalized_nonces_required is only true when\ - best_finalized_target_header_id is Some; qed", - ) - .clone(); - target_finalized_nonces.set(race_target.nonces(at_block, true).fuse()); - } else { - target_client_is_online = true; - } - } - } -} - -/// Print race progress. 
-fn print_race_progress(prev_time: Instant, strategy: &S) -> Instant -where - P: MessageRace, - S: RaceStrategy, -{ - let now_time = Instant::now(); - - let need_update = now_time.saturating_duration_since(prev_time) > Duration::from_secs(10); - if !need_update { - return prev_time - } - - let now_best_nonce_at_source = strategy.best_at_source(); - let now_best_nonce_at_target = strategy.best_at_target(); - log::info!( - target: "bridge", - "Synced {:?} of {:?} nonces in {} -> {} race", - now_best_nonce_at_target, - now_best_nonce_at_source, - P::source_name(), - P::target_name(), - ); - now_time -} - -async fn select_nonces_to_deliver( - race_state: impl RaceState, - strategy: &Strategy, -) -> Option<(SourceHeaderId, RangeInclusive, Strategy::ProofParameters)> -where - SourceHeaderId: Clone, - Strategy: RaceStrategy, -{ - let best_finalized_source_header_id_at_best_target = - race_state.best_finalized_source_header_id_at_best_target()?; - strategy - .select_nonces_to_deliver(race_state) - .await - .map(|(nonces_range, proof_parameters)| { - (best_finalized_source_header_id_at_best_target, nonces_range, proof_parameters) - }) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::message_race_strategy::BasicStrategy; - use relay_utils::HeaderId; - - #[async_std::test] - async fn proof_is_generated_at_best_block_known_to_target_node() { - const GENERATED_AT: u64 = 6; - const BEST_AT_SOURCE: u64 = 10; - const BEST_AT_TARGET: u64 = 8; - - // target node only knows about source' BEST_AT_TARGET block - // source node has BEST_AT_SOURCE > BEST_AT_TARGET block - let mut race_state = RaceStateImpl::<_, _, (), ()> { - best_finalized_source_header_id_at_source: Some(HeaderId( - BEST_AT_SOURCE, - BEST_AT_SOURCE, - )), - best_finalized_source_header_id_at_best_target: Some(HeaderId( - BEST_AT_TARGET, - BEST_AT_TARGET, - )), - best_target_header_id: Some(HeaderId(0, 0)), - best_finalized_target_header_id: Some(HeaderId(0, 0)), - nonces_to_submit: None, - 
nonces_to_submit_batch: None, - nonces_submitted: None, - }; - - // we have some nonces to deliver and they're generated at GENERATED_AT < BEST_AT_SOURCE - let mut strategy = BasicStrategy::<_, _, _, _, _, ()>::new(); - strategy.source_nonces_updated( - HeaderId(GENERATED_AT, GENERATED_AT), - SourceClientNonces { new_nonces: 0..=10, confirmed_nonce: None }, - ); - strategy.best_target_nonces_updated( - TargetClientNonces { latest_nonce: 5u64, nonces_data: () }, - &mut race_state, - ); - - // the proof will be generated on source, but using BEST_AT_TARGET block - assert_eq!( - select_nonces_to_deliver(race_state, &strategy).await, - Some((HeaderId(BEST_AT_TARGET, BEST_AT_TARGET), 6..=10, (),)) - ); - } -} diff --git a/relays/messages/src/message_race_receiving.rs b/relays/messages/src/message_race_receiving.rs deleted file mode 100644 index e6497a1b79eb70999f16cd0c1926ae9cf0ad5411..0000000000000000000000000000000000000000 --- a/relays/messages/src/message_race_receiving.rs +++ /dev/null @@ -1,235 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -//! Message receiving race delivers proof-of-messages-delivery from "lane.target" to "lane.source". 
- -use crate::{ - message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}, - message_lane_loop::{ - NoncesSubmitArtifacts, SourceClient as MessageLaneSourceClient, SourceClientState, - TargetClient as MessageLaneTargetClient, TargetClientState, - }, - message_race_loop::{ - MessageRace, NoncesRange, SourceClient, SourceClientNonces, TargetClient, - TargetClientNonces, - }, - message_race_strategy::BasicStrategy, - metrics::MessageLaneLoopMetrics, -}; - -use async_trait::async_trait; -use bp_messages::MessageNonce; -use futures::stream::FusedStream; -use relay_utils::FailedClient; -use std::{marker::PhantomData, ops::RangeInclusive}; - -/// Message receiving confirmations delivery strategy. -type ReceivingConfirmationsBasicStrategy

= BasicStrategy< -

::TargetHeaderNumber, -

::TargetHeaderHash, -

::SourceHeaderNumber, -

::SourceHeaderHash, - RangeInclusive, -

::MessagesReceivingProof, ->; - -/// Run receiving confirmations race. -pub async fn run( - source_client: impl MessageLaneSourceClient

, - source_state_updates: impl FusedStream>, - target_client: impl MessageLaneTargetClient

, - target_state_updates: impl FusedStream>, - metrics_msg: Option, -) -> Result<(), FailedClient> { - crate::message_race_loop::run( - ReceivingConfirmationsRaceSource { - client: target_client, - metrics_msg: metrics_msg.clone(), - _phantom: Default::default(), - }, - target_state_updates, - ReceivingConfirmationsRaceTarget { - client: source_client, - metrics_msg, - _phantom: Default::default(), - }, - source_state_updates, - ReceivingConfirmationsBasicStrategy::

::new(), - ) - .await -} - -/// Messages receiving confirmations race. -struct ReceivingConfirmationsRace

(std::marker::PhantomData

); - -impl MessageRace for ReceivingConfirmationsRace

{ - type SourceHeaderId = TargetHeaderIdOf

; - type TargetHeaderId = SourceHeaderIdOf

; - - type MessageNonce = MessageNonce; - type Proof = P::MessagesReceivingProof; - - fn source_name() -> String { - format!("{}::ReceivingConfirmationsDelivery", P::TARGET_NAME) - } - - fn target_name() -> String { - format!("{}::ReceivingConfirmationsDelivery", P::SOURCE_NAME) - } -} - -/// Message receiving confirmations race source, which is a target of the lane. -struct ReceivingConfirmationsRaceSource { - client: C, - metrics_msg: Option, - _phantom: PhantomData

, -} - -#[async_trait] -impl SourceClient> for ReceivingConfirmationsRaceSource -where - P: MessageLane, - C: MessageLaneTargetClient

, -{ - type Error = C::Error; - type NoncesRange = RangeInclusive; - type ProofParameters = (); - - async fn nonces( - &self, - at_block: TargetHeaderIdOf

, - prev_latest_nonce: MessageNonce, - ) -> Result<(TargetHeaderIdOf

, SourceClientNonces), Self::Error> { - let (at_block, latest_received_nonce) = self.client.latest_received_nonce(at_block).await?; - if let Some(metrics_msg) = self.metrics_msg.as_ref() { - metrics_msg.update_target_latest_received_nonce(latest_received_nonce); - } - Ok(( - at_block, - SourceClientNonces { - new_nonces: prev_latest_nonce + 1..=latest_received_nonce, - confirmed_nonce: None, - }, - )) - } - - #[allow(clippy::unit_arg)] - async fn generate_proof( - &self, - at_block: TargetHeaderIdOf

, - nonces: RangeInclusive, - _proof_parameters: Self::ProofParameters, - ) -> Result< - (TargetHeaderIdOf

, RangeInclusive, P::MessagesReceivingProof), - Self::Error, - > { - self.client - .prove_messages_receiving(at_block) - .await - .map(|(at_block, proof)| (at_block, nonces, proof)) - } -} - -/// Message receiving confirmations race target, which is a source of the lane. -struct ReceivingConfirmationsRaceTarget { - client: C, - metrics_msg: Option, - _phantom: PhantomData

, -} - -#[async_trait] -impl TargetClient> for ReceivingConfirmationsRaceTarget -where - P: MessageLane, - C: MessageLaneSourceClient

, -{ - type Error = C::Error; - type TargetNoncesData = (); - type BatchTransaction = C::BatchTransaction; - type TransactionTracker = C::TransactionTracker; - - async fn require_source_header( - &self, - id: TargetHeaderIdOf

, - ) -> Result, Self::Error> { - self.client.require_target_header_on_source(id).await - } - - async fn nonces( - &self, - at_block: SourceHeaderIdOf

, - update_metrics: bool, - ) -> Result<(SourceHeaderIdOf

, TargetClientNonces<()>), Self::Error> { - let (at_block, latest_confirmed_nonce) = - self.client.latest_confirmed_received_nonce(at_block).await?; - if update_metrics { - if let Some(metrics_msg) = self.metrics_msg.as_ref() { - metrics_msg.update_source_latest_confirmed_nonce(latest_confirmed_nonce); - } - } - Ok((at_block, TargetClientNonces { latest_nonce: latest_confirmed_nonce, nonces_data: () })) - } - - async fn submit_proof( - &self, - maybe_batch_tx: Option, - generated_at_block: TargetHeaderIdOf

, - nonces: RangeInclusive, - proof: P::MessagesReceivingProof, - ) -> Result, Self::Error> { - let tx_tracker = self - .client - .submit_messages_receiving_proof(maybe_batch_tx, generated_at_block, proof) - .await?; - Ok(NoncesSubmitArtifacts { nonces, tx_tracker }) - } -} - -impl NoncesRange for RangeInclusive { - fn begin(&self) -> MessageNonce { - *RangeInclusive::::start(self) - } - - fn end(&self) -> MessageNonce { - *RangeInclusive::::end(self) - } - - fn greater_than(self, nonce: MessageNonce) -> Option { - let next_nonce = nonce + 1; - let end = *self.end(); - if next_nonce > end { - None - } else { - Some(std::cmp::max(self.begin(), next_nonce)..=end) - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn range_inclusive_works_as_nonces_range() { - let range = 20..=30; - - assert_eq!(NoncesRange::begin(&range), 20); - assert_eq!(NoncesRange::end(&range), 30); - assert_eq!(range.clone().greater_than(10), Some(20..=30)); - assert_eq!(range.clone().greater_than(19), Some(20..=30)); - assert_eq!(range.clone().greater_than(20), Some(21..=30)); - assert_eq!(range.clone().greater_than(25), Some(26..=30)); - assert_eq!(range.clone().greater_than(29), Some(30..=30)); - assert_eq!(range.greater_than(30), None); - } -} diff --git a/relays/messages/src/message_race_strategy.rs b/relays/messages/src/message_race_strategy.rs deleted file mode 100644 index 93d178e55b04f64a9631f04b4e93b67594d67e54..0000000000000000000000000000000000000000 --- a/relays/messages/src/message_race_strategy.rs +++ /dev/null @@ -1,628 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -//! Basic delivery strategy. The strategy selects nonces if: -//! -//! 1) there are more nonces on the source side than on the target side; -//! 2) new nonces may be proved to target node (i.e. they have appeared at the block, which is known -//! to the target node). - -use crate::message_race_loop::{ - NoncesRange, RaceState, RaceStrategy, SourceClientNonces, TargetClientNonces, -}; - -use async_trait::async_trait; -use bp_messages::MessageNonce; -use relay_utils::HeaderId; -use std::{collections::VecDeque, fmt::Debug, marker::PhantomData, ops::RangeInclusive}; - -/// Queue of nonces known to the source node. -pub type SourceRangesQueue = - VecDeque<(HeaderId, SourceNoncesRange)>; - -/// Nonces delivery strategy. -#[derive(Debug)] -pub struct BasicStrategy< - SourceHeaderNumber, - SourceHeaderHash, - TargetHeaderNumber, - TargetHeaderHash, - SourceNoncesRange, - Proof, -> { - /// All queued nonces. - /// - /// The queue may contain already delivered nonces. We only remove entries from this - /// queue after corresponding nonces are finalized by the target chain. - source_queue: SourceRangesQueue, - /// The best nonce known to target node at its best block. `None` if it has not been received - /// yet. - best_target_nonce: Option, - /// Unused generic types dump. 
- _phantom: PhantomData<(TargetHeaderNumber, TargetHeaderHash, Proof)>, -} - -impl< - SourceHeaderNumber, - SourceHeaderHash, - TargetHeaderNumber, - TargetHeaderHash, - SourceNoncesRange, - Proof, - > - BasicStrategy< - SourceHeaderNumber, - SourceHeaderHash, - TargetHeaderNumber, - TargetHeaderHash, - SourceNoncesRange, - Proof, - > where - SourceHeaderHash: Clone, - SourceHeaderNumber: Clone + Ord, - SourceNoncesRange: NoncesRange, -{ - /// Create new delivery strategy. - pub fn new() -> Self { - BasicStrategy { - source_queue: VecDeque::new(), - best_target_nonce: None, - _phantom: Default::default(), - } - } - - /// Reference to source queue. - pub(crate) fn source_queue( - &self, - ) -> &VecDeque<(HeaderId, SourceNoncesRange)> { - &self.source_queue - } - - /// Mutable reference to source queue to use in tests. - #[cfg(test)] - pub(crate) fn source_queue_mut( - &mut self, - ) -> &mut VecDeque<(HeaderId, SourceNoncesRange)> { - &mut self.source_queue - } - - /// Returns indices of source queue entries, which may be delivered to the target node. - /// - /// The function may skip some nonces from the queue front if nonces from this entry are - /// already available at the **best** target block. After this block is finalized, the entry - /// will be removed from the queue. - /// - /// All entries before and including the range end index, are guaranteed to be witnessed - /// at source blocks that are known to be finalized at the target node. - /// - /// Returns `None` if no entries may be delivered. 
- pub fn available_source_queue_indices< - RS: RaceState< - HeaderId, - HeaderId, - >, - >( - &self, - race_state: RS, - ) -> Option> { - // if we do not know best nonce at target node, we can't select anything - let best_target_nonce = self.best_target_nonce?; - - // if we have already selected nonces that we want to submit, do nothing - if race_state.nonces_to_submit().is_some() { - return None - } - - // if we already submitted some nonces, do nothing - if race_state.nonces_submitted().is_some() { - return None - } - - // find first entry that may be delivered to the target node - let begin_index = self - .source_queue - .iter() - .enumerate() - .skip_while(|(_, (_, nonces))| nonces.end() <= best_target_nonce) - .map(|(index, _)| index) - .next()?; - - // 1) we want to deliver all nonces, starting from `target_nonce + 1` - // 2) we can't deliver new nonce until header, that has emitted this nonce, is finalized - // by target client - // 3) selector is used for more complicated logic - // - // => let's first select range of entries inside deque that are already finalized at - // the target client and pass this range to the selector - let best_header_at_target = race_state.best_finalized_source_header_id_at_best_target()?; - let end_index = self - .source_queue - .iter() - .enumerate() - .skip(begin_index) - .take_while(|(_, (queued_at, _))| queued_at.0 <= best_header_at_target.0) - .map(|(index, _)| index) - .last()?; - - Some(begin_index..=end_index) - } - - /// Remove all nonces that are less than or equal to given nonce from the source queue. 
- fn remove_le_nonces_from_source_queue(&mut self, nonce: MessageNonce) { - while let Some((queued_at, queued_range)) = self.source_queue.pop_front() { - if let Some(range_to_requeue) = queued_range.greater_than(nonce) { - self.source_queue.push_front((queued_at, range_to_requeue)); - break - } - } - } -} - -#[async_trait] -impl< - SourceHeaderNumber, - SourceHeaderHash, - TargetHeaderNumber, - TargetHeaderHash, - SourceNoncesRange, - Proof, - > - RaceStrategy< - HeaderId, - HeaderId, - Proof, - > - for BasicStrategy< - SourceHeaderNumber, - SourceHeaderHash, - TargetHeaderNumber, - TargetHeaderHash, - SourceNoncesRange, - Proof, - > where - SourceHeaderHash: Clone + Debug + Send + Sync, - SourceHeaderNumber: Clone + Ord + Debug + Send + Sync, - SourceNoncesRange: NoncesRange + Debug + Send + Sync, - TargetHeaderHash: Debug + Send + Sync, - TargetHeaderNumber: Debug + Send + Sync, - Proof: Debug + Send + Sync, -{ - type SourceNoncesRange = SourceNoncesRange; - type ProofParameters = (); - type TargetNoncesData = (); - - fn is_empty(&self) -> bool { - self.source_queue.is_empty() - } - - async fn required_source_header_at_target< - RS: RaceState< - HeaderId, - HeaderId, - >, - >( - &self, - race_state: RS, - ) -> Option> { - let current_best = race_state.best_finalized_source_header_id_at_best_target()?; - self.source_queue - .back() - .and_then(|(h, _)| if h.0 > current_best.0 { Some(h.clone()) } else { None }) - } - - fn best_at_source(&self) -> Option { - let best_in_queue = self.source_queue.back().map(|(_, range)| range.end()); - match (best_in_queue, self.best_target_nonce) { - (Some(best_in_queue), Some(best_target_nonce)) if best_in_queue > best_target_nonce => - Some(best_in_queue), - (_, Some(best_target_nonce)) => Some(best_target_nonce), - (_, None) => None, - } - } - - fn best_at_target(&self) -> Option { - self.best_target_nonce - } - - fn source_nonces_updated( - &mut self, - at_block: HeaderId, - nonces: SourceClientNonces, - ) { - let best_in_queue 
= self - .source_queue - .back() - .map(|(_, range)| range.end()) - .or(self.best_target_nonce) - .unwrap_or_default(); - self.source_queue.extend( - nonces - .new_nonces - .greater_than(best_in_queue) - .into_iter() - .map(move |range| (at_block.clone(), range)), - ) - } - - fn reset_best_target_nonces(&mut self) { - self.best_target_nonce = None; - } - - fn best_target_nonces_updated< - RS: RaceState< - HeaderId, - HeaderId, - >, - >( - &mut self, - nonces: TargetClientNonces<()>, - race_state: &mut RS, - ) { - let nonce = nonces.latest_nonce; - - // if **some** of nonces that we have selected to submit already present at the - // target chain => select new nonces - let need_to_select_new_nonces = race_state - .nonces_to_submit() - .map(|nonces| nonce >= *nonces.start()) - .unwrap_or(false); - if need_to_select_new_nonces { - log::trace!( - target: "bridge", - "Latest nonce at target is {}. Clearing nonces to submit: {:?}", - nonce, - race_state.nonces_to_submit(), - ); - - race_state.reset_nonces_to_submit(); - } - - // if **some** of nonces that we have submitted already present at the - // target chain => select new nonces - let need_new_nonces_to_submit = race_state - .nonces_submitted() - .map(|nonces| nonce >= *nonces.start()) - .unwrap_or(false); - if need_new_nonces_to_submit { - log::trace!( - target: "bridge", - "Latest nonce at target is {}. 
Clearing submitted nonces: {:?}", - nonce, - race_state.nonces_submitted(), - ); - - race_state.reset_nonces_submitted(); - } - - self.best_target_nonce = Some(nonce); - } - - fn finalized_target_nonces_updated< - RS: RaceState< - HeaderId, - HeaderId, - >, - >( - &mut self, - nonces: TargetClientNonces<()>, - _race_state: &mut RS, - ) { - self.remove_le_nonces_from_source_queue(nonces.latest_nonce); - self.best_target_nonce = Some(std::cmp::max( - self.best_target_nonce.unwrap_or(nonces.latest_nonce), - nonces.latest_nonce, - )); - } - - async fn select_nonces_to_deliver< - RS: RaceState< - HeaderId, - HeaderId, - >, - >( - &self, - race_state: RS, - ) -> Option<(RangeInclusive, Self::ProofParameters)> { - let available_indices = self.available_source_queue_indices(race_state)?; - let range_begin = std::cmp::max( - self.best_target_nonce? + 1, - self.source_queue[*available_indices.start()].1.begin(), - ); - let range_end = self.source_queue[*available_indices.end()].1.end(); - Some((range_begin..=range_end, ())) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - message_lane::{MessageLane, SourceHeaderIdOf, TargetHeaderIdOf}, - message_lane_loop::tests::{ - header_id, TestMessageLane, TestMessagesProof, TestSourceHeaderHash, - TestSourceHeaderNumber, - }, - message_race_loop::RaceStateImpl, - }; - - type SourceNoncesRange = RangeInclusive; - - type TestRaceStateImpl = RaceStateImpl< - SourceHeaderIdOf, - TargetHeaderIdOf, - TestMessagesProof, - (), - >; - - type BasicStrategy

= super::BasicStrategy< -

::SourceHeaderNumber, -

::SourceHeaderHash, -

::TargetHeaderNumber, -

::TargetHeaderHash, - SourceNoncesRange, -

::MessagesProof, - >; - - fn source_nonces(new_nonces: SourceNoncesRange) -> SourceClientNonces { - SourceClientNonces { new_nonces, confirmed_nonce: None } - } - - fn target_nonces(latest_nonce: MessageNonce) -> TargetClientNonces<()> { - TargetClientNonces { latest_nonce, nonces_data: () } - } - - #[test] - fn strategy_is_empty_works() { - let mut strategy = BasicStrategy::::new(); - assert!(strategy.is_empty()); - strategy.source_nonces_updated(header_id(1), source_nonces(1..=1)); - assert!(!strategy.is_empty()); - } - - #[test] - fn best_at_source_is_never_lower_than_target_nonce() { - let mut strategy = BasicStrategy::::new(); - assert_eq!(strategy.best_at_source(), None); - strategy.source_nonces_updated(header_id(1), source_nonces(1..=5)); - assert_eq!(strategy.best_at_source(), None); - strategy.best_target_nonces_updated(target_nonces(10), &mut TestRaceStateImpl::default()); - assert_eq!(strategy.source_queue, vec![(header_id(1), 1..=5)]); - assert_eq!(strategy.best_at_source(), Some(10)); - } - - #[test] - fn source_nonce_is_never_lower_than_known_target_nonce() { - let mut strategy = BasicStrategy::::new(); - strategy.best_target_nonces_updated(target_nonces(10), &mut TestRaceStateImpl::default()); - strategy.source_nonces_updated(header_id(1), source_nonces(1..=5)); - assert_eq!(strategy.source_queue, vec![]); - } - - #[test] - fn source_nonce_is_never_lower_than_latest_known_source_nonce() { - let mut strategy = BasicStrategy::::new(); - strategy.source_nonces_updated(header_id(1), source_nonces(1..=5)); - strategy.source_nonces_updated(header_id(2), source_nonces(1..=3)); - strategy.source_nonces_updated(header_id(2), source_nonces(1..=5)); - assert_eq!(strategy.source_queue, vec![(header_id(1), 1..=5)]); - } - - #[test] - fn updated_target_nonce_removes_queued_entries() { - let mut strategy = BasicStrategy::::new(); - strategy.source_nonces_updated(header_id(1), source_nonces(1..=5)); - strategy.source_nonces_updated(header_id(2), 
source_nonces(6..=10)); - strategy.source_nonces_updated(header_id(3), source_nonces(11..=15)); - strategy.source_nonces_updated(header_id(4), source_nonces(16..=20)); - strategy - .finalized_target_nonces_updated(target_nonces(15), &mut TestRaceStateImpl::default()); - assert_eq!(strategy.source_queue, vec![(header_id(4), 16..=20)]); - strategy - .finalized_target_nonces_updated(target_nonces(17), &mut TestRaceStateImpl::default()); - assert_eq!(strategy.source_queue, vec![(header_id(4), 18..=20)]); - } - - #[test] - fn selected_nonces_are_dropped_on_target_nonce_update() { - let mut state = TestRaceStateImpl::default(); - let mut strategy = BasicStrategy::::new(); - state.nonces_to_submit = Some((header_id(1), 5..=10, (5..=10, None))); - // we are going to submit 5..=10, so having latest nonce 4 at target is fine - strategy.best_target_nonces_updated(target_nonces(4), &mut state); - assert!(state.nonces_to_submit.is_some()); - // any nonce larger than 4 invalidates the `nonces_to_submit` - for nonce in 5..=11 { - state.nonces_to_submit = Some((header_id(1), 5..=10, (5..=10, None))); - strategy.best_target_nonces_updated(target_nonces(nonce), &mut state); - assert!(state.nonces_to_submit.is_none()); - } - } - - #[test] - fn submitted_nonces_are_dropped_on_target_nonce_update() { - let mut state = TestRaceStateImpl::default(); - let mut strategy = BasicStrategy::::new(); - state.nonces_submitted = Some(5..=10); - // we have submitted 5..=10, so having latest nonce 4 at target is fine - strategy.best_target_nonces_updated(target_nonces(4), &mut state); - assert!(state.nonces_submitted.is_some()); - // any nonce larger than 4 invalidates the `nonces_submitted` - for nonce in 5..=11 { - state.nonces_submitted = Some(5..=10); - strategy.best_target_nonces_updated(target_nonces(nonce), &mut state); - assert!(state.nonces_submitted.is_none()); - } - } - - #[async_std::test] - async fn nothing_is_selected_if_something_is_already_selected() { - let mut state = 
TestRaceStateImpl::default(); - let mut strategy = BasicStrategy::::new(); - state.nonces_to_submit = Some((header_id(1), 1..=10, (1..=10, None))); - strategy.best_target_nonces_updated(target_nonces(0), &mut state); - strategy.source_nonces_updated(header_id(1), source_nonces(1..=10)); - assert_eq!(strategy.select_nonces_to_deliver(state.clone()).await, None); - } - - #[async_std::test] - async fn nothing_is_selected_if_something_is_already_submitted() { - let mut state = TestRaceStateImpl::default(); - let mut strategy = BasicStrategy::::new(); - state.nonces_submitted = Some(1..=10); - strategy.best_target_nonces_updated(target_nonces(0), &mut state); - strategy.source_nonces_updated(header_id(1), source_nonces(1..=10)); - assert_eq!(strategy.select_nonces_to_deliver(state.clone()).await, None); - } - - #[async_std::test] - async fn select_nonces_to_deliver_works() { - let mut state = TestRaceStateImpl::default(); - let mut strategy = BasicStrategy::::new(); - strategy.best_target_nonces_updated(target_nonces(0), &mut state); - strategy.source_nonces_updated(header_id(1), source_nonces(1..=1)); - strategy.source_nonces_updated(header_id(2), source_nonces(2..=2)); - strategy.source_nonces_updated(header_id(3), source_nonces(3..=6)); - strategy.source_nonces_updated(header_id(5), source_nonces(7..=8)); - - state.best_finalized_source_header_id_at_best_target = Some(header_id(4)); - assert_eq!(strategy.select_nonces_to_deliver(state.clone()).await, Some((1..=6, ()))); - strategy.best_target_nonces_updated(target_nonces(6), &mut state); - assert_eq!(strategy.select_nonces_to_deliver(state.clone()).await, None); - - state.best_finalized_source_header_id_at_best_target = Some(header_id(5)); - assert_eq!(strategy.select_nonces_to_deliver(state.clone()).await, Some((7..=8, ()))); - strategy.best_target_nonces_updated(target_nonces(8), &mut state); - assert_eq!(strategy.select_nonces_to_deliver(state.clone()).await, None); - } - - #[test] - fn 
available_source_queue_indices_works() { - let mut state = TestRaceStateImpl::default(); - let mut strategy = BasicStrategy::::new(); - strategy.best_target_nonces_updated(target_nonces(0), &mut state); - strategy.source_nonces_updated(header_id(1), source_nonces(1..=3)); - strategy.source_nonces_updated(header_id(2), source_nonces(4..=6)); - strategy.source_nonces_updated(header_id(3), source_nonces(7..=9)); - - state.best_finalized_source_header_id_at_best_target = Some(header_id(0)); - assert_eq!(strategy.available_source_queue_indices(state.clone()), None); - - state.best_finalized_source_header_id_at_best_target = Some(header_id(1)); - assert_eq!(strategy.available_source_queue_indices(state.clone()), Some(0..=0)); - - state.best_finalized_source_header_id_at_best_target = Some(header_id(2)); - assert_eq!(strategy.available_source_queue_indices(state.clone()), Some(0..=1)); - - state.best_finalized_source_header_id_at_best_target = Some(header_id(3)); - assert_eq!(strategy.available_source_queue_indices(state.clone()), Some(0..=2)); - - state.best_finalized_source_header_id_at_best_target = Some(header_id(4)); - assert_eq!(strategy.available_source_queue_indices(state), Some(0..=2)); - } - - #[test] - fn remove_le_nonces_from_source_queue_works() { - let mut state = TestRaceStateImpl::default(); - let mut strategy = BasicStrategy::::new(); - strategy.best_target_nonces_updated(target_nonces(0), &mut state); - strategy.source_nonces_updated(header_id(1), source_nonces(1..=3)); - strategy.source_nonces_updated(header_id(2), source_nonces(4..=6)); - strategy.source_nonces_updated(header_id(3), source_nonces(7..=9)); - - fn source_queue_nonces( - source_queue: &SourceRangesQueue< - TestSourceHeaderHash, - TestSourceHeaderNumber, - SourceNoncesRange, - >, - ) -> Vec { - source_queue.iter().flat_map(|(_, range)| range.clone()).collect() - } - - strategy.remove_le_nonces_from_source_queue(1); - assert_eq!(source_queue_nonces(&strategy.source_queue), vec![2, 3, 4, 5, 
6, 7, 8, 9],); - - strategy.remove_le_nonces_from_source_queue(5); - assert_eq!(source_queue_nonces(&strategy.source_queue), vec![6, 7, 8, 9],); - - strategy.remove_le_nonces_from_source_queue(9); - assert_eq!(source_queue_nonces(&strategy.source_queue), Vec::::new(),); - - strategy.remove_le_nonces_from_source_queue(100); - assert_eq!(source_queue_nonces(&strategy.source_queue), Vec::::new(),); - } - - #[async_std::test] - async fn previous_nonces_are_selected_if_reorg_happens_at_target_chain() { - let source_header_1 = header_id(1); - let target_header_1 = header_id(1); - - // we start in perfec sync state - all headers are synced and finalized on both ends - let mut state = TestRaceStateImpl { - best_finalized_source_header_id_at_source: Some(source_header_1), - best_finalized_source_header_id_at_best_target: Some(source_header_1), - best_target_header_id: Some(target_header_1), - best_finalized_target_header_id: Some(target_header_1), - nonces_to_submit: None, - nonces_to_submit_batch: None, - nonces_submitted: None, - }; - - // in this state we have 1 available nonce for delivery - let mut strategy = BasicStrategy:: { - source_queue: vec![(header_id(1), 1..=1)].into_iter().collect(), - best_target_nonce: Some(0), - _phantom: PhantomData, - }; - assert_eq!(strategy.select_nonces_to_deliver(state.clone()).await, Some((1..=1, ())),); - - // let's say we have submitted 1..=1 - state.nonces_submitted = Some(1..=1); - - // then new nonce 2 appear at the source block 2 - let source_header_2 = header_id(2); - state.best_finalized_source_header_id_at_source = Some(source_header_2); - strategy.source_nonces_updated( - source_header_2, - SourceClientNonces { new_nonces: 2..=2, confirmed_nonce: None }, - ); - // and nonce 1 appear at the best block of the target node (best finalized still has 0 - // nonces) - let target_header_2 = header_id(2); - state.best_target_header_id = Some(target_header_2); - strategy.best_target_nonces_updated( - TargetClientNonces { 
latest_nonce: 1, nonces_data: () }, - &mut state, - ); - - // then best target header is retracted - strategy.best_target_nonces_updated( - TargetClientNonces { latest_nonce: 0, nonces_data: () }, - &mut state, - ); - - // ... and some fork with zero delivered nonces is finalized - let target_header_2_fork = header_id(2_1); - state.best_finalized_source_header_id_at_source = Some(source_header_2); - state.best_finalized_source_header_id_at_best_target = Some(source_header_2); - state.best_target_header_id = Some(target_header_2_fork); - state.best_finalized_target_header_id = Some(target_header_2_fork); - strategy.finalized_target_nonces_updated( - TargetClientNonces { latest_nonce: 0, nonces_data: () }, - &mut state, - ); - - // now we have to select nonce 1 for delivery again - assert_eq!(strategy.select_nonces_to_deliver(state.clone()).await, Some((1..=2, ())),); - } -} diff --git a/relays/messages/src/metrics.rs b/relays/messages/src/metrics.rs deleted file mode 100644 index 69d80d178de809211b1874965adcf02fd76a66b8..0000000000000000000000000000000000000000 --- a/relays/messages/src/metrics.rs +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Metrics for message lane relay loop. 
- -use crate::{ - message_lane::MessageLane, - message_lane_loop::{SourceClientState, TargetClientState}, -}; - -use bp_messages::MessageNonce; -use finality_relay::SyncLoopMetrics; -use relay_utils::metrics::{ - metric_name, register, GaugeVec, Metric, Opts, PrometheusError, Registry, U64, -}; - -/// Message lane relay metrics. -/// -/// Cloning only clones references. -#[derive(Clone)] -pub struct MessageLaneLoopMetrics { - /// Best finalized block numbers - "source", "source_at_target", "target_at_source". - source_to_target_finality_metrics: SyncLoopMetrics, - /// Best finalized block numbers - "source", "target", "source_at_target", "target_at_source". - target_to_source_finality_metrics: SyncLoopMetrics, - /// Lane state nonces: "source_latest_generated", "source_latest_confirmed", - /// "target_latest_received", "target_latest_confirmed". - lane_state_nonces: GaugeVec, -} - -impl MessageLaneLoopMetrics { - /// Create and register messages loop metrics. - pub fn new(prefix: Option<&str>) -> Result { - Ok(MessageLaneLoopMetrics { - source_to_target_finality_metrics: SyncLoopMetrics::new( - prefix, - "source", - "source_at_target", - )?, - target_to_source_finality_metrics: SyncLoopMetrics::new( - prefix, - "target", - "target_at_source", - )?, - lane_state_nonces: GaugeVec::new( - Opts::new(metric_name(prefix, "lane_state_nonces"), "Nonces of the lane state"), - &["type"], - )?, - }) - } - - /// Update source client state metrics. - pub fn update_source_state(&self, source_client_state: SourceClientState

) { - self.source_to_target_finality_metrics - .update_best_block_at_source(source_client_state.best_self.0); - if let Some(best_finalized_peer_at_best_self) = - source_client_state.best_finalized_peer_at_best_self - { - self.target_to_source_finality_metrics - .update_best_block_at_target(best_finalized_peer_at_best_self.0); - if let Some(actual_best_finalized_peer_at_best_self) = - source_client_state.actual_best_finalized_peer_at_best_self - { - self.target_to_source_finality_metrics.update_using_same_fork( - best_finalized_peer_at_best_self.1 == actual_best_finalized_peer_at_best_self.1, - ); - } - } - } - - /// Update target client state metrics. - pub fn update_target_state(&self, target_client_state: TargetClientState

) { - self.target_to_source_finality_metrics - .update_best_block_at_source(target_client_state.best_self.0); - if let Some(best_finalized_peer_at_best_self) = - target_client_state.best_finalized_peer_at_best_self - { - self.source_to_target_finality_metrics - .update_best_block_at_target(best_finalized_peer_at_best_self.0); - if let Some(actual_best_finalized_peer_at_best_self) = - target_client_state.actual_best_finalized_peer_at_best_self - { - self.source_to_target_finality_metrics.update_using_same_fork( - best_finalized_peer_at_best_self.1 == actual_best_finalized_peer_at_best_self.1, - ); - } - } - } - - /// Update latest generated nonce at source. - pub fn update_source_latest_generated_nonce( - &self, - source_latest_generated_nonce: MessageNonce, - ) { - self.lane_state_nonces - .with_label_values(&["source_latest_generated"]) - .set(source_latest_generated_nonce); - } - - /// Update the latest confirmed nonce at source. - pub fn update_source_latest_confirmed_nonce( - &self, - source_latest_confirmed_nonce: MessageNonce, - ) { - self.lane_state_nonces - .with_label_values(&["source_latest_confirmed"]) - .set(source_latest_confirmed_nonce); - } - - /// Update the latest received nonce at target. - pub fn update_target_latest_received_nonce(&self, target_latest_generated_nonce: MessageNonce) { - self.lane_state_nonces - .with_label_values(&["target_latest_received"]) - .set(target_latest_generated_nonce); - } - - /// Update the latest confirmed nonce at target. 
- pub fn update_target_latest_confirmed_nonce( - &self, - target_latest_confirmed_nonce: MessageNonce, - ) { - self.lane_state_nonces - .with_label_values(&["target_latest_confirmed"]) - .set(target_latest_confirmed_nonce); - } -} - -impl Metric for MessageLaneLoopMetrics { - fn register(&self, registry: &Registry) -> Result<(), PrometheusError> { - self.source_to_target_finality_metrics.register(registry)?; - self.target_to_source_finality_metrics.register(registry)?; - register(self.lane_state_nonces.clone(), registry)?; - Ok(()) - } -} diff --git a/relays/parachains/Cargo.toml b/relays/parachains/Cargo.toml deleted file mode 100644 index 9dc35343b48c607048bd9219dc34dce3c6c80bfb..0000000000000000000000000000000000000000 --- a/relays/parachains/Cargo.toml +++ /dev/null @@ -1,26 +0,0 @@ -[package] -name = "parachains-relay" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] -async-std = "1.6.5" -async-trait = "0.1" -futures = "0.3.30" -log = { workspace = true } -relay-utils = { path = "../utils" } - -# Bridge dependencies - -bp-polkadot-core = { path = "../../primitives/polkadot-core" } -relay-substrate-client = { path = "../client-substrate" } - -[dev-dependencies] -codec = { package = "parity-scale-codec", version = "3.1.5" } -relay-substrate-client = { path = "../client-substrate", features = ["test-helpers"] } -sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } diff --git a/relays/parachains/README.md b/relays/parachains/README.md deleted file mode 100644 index bacd28594d8076f352a0eadf0d1176b1aac672a4..0000000000000000000000000000000000000000 --- a/relays/parachains/README.md +++ /dev/null @@ -1,49 +0,0 @@ -# Parachains Finality Relay - -The parachains finality relay works with two chains - source relay chain and target chain (which may be standalone -chain, relay chain or a parachain). 
The source chain must have the -[`paras` pallet](https://github.com/paritytech/polkadot/tree/master/runtime/parachains/src/paras) deployed at its -runtime. The target chain must have the [bridge parachains pallet](../../modules/parachains/) deployed at its runtime. - -The relay is configured to submit heads of one or several parachains. It pokes source chain periodically and compares -parachain heads that are known to the source relay chain to heads at the target chain. If there are new heads, -the relay submits them to the target chain. - -More: [Parachains Finality Relay Sequence Diagram](../../docs/parachains-finality-relay.html). - -## How to Use the Parachains Finality Relay - -There are only two traits that need to be implemented. The [`SourceChain`](./src/parachains_loop.rs) implementation -is supposed to connect to the source chain node. It must be able to read parachain heads from the `Heads` map of -the [`paras` pallet](https://github.com/paritytech/polkadot/tree/master/runtime/parachains/src/paras). -It also must create storage proofs of `Heads` map entries, when required. - -The [`TargetChain`](./src/parachains_loop.rs) implementation connects to the target chain node. It must be able -to return the best known head of given parachain. When required, it must be able to craft and submit parachains -finality delivery transaction to the target node. - -The main entrypoint for the crate is the [`run` function](./src/parachains_loop.rs), which takes source and target -clients and [`ParachainSyncParams`](./src/parachains_loop.rs) parameters. The most imporant parameter is the -`parachains` - it is the set of parachains, which relay tracks and updates. The other important parameter that -may affect the relay operational costs is the `strategy`. If it is set to `Any`, then the finality delivery -transaction is submitted if at least one of tracked parachain heads is updated. The other option is `All`. 
Then -the relay waits until all tracked parachain heads are updated and submits them all in a single finality delivery -transaction. - -## Parachain Finality Relay Metrics - -Every parachain in Polkadot is identified by the 32-bit number. All metrics, exposed by the parachains finality -relay have the `parachain` label, which is set to the parachain id. And the metrics are prefixed with the prefix, -that depends on the name of the source relay and target chains. The list below shows metrics names for -Rococo (source relay chain) to BridgeHubWestend (target chain) parachains finality relay. For other chains, simply -change chain names. So the metrics are: - -- `Rococo_to_BridgeHubWestend_Parachains_best_parachain_block_number_at_source` - returns best known parachain block - number, registered in the `paras` pallet at the source relay chain (Rococo in our example); - -- `Rococo_to_BridgeHubWestend_Parachains_best_parachain_block_number_at_target` - returns best known parachain block - number, registered in the bridge parachains pallet at the target chain (BridgeHubWestend in our example). - -If relay operates properly, you should see that the `Rococo_to_BridgeHubWestend_Parachains_best_parachain_block_number_at_target` -tries to reach the `Rococo_to_BridgeHubWestend_Parachains_best_parachain_block_number_at_source`. And the latter one -always increases. diff --git a/relays/parachains/src/lib.rs b/relays/parachains/src/lib.rs deleted file mode 100644 index 81ea983a6f76a71cb2d9f2e0264bba9e2481fab0..0000000000000000000000000000000000000000 --- a/relays/parachains/src/lib.rs +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use std::fmt::Debug; - -use relay_substrate_client::{Chain, Parachain}; - -pub mod parachains_loop; -pub mod parachains_loop_metrics; - -/// Finality proofs synchronization pipeline. -pub trait ParachainsPipeline: 'static + Clone + Debug + Send + Sync { - /// Relay chain which is storing parachain heads in its `paras` module. - type SourceRelayChain: Chain; - /// Parachain which headers we are syncing here. - type SourceParachain: Parachain; - /// Target chain (either relay or para) which wants to know about new parachain heads. - type TargetChain: Chain; -} diff --git a/relays/parachains/src/parachains_loop.rs b/relays/parachains/src/parachains_loop.rs deleted file mode 100644 index 55f236eeac1d842ff9ed0f337cf1ea2c725dc763..0000000000000000000000000000000000000000 --- a/relays/parachains/src/parachains_loop.rs +++ /dev/null @@ -1,1220 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -use crate::{parachains_loop_metrics::ParachainsLoopMetrics, ParachainsPipeline}; - -use async_trait::async_trait; -use bp_polkadot_core::{ - parachains::{ParaHash, ParaHeadsProof, ParaId}, - BlockNumber as RelayBlockNumber, -}; -use futures::{ - future::{FutureExt, Shared}, - poll, select_biased, -}; -use relay_substrate_client::{BlockNumberOf, Chain, HeaderIdOf, ParachainBase}; -use relay_utils::{ - metrics::MetricsParams, relay_loop::Client as RelayClient, FailedClient, - TrackedTransactionStatus, TransactionTracker, -}; -use std::{future::Future, pin::Pin, task::Poll}; - -/// Parachain header availability at a certain chain. -#[derive(Clone, Copy, Debug)] -pub enum AvailableHeader { - /// The client can not report actual parachain head at this moment. - /// - /// It is a "mild" error, which may appear when e.g. on-demand parachains relay is used. - /// This variant must be treated as "we don't want to update parachain head value at the - /// target chain at this moment". - Unavailable, - /// There's no parachain header at the relay chain. - /// - /// Normally it means that the parachain is not registered there. - Missing, - /// Parachain head with given hash is available at the source chain. - Available(T), -} - -impl AvailableHeader { - /// Return available header. - pub fn as_available(&self) -> Option<&T> { - match *self { - AvailableHeader::Available(ref header) => Some(header), - _ => None, - } - } -} - -impl From> for AvailableHeader { - fn from(maybe_header: Option) -> AvailableHeader { - match maybe_header { - Some(header) => AvailableHeader::Available(header), - None => AvailableHeader::Missing, - } - } -} - -/// Source client used in parachain heads synchronization loop. -#[async_trait] -pub trait SourceClient: RelayClient { - /// Returns `Ok(true)` if client is in synced state. - async fn ensure_synced(&self) -> Result; - - /// Get parachain head id at given block. 
- async fn parachain_head( - &self, - at_block: HeaderIdOf, - ) -> Result>, Self::Error>; - - /// Get parachain head proof at given block. - async fn prove_parachain_head( - &self, - at_block: HeaderIdOf, - ) -> Result<(ParaHeadsProof, ParaHash), Self::Error>; -} - -/// Target client used in parachain heads synchronization loop. -#[async_trait] -pub trait TargetClient: RelayClient { - /// Transaction tracker to track submitted transactions. - type TransactionTracker: TransactionTracker>; - - /// Get best block id. - async fn best_block(&self) -> Result, Self::Error>; - - /// Get best finalized source relay chain block id. If `free_source_relay_headers_interval` - /// is `Some(_)`, the returned - async fn best_finalized_source_relay_chain_block( - &self, - at_block: &HeaderIdOf, - ) -> Result, Self::Error>; - /// Get free source **relay** headers submission interval, if it is configured in the - /// target runtime. We assume that the target chain will accept parachain header, proved - /// at such relay header for free. - async fn free_source_relay_headers_interval( - &self, - ) -> Result>, Self::Error>; - - /// Get parachain head id at given block. - async fn parachain_head( - &self, - at_block: HeaderIdOf, - ) -> Result< - Option<(HeaderIdOf, HeaderIdOf)>, - Self::Error, - >; - - /// Submit parachain heads proof. - async fn submit_parachain_head_proof( - &self, - at_source_block: HeaderIdOf, - para_head_hash: ParaHash, - proof: ParaHeadsProof, - is_free_execution_expected: bool, - ) -> Result; -} - -/// Return prefix that will be used by default to expose Prometheus metrics of the parachains -/// sync loop. -pub fn metrics_prefix() -> String { - format!( - "{}_to_{}_Parachains_{}", - P::SourceRelayChain::NAME, - P::TargetChain::NAME, - P::SourceParachain::PARACHAIN_ID - ) -} - -/// Run parachain heads synchronization. -pub async fn run( - source_client: impl SourceClient

, - target_client: impl TargetClient

, - metrics_params: MetricsParams, - only_free_headers: bool, - exit_signal: impl Future + 'static + Send, -) -> Result<(), relay_utils::Error> -where - P::SourceRelayChain: Chain, -{ - let exit_signal = exit_signal.shared(); - relay_utils::relay_loop(source_client, target_client) - .with_metrics(metrics_params) - .loop_metric(ParachainsLoopMetrics::new(Some(&metrics_prefix::

()))?)? - .expose() - .await? - .run(metrics_prefix::

(), move |source_client, target_client, metrics| { - run_until_connection_lost( - source_client, - target_client, - metrics, - only_free_headers, - exit_signal.clone(), - ) - }) - .await -} - -/// Run parachain heads synchronization. -async fn run_until_connection_lost( - source_client: impl SourceClient

, - target_client: impl TargetClient

, - metrics: Option, - only_free_headers: bool, - exit_signal: impl Future + Send, -) -> Result<(), FailedClient> -where - P::SourceRelayChain: Chain, -{ - let exit_signal = exit_signal.fuse(); - let min_block_interval = std::cmp::min( - P::SourceRelayChain::AVERAGE_BLOCK_INTERVAL, - P::TargetChain::AVERAGE_BLOCK_INTERVAL, - ); - - // free parachain header = header, available (proved) at free relay chain block. Let's - // read interval of free source relay chain blocks from target client - let free_source_relay_headers_interval = if only_free_headers { - let free_source_relay_headers_interval = - target_client.free_source_relay_headers_interval().await.map_err(|e| { - log::warn!( - target: "bridge", - "Failed to read free {} headers interval at {}: {:?}", - P::SourceRelayChain::NAME, - P::TargetChain::NAME, - e, - ); - FailedClient::Target - })?; - match free_source_relay_headers_interval { - Some(free_source_relay_headers_interval) if free_source_relay_headers_interval != 0 => { - log::trace!( - target: "bridge", - "Free {} headers interval at {}: {:?}", - P::SourceRelayChain::NAME, - P::TargetChain::NAME, - free_source_relay_headers_interval, - ); - free_source_relay_headers_interval - }, - _ => { - log::warn!( - target: "bridge", - "Invalid free {} headers interval at {}: {:?}", - P::SourceRelayChain::NAME, - P::TargetChain::NAME, - free_source_relay_headers_interval, - ); - return Err(FailedClient::Target) - }, - } - } else { - // ignore - we don't need it - 0 - }; - - let mut submitted_heads_tracker: Option> = None; - - futures::pin_mut!(exit_signal); - - // Note that the internal loop breaks with `FailedClient` error even if error is non-connection. - // It is Ok for now, but it may need to be fixed in the future to use exponential backoff for - // regular errors. - - loop { - // Either wait for new block, or exit signal. 
- // Please note that we are prioritizing the exit signal since if both events happen at once - // it doesn't make sense to perform one more loop iteration. - select_biased! { - _ = exit_signal => return Ok(()), - _ = async_std::task::sleep(min_block_interval).fuse() => {}, - } - - // if source client is not yet synced, we'll need to sleep. Otherwise we risk submitting too - // much redundant transactions - match source_client.ensure_synced().await { - Ok(true) => (), - Ok(false) => { - log::warn!( - target: "bridge", - "{} client is syncing. Won't do anything until it is synced", - P::SourceRelayChain::NAME, - ); - continue - }, - Err(e) => { - log::warn!( - target: "bridge", - "{} client has failed to return its sync status: {:?}", - P::SourceRelayChain::NAME, - e, - ); - return Err(FailedClient::Source) - }, - } - - // if we have active transaction, we'll need to wait until it is mined or dropped - let best_target_block = target_client.best_block().await.map_err(|e| { - log::warn!(target: "bridge", "Failed to read best {} block: {:?}", P::SourceRelayChain::NAME, e); - FailedClient::Target - })?; - let (relay_of_head_at_target, head_at_target) = - read_head_at_target(&target_client, metrics.as_ref(), &best_target_block).await?; - - // check if our transaction has been mined - if let Some(tracker) = submitted_heads_tracker.take() { - match tracker.update(&best_target_block, &head_at_target).await { - SubmittedHeadStatus::Waiting(tracker) => { - // no news about our transaction and we shall keep waiting - submitted_heads_tracker = Some(tracker); - continue - }, - SubmittedHeadStatus::Final(TrackedTransactionStatus::Finalized(_)) => { - // all heads have been updated, we don't need this tracker anymore - }, - SubmittedHeadStatus::Final(TrackedTransactionStatus::Lost) => { - log::warn!( - target: "bridge", - "Parachains synchronization from {} to {} has stalled. 
Going to restart", - P::SourceRelayChain::NAME, - P::TargetChain::NAME, - ); - - return Err(FailedClient::Both) - }, - } - } - - // in all-headers strategy we'll be submitting para head, available at - // `best_finalized_relay_block_at_target` - let best_finalized_relay_block_at_target = target_client - .best_finalized_source_relay_chain_block(&best_target_block) - .await - .map_err(|e| { - log::warn!( - target: "bridge", - "Failed to read best finalized {} block from {}: {:?}", - P::SourceRelayChain::NAME, - P::TargetChain::NAME, - e, - ); - FailedClient::Target - })?; - - // ..but if we only need to submit free headers, we need to submit para - // head, available at best free source relay chain header, known to the - // target chain - let prove_at_relay_block = if only_free_headers { - match relay_of_head_at_target { - Some(relay_of_head_at_target) => { - // find last free relay chain header in the range that we are interested in - let scan_range_begin = relay_of_head_at_target.number(); - let scan_range_end = best_finalized_relay_block_at_target.number(); - if scan_range_end.saturating_sub(scan_range_begin) < - free_source_relay_headers_interval - { - // there are no new **free** relay chain headers in the range - log::trace!( - target: "bridge", - "Waiting for new free {} headers at {}: scanned {:?}..={:?}", - P::SourceRelayChain::NAME, - P::TargetChain::NAME, - scan_range_begin, - scan_range_end, - ); - continue; - } - - // we may submit new parachain head for free - best_finalized_relay_block_at_target - }, - None => { - // no parachain head at target => let's submit first one - best_finalized_relay_block_at_target - }, - } - } else { - best_finalized_relay_block_at_target - }; - - // now let's check if we need to update parachain head at all - let head_at_source = - read_head_at_source(&source_client, metrics.as_ref(), &prove_at_relay_block).await?; - let is_update_required = is_update_required::

( - head_at_source, - head_at_target, - prove_at_relay_block, - best_target_block, - ); - - if is_update_required { - let (head_proof, head_hash) = - source_client.prove_parachain_head(prove_at_relay_block).await.map_err(|e| { - log::warn!( - target: "bridge", - "Failed to prove {} parachain ParaId({}) heads: {:?}", - P::SourceRelayChain::NAME, - P::SourceParachain::PARACHAIN_ID, - e, - ); - FailedClient::Source - })?; - log::info!( - target: "bridge", - "Submitting {} parachain ParaId({}) head update transaction to {}. Para hash at source relay {:?}: {:?}", - P::SourceRelayChain::NAME, - P::SourceParachain::PARACHAIN_ID, - P::TargetChain::NAME, - prove_at_relay_block, - head_hash, - ); - - let transaction_tracker = target_client - .submit_parachain_head_proof( - prove_at_relay_block, - head_hash, - head_proof, - only_free_headers, - ) - .await - .map_err(|e| { - log::warn!( - target: "bridge", - "Failed to submit {} parachain ParaId({}) heads proof to {}: {:?}", - P::SourceRelayChain::NAME, - P::SourceParachain::PARACHAIN_ID, - P::TargetChain::NAME, - e, - ); - FailedClient::Target - })?; - submitted_heads_tracker = - Some(SubmittedHeadsTracker::

::new(head_at_source, transaction_tracker)); - } - } -} - -/// Returns `true` if we need to submit parachain-head-update transaction. -fn is_update_required( - head_at_source: AvailableHeader>, - head_at_target: Option>, - prove_at_relay_block: HeaderIdOf, - best_target_block: HeaderIdOf, -) -> bool -where - P::SourceRelayChain: Chain, -{ - log::trace!( - target: "bridge", - "Checking if {} parachain ParaId({}) needs update at {}:\n\t\ - At {} ({:?}): {:?}\n\t\ - At {} ({:?}): {:?}", - P::SourceRelayChain::NAME, - P::SourceParachain::PARACHAIN_ID, - P::TargetChain::NAME, - P::SourceRelayChain::NAME, - prove_at_relay_block, - head_at_source, - P::TargetChain::NAME, - best_target_block, - head_at_target, - ); - - let needs_update = match (head_at_source, head_at_target) { - (AvailableHeader::Unavailable, _) => { - // source client has politely asked us not to update current parachain head - // at the target chain - false - }, - (AvailableHeader::Available(head_at_source), Some(head_at_target)) - if head_at_source.number() > head_at_target.number() => - { - // source client knows head that is better than the head known to the target - // client - true - }, - (AvailableHeader::Available(_), Some(_)) => { - // this is normal case when relay has recently updated heads, when parachain is - // not progressing, or when our source client is still syncing - false - }, - (AvailableHeader::Available(_), None) => { - // parachain is not yet known to the target client. This is true when parachain - // or bridge has been just onboarded/started - true - }, - (AvailableHeader::Missing, Some(_)) => { - // parachain/parathread has been offboarded removed from the system. 
It needs to - // be propageted to the target client - true - }, - (AvailableHeader::Missing, None) => { - // all's good - parachain is unknown to both clients - false - }, - }; - - if needs_update { - log::trace!( - target: "bridge", - "{} parachain ParaId({}) needs update at {}: {:?} vs {:?}", - P::SourceRelayChain::NAME, - P::SourceParachain::PARACHAIN_ID, - P::TargetChain::NAME, - head_at_source, - head_at_target, - ); - } - - needs_update -} - -/// Reads parachain head from the source client. -async fn read_head_at_source( - source_client: &impl SourceClient

, - metrics: Option<&ParachainsLoopMetrics>, - at_relay_block: &HeaderIdOf, -) -> Result>, FailedClient> { - let para_head = source_client.parachain_head(*at_relay_block).await; - match para_head { - Ok(AvailableHeader::Available(para_head)) => { - if let Some(metrics) = metrics { - metrics.update_best_parachain_block_at_source( - ParaId(P::SourceParachain::PARACHAIN_ID), - para_head.number(), - ); - } - Ok(AvailableHeader::Available(para_head)) - }, - Ok(r) => Ok(r), - Err(e) => { - log::warn!( - target: "bridge", - "Failed to read head of {} parachain ParaId({:?}): {:?}", - P::SourceRelayChain::NAME, - P::SourceParachain::PARACHAIN_ID, - e, - ); - Err(FailedClient::Source) - }, - } -} - -/// Reads parachain head from the target client. Also returns source relay chain header -/// that has been used to prove that head. -async fn read_head_at_target( - target_client: &impl TargetClient

, - metrics: Option<&ParachainsLoopMetrics>, - at_block: &HeaderIdOf, -) -> Result< - (Option>, Option>), - FailedClient, -> { - let para_head_id = target_client.parachain_head(*at_block).await; - match para_head_id { - Ok(Some((relay_header_id, para_head_id))) => { - if let Some(metrics) = metrics { - metrics.update_best_parachain_block_at_target( - ParaId(P::SourceParachain::PARACHAIN_ID), - para_head_id.number(), - ); - } - Ok((Some(relay_header_id), Some(para_head_id))) - }, - Ok(None) => Ok((None, None)), - Err(e) => { - log::warn!( - target: "bridge", - "Failed to read head of {} parachain ParaId({}) at {}: {:?}", - P::SourceRelayChain::NAME, - P::SourceParachain::PARACHAIN_ID, - P::TargetChain::NAME, - e, - ); - Err(FailedClient::Target) - }, - } -} - -/// Submitted heads status. -enum SubmittedHeadStatus { - /// Heads are not yet updated. - Waiting(SubmittedHeadsTracker

), - /// Heads transaction has either been finalized or lost (i.e. received its "final" status). - Final(TrackedTransactionStatus>), -} - -/// Type of the transaction tracker that the `SubmittedHeadsTracker` is using. -/// -/// It needs to be shared because of `poll` macro and our consuming `update` method. -type SharedTransactionTracker

= Shared< - Pin< - Box< - dyn Future< - Output = TrackedTransactionStatus< - HeaderIdOf<

::TargetChain>, - >, - > + Send, - >, - >, ->; - -/// Submitted parachain heads transaction. -struct SubmittedHeadsTracker { - /// Parachain header id that we have submitted. - submitted_head: AvailableHeader>, - /// Future that waits for submitted transaction finality or loss. - /// - /// It needs to be shared because of `poll` macro and our consuming `update` method. - transaction_tracker: SharedTransactionTracker

, -} - -impl SubmittedHeadsTracker

{ - /// Creates new parachain heads transaction tracker. - pub fn new( - submitted_head: AvailableHeader>, - transaction_tracker: impl TransactionTracker> + 'static, - ) -> Self { - SubmittedHeadsTracker { - submitted_head, - transaction_tracker: transaction_tracker.wait().fuse().boxed().shared(), - } - } - - /// Returns `None` if all submitted parachain heads have been updated. - pub async fn update( - self, - at_target_block: &HeaderIdOf, - head_at_target: &Option>, - ) -> SubmittedHeadStatus

{ - // check if our head has been updated - let is_head_updated = match (self.submitted_head, head_at_target) { - (AvailableHeader::Available(submitted_head), Some(head_at_target)) - if head_at_target.number() >= submitted_head.number() => - true, - (AvailableHeader::Missing, None) => true, - _ => false, - }; - if is_head_updated { - log::trace!( - target: "bridge", - "Head of parachain ParaId({}) has been updated at {}: {:?}", - P::SourceParachain::PARACHAIN_ID, - P::TargetChain::NAME, - head_at_target, - ); - - return SubmittedHeadStatus::Final(TrackedTransactionStatus::Finalized(*at_target_block)) - } - - // if underlying transaction tracker has reported that the transaction is lost, we may - // then restart our sync - let transaction_tracker = self.transaction_tracker.clone(); - match poll!(transaction_tracker) { - Poll::Ready(TrackedTransactionStatus::Lost) => - return SubmittedHeadStatus::Final(TrackedTransactionStatus::Lost), - Poll::Ready(TrackedTransactionStatus::Finalized(_)) => { - // so we are here and our transaction is mined+finalized, but some of heads were not - // updated => we're considering our loop as stalled - return SubmittedHeadStatus::Final(TrackedTransactionStatus::Lost) - }, - _ => (), - } - - SubmittedHeadStatus::Waiting(self) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use async_std::sync::{Arc, Mutex}; - use codec::Encode; - use futures::{SinkExt, StreamExt}; - use relay_substrate_client::test_chain::{TestChain, TestParachain}; - use relay_utils::{HeaderId, MaybeConnectionError}; - use sp_core::H256; - use std::collections::HashMap; - - const PARA_10_HASH: ParaHash = H256([10u8; 32]); - const PARA_20_HASH: ParaHash = H256([20u8; 32]); - - #[derive(Clone, Debug)] - enum TestError { - Error, - } - - impl MaybeConnectionError for TestError { - fn is_connection_error(&self) -> bool { - false - } - } - - #[derive(Clone, Debug, PartialEq, Eq)] - struct TestParachainsPipeline; - - impl ParachainsPipeline for TestParachainsPipeline { 
- type SourceRelayChain = TestChain; - type SourceParachain = TestParachain; - type TargetChain = TestChain; - } - - #[derive(Clone, Debug)] - struct TestClient { - data: Arc>, - } - - #[derive(Clone, Debug)] - struct TestTransactionTracker(Option>>); - - #[async_trait] - impl TransactionTracker for TestTransactionTracker { - type HeaderId = HeaderIdOf; - - async fn wait(self) -> TrackedTransactionStatus> { - match self.0 { - Some(status) => status, - None => futures::future::pending().await, - } - } - } - - #[derive(Clone, Debug)] - struct TestClientData { - source_sync_status: Result, - source_head: HashMap< - BlockNumberOf, - Result>, TestError>, - >, - source_proof: Result<(), TestError>, - - target_free_source_relay_headers_interval: - Result>, TestError>, - target_best_block: Result, TestError>, - target_best_finalized_source_block: Result, TestError>, - #[allow(clippy::type_complexity)] - target_head: Result, HeaderIdOf)>, TestError>, - target_submit_result: Result<(), TestError>, - - submitted_proof_at_source_relay_block: Option>, - exit_signal_sender: Option>>, - } - - impl TestClientData { - pub fn minimal() -> Self { - TestClientData { - source_sync_status: Ok(true), - source_head: vec![(0, Ok(AvailableHeader::Available(HeaderId(0, PARA_20_HASH))))] - .into_iter() - .collect(), - source_proof: Ok(()), - - target_free_source_relay_headers_interval: Ok(None), - target_best_block: Ok(HeaderId(0, Default::default())), - target_best_finalized_source_block: Ok(HeaderId(0, Default::default())), - target_head: Ok(None), - target_submit_result: Ok(()), - - submitted_proof_at_source_relay_block: None, - exit_signal_sender: None, - } - } - - pub fn with_exit_signal_sender( - sender: futures::channel::mpsc::UnboundedSender<()>, - ) -> Self { - let mut client = Self::minimal(); - client.exit_signal_sender = Some(Box::new(sender)); - client - } - } - - impl From for TestClient { - fn from(data: TestClientData) -> TestClient { - TestClient { data: 
Arc::new(Mutex::new(data)) } - } - } - - #[async_trait] - impl RelayClient for TestClient { - type Error = TestError; - - async fn reconnect(&mut self) -> Result<(), TestError> { - unimplemented!() - } - } - - #[async_trait] - impl SourceClient for TestClient { - async fn ensure_synced(&self) -> Result { - self.data.lock().await.source_sync_status.clone() - } - - async fn parachain_head( - &self, - at_block: HeaderIdOf, - ) -> Result>, TestError> { - self.data - .lock() - .await - .source_head - .get(&at_block.0) - .expect(&format!("SourceClient::parachain_head({})", at_block.0)) - .clone() - } - - async fn prove_parachain_head( - &self, - at_block: HeaderIdOf, - ) -> Result<(ParaHeadsProof, ParaHash), TestError> { - let head_result = - SourceClient::::parachain_head(self, at_block).await?; - let head = head_result.as_available().unwrap(); - let storage_proof = vec![head.hash().encode()]; - let proof = (ParaHeadsProof { storage_proof }, head.hash()); - self.data.lock().await.source_proof.clone().map(|_| proof) - } - } - - #[async_trait] - impl TargetClient for TestClient { - type TransactionTracker = TestTransactionTracker; - - async fn best_block(&self) -> Result, TestError> { - self.data.lock().await.target_best_block.clone() - } - - async fn best_finalized_source_relay_chain_block( - &self, - _at_block: &HeaderIdOf, - ) -> Result, TestError> { - self.data.lock().await.target_best_finalized_source_block.clone() - } - - async fn free_source_relay_headers_interval( - &self, - ) -> Result>, TestError> { - self.data.lock().await.target_free_source_relay_headers_interval.clone() - } - - async fn parachain_head( - &self, - _at_block: HeaderIdOf, - ) -> Result, HeaderIdOf)>, TestError> { - self.data.lock().await.target_head.clone() - } - - async fn submit_parachain_head_proof( - &self, - at_source_block: HeaderIdOf, - _updated_parachain_head: ParaHash, - _proof: ParaHeadsProof, - _is_free_execution_expected: bool, - ) -> Result { - let mut data = self.data.lock().await; 
- data.target_submit_result.clone()?; - data.submitted_proof_at_source_relay_block = Some(at_source_block); - - if let Some(mut exit_signal_sender) = data.exit_signal_sender.take() { - exit_signal_sender.send(()).await.unwrap(); - } - Ok(TestTransactionTracker(Some( - TrackedTransactionStatus::Finalized(Default::default()), - ))) - } - } - - #[test] - fn when_source_client_fails_to_return_sync_state() { - let mut test_source_client = TestClientData::minimal(); - test_source_client.source_sync_status = Err(TestError::Error); - - assert_eq!( - async_std::task::block_on(run_until_connection_lost( - TestClient::from(test_source_client), - TestClient::from(TestClientData::minimal()), - None, - false, - futures::future::pending(), - )), - Err(FailedClient::Source), - ); - } - - #[test] - fn when_target_client_fails_to_return_best_block() { - let mut test_target_client = TestClientData::minimal(); - test_target_client.target_best_block = Err(TestError::Error); - - assert_eq!( - async_std::task::block_on(run_until_connection_lost( - TestClient::from(TestClientData::minimal()), - TestClient::from(test_target_client), - None, - false, - futures::future::pending(), - )), - Err(FailedClient::Target), - ); - } - - #[test] - fn when_target_client_fails_to_read_heads() { - let mut test_target_client = TestClientData::minimal(); - test_target_client.target_head = Err(TestError::Error); - - assert_eq!( - async_std::task::block_on(run_until_connection_lost( - TestClient::from(TestClientData::minimal()), - TestClient::from(test_target_client), - None, - false, - futures::future::pending(), - )), - Err(FailedClient::Target), - ); - } - - #[test] - fn when_target_client_fails_to_read_best_finalized_source_block() { - let mut test_target_client = TestClientData::minimal(); - test_target_client.target_best_finalized_source_block = Err(TestError::Error); - - assert_eq!( - async_std::task::block_on(run_until_connection_lost( - TestClient::from(TestClientData::minimal()), - 
TestClient::from(test_target_client), - None, - false, - futures::future::pending(), - )), - Err(FailedClient::Target), - ); - } - - #[test] - fn when_source_client_fails_to_read_heads() { - let mut test_source_client = TestClientData::minimal(); - test_source_client.source_head.insert(0, Err(TestError::Error)); - - assert_eq!( - async_std::task::block_on(run_until_connection_lost( - TestClient::from(test_source_client), - TestClient::from(TestClientData::minimal()), - None, - false, - futures::future::pending(), - )), - Err(FailedClient::Source), - ); - } - - #[test] - fn when_source_client_fails_to_prove_heads() { - let mut test_source_client = TestClientData::minimal(); - test_source_client.source_proof = Err(TestError::Error); - - assert_eq!( - async_std::task::block_on(run_until_connection_lost( - TestClient::from(test_source_client), - TestClient::from(TestClientData::minimal()), - None, - false, - futures::future::pending(), - )), - Err(FailedClient::Source), - ); - } - - #[test] - fn when_target_client_rejects_update_transaction() { - let mut test_target_client = TestClientData::minimal(); - test_target_client.target_submit_result = Err(TestError::Error); - - assert_eq!( - async_std::task::block_on(run_until_connection_lost( - TestClient::from(TestClientData::minimal()), - TestClient::from(test_target_client), - None, - false, - futures::future::pending(), - )), - Err(FailedClient::Target), - ); - } - - #[test] - fn minimal_working_case() { - let (exit_signal_sender, exit_signal) = futures::channel::mpsc::unbounded(); - assert_eq!( - async_std::task::block_on(run_until_connection_lost( - TestClient::from(TestClientData::minimal()), - TestClient::from(TestClientData::with_exit_signal_sender(exit_signal_sender)), - None, - false, - exit_signal.into_future().map(|(_, _)| ()), - )), - Ok(()), - ); - } - - #[async_std::test] - async fn free_headers_are_relayed() { - // prepare following case: - // 1) best source relay at target: 95 - // 2) best source parachain 
at target: 5 at relay 50 - // 3) free headers interval: 10 - // 4) at source relay chain block 90 source parachain block is 9 - // + - // 5) best finalized source relay chain block is 95 - // 6) at source relay chain block 95 source parachain block is 42 - // => - // parachain block 42 would have been relayed, because 95 - 50 > 10 - let (exit_signal_sender, exit_signal) = futures::channel::mpsc::unbounded(); - let clients_data = TestClientData { - source_sync_status: Ok(true), - source_head: vec![ - (90, Ok(AvailableHeader::Available(HeaderId(9, [9u8; 32].into())))), - (95, Ok(AvailableHeader::Available(HeaderId(42, [42u8; 32].into())))), - ] - .into_iter() - .collect(), - source_proof: Ok(()), - - target_free_source_relay_headers_interval: Ok(Some(10)), - target_best_block: Ok(HeaderId(200, [200u8; 32].into())), - target_best_finalized_source_block: Ok(HeaderId(95, [95u8; 32].into())), - target_head: Ok(Some((HeaderId(50, [50u8; 32].into()), HeaderId(5, [5u8; 32].into())))), - target_submit_result: Ok(()), - - submitted_proof_at_source_relay_block: None, - exit_signal_sender: Some(Box::new(exit_signal_sender)), - }; - - let source_client = TestClient::from(clients_data.clone()); - let target_client = TestClient::from(clients_data); - assert_eq!( - run_until_connection_lost( - source_client, - target_client.clone(), - None, - true, - exit_signal.into_future().map(|(_, _)| ()), - ) - .await, - Ok(()), - ); - - assert_eq!( - target_client - .data - .lock() - .await - .submitted_proof_at_source_relay_block - .map(|id| id.0), - Some(95) - ); - - // now source relay block chain 104 is mined with parachain head #84 - // => since 104 - 95 < 10, there are no free headers - // => nothing is submitted - let mut clients_data: TestClientData = target_client.data.lock().await.clone(); - clients_data - .source_head - .insert(104, Ok(AvailableHeader::Available(HeaderId(84, [84u8; 32].into())))); - clients_data.target_best_finalized_source_block = Ok(HeaderId(104, [104u8; 
32].into())); - clients_data.target_head = - Ok(Some((HeaderId(95, [95u8; 32].into()), HeaderId(42, [42u8; 32].into())))); - clients_data.target_best_block = Ok(HeaderId(255, [255u8; 32].into())); - clients_data.exit_signal_sender = None; - - let source_client = TestClient::from(clients_data.clone()); - let target_client = TestClient::from(clients_data); - assert_eq!( - run_until_connection_lost( - source_client, - target_client.clone(), - None, - true, - async_std::task::sleep(std::time::Duration::from_millis(100)), - ) - .await, - Ok(()), - ); - - assert_eq!( - target_client - .data - .lock() - .await - .submitted_proof_at_source_relay_block - .map(|id| id.0), - Some(95) - ); - } - - fn test_tx_tracker() -> SubmittedHeadsTracker { - SubmittedHeadsTracker::new( - AvailableHeader::Available(HeaderId(20, PARA_20_HASH)), - TestTransactionTracker(None), - ) - } - - impl From> for Option<()> { - fn from(status: SubmittedHeadStatus) -> Option<()> { - match status { - SubmittedHeadStatus::Waiting(_) => Some(()), - _ => None, - } - } - } - - #[async_std::test] - async fn tx_tracker_update_when_head_at_target_has_none_value() { - assert_eq!( - Some(()), - test_tx_tracker() - .update(&HeaderId(0, Default::default()), &Some(HeaderId(10, PARA_10_HASH))) - .await - .into(), - ); - } - - #[async_std::test] - async fn tx_tracker_update_when_head_at_target_has_old_value() { - assert_eq!( - Some(()), - test_tx_tracker() - .update(&HeaderId(0, Default::default()), &Some(HeaderId(10, PARA_10_HASH))) - .await - .into(), - ); - } - - #[async_std::test] - async fn tx_tracker_update_when_head_at_target_has_same_value() { - assert!(matches!( - test_tx_tracker() - .update(&HeaderId(0, Default::default()), &Some(HeaderId(20, PARA_20_HASH))) - .await, - SubmittedHeadStatus::Final(TrackedTransactionStatus::Finalized(_)), - )); - } - - #[async_std::test] - async fn tx_tracker_update_when_head_at_target_has_better_value() { - assert!(matches!( - test_tx_tracker() - .update(&HeaderId(0, 
Default::default()), &Some(HeaderId(30, PARA_20_HASH))) - .await, - SubmittedHeadStatus::Final(TrackedTransactionStatus::Finalized(_)), - )); - } - - #[async_std::test] - async fn tx_tracker_update_when_tx_is_lost() { - let mut tx_tracker = test_tx_tracker(); - tx_tracker.transaction_tracker = - futures::future::ready(TrackedTransactionStatus::Lost).boxed().shared(); - assert!(matches!( - tx_tracker - .update(&HeaderId(0, Default::default()), &Some(HeaderId(10, PARA_10_HASH))) - .await, - SubmittedHeadStatus::Final(TrackedTransactionStatus::Lost), - )); - } - - #[async_std::test] - async fn tx_tracker_update_when_tx_is_finalized_but_heads_are_not_updated() { - let mut tx_tracker = test_tx_tracker(); - tx_tracker.transaction_tracker = - futures::future::ready(TrackedTransactionStatus::Finalized(Default::default())) - .boxed() - .shared(); - assert!(matches!( - tx_tracker - .update(&HeaderId(0, Default::default()), &Some(HeaderId(10, PARA_10_HASH))) - .await, - SubmittedHeadStatus::Final(TrackedTransactionStatus::Lost), - )); - } - - #[test] - fn parachain_is_not_updated_if_it_is_unavailable() { - assert!(!is_update_required::( - AvailableHeader::Unavailable, - None, - Default::default(), - Default::default(), - )); - assert!(!is_update_required::( - AvailableHeader::Unavailable, - Some(HeaderId(10, PARA_10_HASH)), - Default::default(), - Default::default(), - )); - } - - #[test] - fn parachain_is_not_updated_if_it_is_unknown_to_both_clients() { - assert!(!is_update_required::( - AvailableHeader::Missing, - None, - Default::default(), - Default::default(), - ),); - } - - #[test] - fn parachain_is_not_updated_if_target_has_better_head() { - assert!(!is_update_required::( - AvailableHeader::Available(HeaderId(10, Default::default())), - Some(HeaderId(20, Default::default())), - Default::default(), - Default::default(), - ),); - } - - #[test] - fn parachain_is_updated_after_offboarding() { - assert!(is_update_required::( - AvailableHeader::Missing, - Some(HeaderId(20, 
Default::default())), - Default::default(), - Default::default(), - ),); - } - - #[test] - fn parachain_is_updated_after_onboarding() { - assert!(is_update_required::( - AvailableHeader::Available(HeaderId(30, Default::default())), - None, - Default::default(), - Default::default(), - ),); - } - - #[test] - fn parachain_is_updated_if_newer_head_is_known() { - assert!(is_update_required::( - AvailableHeader::Available(HeaderId(40, Default::default())), - Some(HeaderId(30, Default::default())), - Default::default(), - Default::default(), - ),); - } -} diff --git a/relays/parachains/src/parachains_loop_metrics.rs b/relays/parachains/src/parachains_loop_metrics.rs deleted file mode 100644 index 8138a43b3b3dc97997816b13aea1bc973206d48b..0000000000000000000000000000000000000000 --- a/relays/parachains/src/parachains_loop_metrics.rs +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use bp_polkadot_core::parachains::ParaId; -use relay_utils::{ - metrics::{metric_name, register, Gauge, Metric, PrometheusError, Registry, U64}, - UniqueSaturatedInto, -}; - -/// Parachains sync metrics. -#[derive(Clone)] -pub struct ParachainsLoopMetrics { - /// Best parachains header numbers at the source. 
- best_source_block_numbers: Gauge, - /// Best parachains header numbers at the target. - best_target_block_numbers: Gauge, -} - -impl ParachainsLoopMetrics { - /// Create and register parachains loop metrics. - pub fn new(prefix: Option<&str>) -> Result { - Ok(ParachainsLoopMetrics { - best_source_block_numbers: Gauge::new( - metric_name(prefix, "best_parachain_block_number_at_source"), - "Best parachain block numbers at the source relay chain".to_string(), - )?, - best_target_block_numbers: Gauge::new( - metric_name(prefix, "best_parachain_block_number_at_target"), - "Best parachain block numbers at the target chain".to_string(), - )?, - }) - } - - /// Update best block number at source. - pub fn update_best_parachain_block_at_source>( - &self, - parachain: ParaId, - block_number: Number, - ) { - let block_number = block_number.unique_saturated_into(); - log::trace!( - target: "bridge-metrics", - "Updated value of metric 'best_parachain_block_number_at_source[{:?}]': {:?}", - parachain, - block_number, - ); - self.best_source_block_numbers.set(block_number); - } - - /// Update best block number at target. 
- pub fn update_best_parachain_block_at_target>( - &self, - parachain: ParaId, - block_number: Number, - ) { - let block_number = block_number.unique_saturated_into(); - log::trace!( - target: "bridge-metrics", - "Updated value of metric 'best_parachain_block_number_at_target[{:?}]': {:?}", - parachain, - block_number, - ); - self.best_target_block_numbers.set(block_number); - } -} - -impl Metric for ParachainsLoopMetrics { - fn register(&self, registry: &Registry) -> Result<(), PrometheusError> { - register(self.best_source_block_numbers.clone(), registry)?; - register(self.best_target_block_numbers.clone(), registry)?; - Ok(()) - } -} diff --git a/relays/utils/Cargo.toml b/relays/utils/Cargo.toml deleted file mode 100644 index ed6093318a0dcf0bbfb76250f7bcd2d04b942387..0000000000000000000000000000000000000000 --- a/relays/utils/Cargo.toml +++ /dev/null @@ -1,36 +0,0 @@ -[package] -name = "relay-utils" -version = "0.1.0" -authors = ["Parity Technologies "] -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" - -[lints] -workspace = true - -[dependencies] -ansi_term = "0.12" -anyhow = "1.0" -async-std = "1.6.5" -async-trait = "0.1" -backoff = "0.4" -isahc = "1.2" -env_logger = "0.11.3" -futures = "0.3.30" -jsonpath_lib = "0.3" -log = { workspace = true } -num-traits = "0.2" -serde_json = { workspace = true, default-features = true } -sysinfo = "0.30" -time = { version = "0.3", features = ["formatting", "local-offset", "std"] } -tokio = { version = "1.36", features = ["rt"] } -thiserror = { workspace = true } - -# Bridge dependencies - -bp-runtime = { path = "../../primitives/runtime" } - -# Substrate dependencies - -sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } -substrate-prometheus-endpoint = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } diff --git a/relays/utils/src/error.rs b/relays/utils/src/error.rs deleted file mode 100644 index 
26f1d0cacefd8eef5687e0102588f999859012a5..0000000000000000000000000000000000000000 --- a/relays/utils/src/error.rs +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use std::net::AddrParseError; -use thiserror::Error; - -/// Result type used by relay utilities. -pub type Result = std::result::Result; - -/// Relay utilities errors. -#[derive(Error, Debug)] -pub enum Error { - /// Failed to request a float value from HTTP service. - #[error("Failed to fetch token price from remote server: {0}")] - FetchTokenPrice(#[source] anyhow::Error), - /// Failed to parse the response from HTTP service. - #[error("Failed to parse HTTP service response: {0:?}. Response: {1:?}")] - ParseHttp(serde_json::Error, String), - /// Failed to select response value from the Json response. - #[error("Failed to select value from response: {0:?}. Response: {1:?}")] - SelectResponseValue(jsonpath_lib::JsonPathError, String), - /// Failed to parse float value from the selected value. - #[error( - "Failed to parse float value {0:?} from response. It is assumed to be positive and normal" - )] - ParseFloat(f64), - /// Couldn't found value in the JSON response. 
- #[error("Missing required value from response: {0:?}")] - MissingResponseValue(String), - /// Invalid host address was used for exposing Prometheus metrics. - #[error("Invalid host {0} is used to expose Prometheus metrics: {1}")] - ExposingMetricsInvalidHost(String, AddrParseError), - /// Prometheus error. - #[error("{0}")] - Prometheus(#[from] substrate_prometheus_endpoint::prometheus::Error), -} diff --git a/relays/utils/src/initialize.rs b/relays/utils/src/initialize.rs deleted file mode 100644 index 8224c1803ad2f74ec694cd68c18a556dddc6d76f..0000000000000000000000000000000000000000 --- a/relays/utils/src/initialize.rs +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Relayer initialization functions. - -use std::{cell::RefCell, fmt::Display, io::Write}; - -async_std::task_local! { - pub(crate) static LOOP_NAME: RefCell = RefCell::new(String::default()); -} - -/// Initialize relay environment. -pub fn initialize_relay() { - initialize_logger(true); -} - -/// Initialize Relay logger instance. 
-pub fn initialize_logger(with_timestamp: bool) { - let format = time::format_description::parse( - "[year]-[month]-[day] \ - [hour repr:24]:[minute]:[second] [offset_hour sign:mandatory]", - ) - .expect("static format string is valid"); - - let mut builder = env_logger::Builder::new(); - builder.filter_level(log::LevelFilter::Warn); - builder.filter_module("bridge", log::LevelFilter::Info); - builder.parse_default_env(); - if with_timestamp { - builder.format(move |buf, record| { - let timestamp = time::OffsetDateTime::now_local() - .unwrap_or_else(|_| time::OffsetDateTime::now_utc()); - let timestamp = timestamp.format(&format).unwrap_or_else(|_| timestamp.to_string()); - - let log_level = color_level(record.level()); - let log_target = color_target(record.target()); - let timestamp = if cfg!(windows) { - Either::Left(timestamp) - } else { - Either::Right(ansi_term::Colour::Fixed(8).bold().paint(timestamp)) - }; - - writeln!( - buf, - "{}{} {} {} {}", - loop_name_prefix(), - timestamp, - log_level, - log_target, - record.args(), - ) - }); - } else { - builder.format(move |buf, record| { - let log_level = color_level(record.level()); - let log_target = color_target(record.target()); - - writeln!(buf, "{}{log_level} {log_target} {}", loop_name_prefix(), record.args(),) - }); - } - - builder.init(); -} - -/// Initialize relay loop. Must only be called once per every loop task. -pub(crate) fn initialize_loop(loop_name: String) { - LOOP_NAME.with(|g_loop_name| *g_loop_name.borrow_mut() = loop_name); -} - -/// Returns loop name prefix to use in logs. The prefix is initialized with the `initialize_loop` -/// call. 
-fn loop_name_prefix() -> String { - // try_with to avoid panic outside of async-std task context - LOOP_NAME - .try_with(|loop_name| { - // using borrow is ok here, because loop is only initialized once (=> borrow_mut will - // only be called once) - let loop_name = loop_name.borrow(); - if loop_name.is_empty() { - String::new() - } else { - format!("[{loop_name}] ") - } - }) - .unwrap_or_else(|_| String::new()) -} - -enum Either { - Left(A), - Right(B), -} -impl Display for Either { - fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - match self { - Self::Left(a) => write!(fmt, "{a}"), - Self::Right(b) => write!(fmt, "{b}"), - } - } -} - -fn color_target(target: &str) -> impl Display + '_ { - if cfg!(windows) { - Either::Left(target) - } else { - Either::Right(ansi_term::Colour::Fixed(8).paint(target)) - } -} - -fn color_level(level: log::Level) -> impl Display { - if cfg!(windows) { - Either::Left(level) - } else { - let s = level.to_string(); - use ansi_term::Colour as Color; - Either::Right(match level { - log::Level::Error => Color::Fixed(9).bold().paint(s), - log::Level::Warn => Color::Fixed(11).bold().paint(s), - log::Level::Info => Color::Fixed(10).paint(s), - log::Level::Debug => Color::Fixed(14).paint(s), - log::Level::Trace => Color::Fixed(12).paint(s), - }) - } -} diff --git a/relays/utils/src/lib.rs b/relays/utils/src/lib.rs deleted file mode 100644 index 1df6e9718f15722401afe1e01748e3d80eb70a8a..0000000000000000000000000000000000000000 --- a/relays/utils/src/lib.rs +++ /dev/null @@ -1,320 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Utilities used by different relays. - -pub use bp_runtime::HeaderId; -pub use error::Error; -pub use relay_loop::{relay_loop, relay_metrics}; -pub use sp_runtime::traits::{UniqueSaturatedFrom, UniqueSaturatedInto}; -use std::fmt::Debug; - -use async_trait::async_trait; -use backoff::{backoff::Backoff, ExponentialBackoff}; -use futures::future::{BoxFuture, FutureExt}; -use std::time::Duration; -use thiserror::Error; - -/// Default relay loop stall timeout. If transactions generated by relay are immortal, then -/// this timeout is used. -/// -/// There are no any strict requirements on block time in Substrate. But we assume here that all -/// Substrate-based chains will be designed to produce relatively fast (compared to the slowest -/// blockchains) blocks. So 1 hour seems to be a good guess for (even congested) chains to mine -/// transaction, or remove it from the pool. -pub const STALL_TIMEOUT: Duration = Duration::from_secs(60 * 60); - -/// Max delay after connection-unrelated error happened before we'll try the -/// same request again. -pub const MAX_BACKOFF_INTERVAL: Duration = Duration::from_secs(60); -/// Delay after connection-related error happened before we'll try -/// reconnection again. -pub const CONNECTION_ERROR_DELAY: Duration = Duration::from_secs(10); - -pub mod error; -pub mod initialize; -pub mod metrics; -pub mod relay_loop; - -/// Block number traits shared by all chains that relay is able to serve. 
-pub trait BlockNumberBase: - 'static - + From - + UniqueSaturatedInto - + Ord - + Clone - + Copy - + Default - + Send - + Sync - + std::fmt::Debug - + std::fmt::Display - + std::hash::Hash - + std::ops::Add - + std::ops::Rem - + std::ops::Sub - + num_traits::CheckedSub - + num_traits::Saturating - + num_traits::Zero - + num_traits::One -{ -} - -impl BlockNumberBase for T where - T: 'static - + From - + UniqueSaturatedInto - + Ord - + Clone - + Copy - + Default - + Send - + Sync - + std::fmt::Debug - + std::fmt::Display - + std::hash::Hash - + std::ops::Add - + std::ops::Rem - + std::ops::Sub - + num_traits::CheckedSub - + num_traits::Saturating - + num_traits::Zero - + num_traits::One -{ -} - -/// Macro that returns (client, Err(error)) tuple from function if result is Err(error). -#[macro_export] -macro_rules! bail_on_error { - ($result: expr) => { - match $result { - (client, Ok(result)) => (client, result), - (client, Err(error)) => return (client, Err(error)), - } - }; -} - -/// Macro that returns (client, Err(error)) tuple from function if result is Err(error). -#[macro_export] -macro_rules! bail_on_arg_error { - ($result: expr, $client: ident) => { - match $result { - Ok(result) => result, - Err(error) => return ($client, Err(error)), - } - }; -} - -/// Error type that can signal connection errors. -pub trait MaybeConnectionError { - /// Returns true if error (maybe) represents connection error. - fn is_connection_error(&self) -> bool; -} - -/// Final status of the tracked transaction. -#[derive(Debug, Clone, Copy, PartialEq)] -pub enum TrackedTransactionStatus { - /// Transaction has been lost. - Lost, - /// Transaction has been mined and finalized at given block. - Finalized(BlockId), -} - -/// Transaction tracker. -#[async_trait] -pub trait TransactionTracker: Send { - /// Header id, used by the chain. - type HeaderId: Clone + Debug + Send; - - /// Wait until transaction is either finalized or invalidated/lost. 
- async fn wait(self) -> TrackedTransactionStatus; -} - -/// Future associated with `TransactionTracker`, monitoring the transaction status. -pub type TrackedTransactionFuture<'a, T> = - BoxFuture<'a, TrackedTransactionStatus<::HeaderId>>; - -/// Stringified error that may be either connection-related or not. -#[derive(Error, Debug)] -pub enum StringifiedMaybeConnectionError { - /// The error is connection-related error. - #[error("{0}")] - Connection(String), - /// The error is connection-unrelated error. - #[error("{0}")] - NonConnection(String), -} - -impl StringifiedMaybeConnectionError { - /// Create new stringified connection error. - pub fn new(is_connection_error: bool, error: String) -> Self { - if is_connection_error { - StringifiedMaybeConnectionError::Connection(error) - } else { - StringifiedMaybeConnectionError::NonConnection(error) - } - } -} - -impl MaybeConnectionError for StringifiedMaybeConnectionError { - fn is_connection_error(&self) -> bool { - match *self { - StringifiedMaybeConnectionError::Connection(_) => true, - StringifiedMaybeConnectionError::NonConnection(_) => false, - } - } -} - -/// Exponential backoff for connection-unrelated errors retries. -pub fn retry_backoff() -> ExponentialBackoff { - ExponentialBackoff { - // we do not want relayer to stop - max_elapsed_time: None, - max_interval: MAX_BACKOFF_INTERVAL, - ..Default::default() - } -} - -/// Compact format of IDs vector. -pub fn format_ids(mut ids: impl ExactSizeIterator) -> String { - const NTH_PROOF: &str = "we have checked len; qed"; - match ids.len() { - 0 => "".into(), - 1 => format!("{:?}", ids.next().expect(NTH_PROOF)), - 2 => { - let id0 = ids.next().expect(NTH_PROOF); - let id1 = ids.next().expect(NTH_PROOF); - format!("[{id0:?}, {id1:?}]") - }, - len => { - let id0 = ids.next().expect(NTH_PROOF); - let id_last = ids.last().expect(NTH_PROOF); - format!("{len}:[{id0:?} ... {id_last:?}]") - }, - } -} - -/// Stream that emits item every `timeout_ms` milliseconds. 
-pub fn interval(timeout: Duration) -> impl futures::Stream { - futures::stream::unfold((), move |_| async move { - async_std::task::sleep(timeout).await; - Some(((), ())) - }) -} - -/// Which client has caused error. -#[derive(Debug, Eq, Clone, Copy, PartialEq)] -pub enum FailedClient { - /// It is the source client who has caused error. - Source, - /// It is the target client who has caused error. - Target, - /// Both clients are failing, or we just encountered some other error that - /// should be treated like that. - Both, -} - -/// Future process result. -#[derive(Debug, Clone, Copy)] -pub enum ProcessFutureResult { - /// Future has been processed successfully. - Success, - /// Future has failed with non-connection error. - Failed, - /// Future has failed with connection error. - ConnectionFailed, -} - -impl ProcessFutureResult { - /// Returns true if result is Success. - pub fn is_ok(self) -> bool { - match self { - ProcessFutureResult::Success => true, - ProcessFutureResult::Failed | ProcessFutureResult::ConnectionFailed => false, - } - } - - /// Returns `Ok(())` if future has succeeded. - /// Returns `Err(failed_client)` otherwise. - pub fn fail_if_error(self, failed_client: FailedClient) -> Result<(), FailedClient> { - if self.is_ok() { - Ok(()) - } else { - Err(failed_client) - } - } - - /// Returns Ok(true) if future has succeeded. - /// Returns Ok(false) if future has failed with non-connection error. - /// Returns Err if future is `ConnectionFailed`. - pub fn fail_if_connection_error( - self, - failed_client: FailedClient, - ) -> Result { - match self { - ProcessFutureResult::Success => Ok(true), - ProcessFutureResult::Failed => Ok(false), - ProcessFutureResult::ConnectionFailed => Err(failed_client), - } - } -} - -/// Process result of the future from a client. 
-pub fn process_future_result( - result: Result, - retry_backoff: &mut ExponentialBackoff, - on_success: impl FnOnce(TResult), - go_offline_future: &mut std::pin::Pin<&mut futures::future::Fuse>, - go_offline: impl FnOnce(Duration) -> TGoOfflineFuture, - error_pattern: impl FnOnce() -> String, -) -> ProcessFutureResult -where - TError: std::fmt::Debug + MaybeConnectionError, - TGoOfflineFuture: FutureExt, -{ - match result { - Ok(result) => { - on_success(result); - retry_backoff.reset(); - ProcessFutureResult::Success - }, - Err(error) if error.is_connection_error() => { - log::error!( - target: "bridge", - "{}: {:?}. Going to restart", - error_pattern(), - error, - ); - - retry_backoff.reset(); - go_offline_future.set(go_offline(CONNECTION_ERROR_DELAY).fuse()); - ProcessFutureResult::ConnectionFailed - }, - Err(error) => { - let retry_delay = retry_backoff.next_backoff().unwrap_or(CONNECTION_ERROR_DELAY); - log::error!( - target: "bridge", - "{}: {:?}. Retrying in {}", - error_pattern(), - error, - retry_delay.as_secs_f64(), - ); - - go_offline_future.set(go_offline(retry_delay).fuse()); - ProcessFutureResult::Failed - }, - } -} diff --git a/relays/utils/src/metrics.rs b/relays/utils/src/metrics.rs deleted file mode 100644 index 2e6c8236da454dd620ccdb5f1f03cdbf9eed9471..0000000000000000000000000000000000000000 --- a/relays/utils/src/metrics.rs +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -pub use float_json_value::FloatJsonValueMetric; -pub use global::GlobalMetrics; -pub use substrate_prometheus_endpoint::{ - prometheus::core::{Atomic, Collector}, - register, Counter, CounterVec, Gauge, GaugeVec, Opts, PrometheusError, Registry, F64, I64, U64, -}; - -use async_std::sync::{Arc, RwLock}; -use async_trait::async_trait; -use std::{fmt::Debug, time::Duration}; - -mod float_json_value; -mod global; - -/// Shared reference to `f64` value that is updated by the metric. -pub type F64SharedRef = Arc>>; -/// Int gauge metric type. -pub type IntGauge = Gauge; - -/// Unparsed address that needs to be used to expose Prometheus metrics. -#[derive(Debug, Clone)] -pub struct MetricsAddress { - /// Serve HTTP requests at given host. - pub host: String, - /// Serve HTTP requests at given port. - pub port: u16, -} - -/// Prometheus endpoint MetricsParams. -#[derive(Debug, Clone)] -pub struct MetricsParams { - /// Interface and TCP port to be used when exposing Prometheus metrics. - pub address: Option, - /// Metrics registry. May be `Some(_)` if several components share the same endpoint. - pub registry: Registry, -} - -/// Metric API. -pub trait Metric: Clone + Send + Sync + 'static { - fn register(&self, registry: &Registry) -> Result<(), PrometheusError>; -} - -/// Standalone metric API. -/// -/// Metrics of this kind know how to update themselves, so we may just spawn and forget the -/// asynchronous self-update task. -#[async_trait] -pub trait StandaloneMetric: Metric { - /// Update metric values. - async fn update(&self); - - /// Metrics update interval. - fn update_interval(&self) -> Duration; - - /// Register and spawn metric. Metric is only spawned if it is registered for the first time. 
- fn register_and_spawn(self, registry: &Registry) -> Result<(), PrometheusError> { - match self.register(registry) { - Ok(()) => { - self.spawn(); - Ok(()) - }, - Err(PrometheusError::AlreadyReg) => Ok(()), - Err(e) => Err(e), - } - } - - /// Spawn the self update task that will keep update metric value at given intervals. - fn spawn(self) { - async_std::task::spawn(async move { - let update_interval = self.update_interval(); - loop { - self.update().await; - async_std::task::sleep(update_interval).await; - } - }); - } -} - -impl Default for MetricsAddress { - fn default() -> Self { - MetricsAddress { host: "127.0.0.1".into(), port: 9616 } - } -} - -impl MetricsParams { - /// Creates metrics params from metrics address. - pub fn new( - address: Option, - relay_version: String, - relay_commit: String, - ) -> Result { - const BUILD_INFO_METRIC: &str = "substrate_relay_build_info"; - - let registry = Registry::new(); - register( - Gauge::::with_opts( - Opts::new( - BUILD_INFO_METRIC, - "A metric with a constant '1' value labeled by version", - ) - .const_label("version", &relay_version) - .const_label("commit", &relay_commit), - )?, - ®istry, - )? - .set(1); - - log::info!( - target: "bridge", - "Exposed {} metric: version={} commit={}", - BUILD_INFO_METRIC, - relay_version, - relay_commit, - ); - - Ok(MetricsParams { address, registry }) - } - - /// Creates metrics params so that metrics are not exposed. - pub fn disabled() -> Self { - MetricsParams { address: None, registry: Registry::new() } - } - - /// Do not expose metrics. - #[must_use] - pub fn disable(mut self) -> Self { - self.address = None; - self - } -} - -/// Returns metric name optionally prefixed with given prefix. -pub fn metric_name(prefix: Option<&str>, name: &str) -> String { - if let Some(prefix) = prefix { - format!("{prefix}_{name}") - } else { - name.into() - } -} - -/// Set value of gauge metric. -/// -/// If value is `Ok(None)` or `Err(_)`, metric would have default value. 
-pub fn set_gauge_value, E: Debug>( - gauge: &Gauge, - value: Result, E>, -) { - gauge.set(match value { - Ok(Some(value)) => { - log::trace!( - target: "bridge-metrics", - "Updated value of metric '{:?}': {:?}", - gauge.desc().first().map(|d| &d.fq_name), - value, - ); - value - }, - Ok(None) => { - log::warn!( - target: "bridge-metrics", - "Failed to update metric '{:?}': value is empty", - gauge.desc().first().map(|d| &d.fq_name), - ); - Default::default() - }, - Err(error) => { - log::warn!( - target: "bridge-metrics", - "Failed to update metric '{:?}': {:?}", - gauge.desc().first().map(|d| &d.fq_name), - error, - ); - Default::default() - }, - }) -} diff --git a/relays/utils/src/metrics/float_json_value.rs b/relays/utils/src/metrics/float_json_value.rs deleted file mode 100644 index 17b09e050973ada83a9735e373292c92ed31ff4b..0000000000000000000000000000000000000000 --- a/relays/utils/src/metrics/float_json_value.rs +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . 
- -use crate::{ - error::{self, Error}, - metrics::{ - metric_name, register, F64SharedRef, Gauge, Metric, PrometheusError, Registry, - StandaloneMetric, F64, - }, -}; - -use async_std::sync::{Arc, RwLock}; -use async_trait::async_trait; -use std::time::Duration; - -/// Value update interval. -const UPDATE_INTERVAL: Duration = Duration::from_secs(300); - -/// Metric that represents float value received from HTTP service as float gauge. -/// -/// The float value returned by the service is assumed to be normal (`f64::is_normal` -/// should return `true`) and strictly positive. -#[derive(Debug, Clone)] -pub struct FloatJsonValueMetric { - url: String, - json_path: String, - metric: Gauge, - shared_value_ref: F64SharedRef, -} - -impl FloatJsonValueMetric { - /// Create new metric instance with given name and help. - pub fn new( - url: String, - json_path: String, - name: String, - help: String, - ) -> Result { - let shared_value_ref = Arc::new(RwLock::new(None)); - Ok(FloatJsonValueMetric { - url, - json_path, - metric: Gauge::new(metric_name(None, &name), help)?, - shared_value_ref, - }) - } - - /// Get shared reference to metric value. - pub fn shared_value_ref(&self) -> F64SharedRef { - self.shared_value_ref.clone() - } - - /// Request value from HTTP service. - async fn request_value(&self) -> anyhow::Result { - use isahc::{AsyncReadResponseExt, HttpClient, Request}; - - let request = Request::get(&self.url).header("Accept", "application/json").body(())?; - let raw_response = HttpClient::new()?.send_async(request).await?.text().await?; - Ok(raw_response) - } - - /// Read value from HTTP service. 
- async fn read_value(&self) -> error::Result { - let raw_response = self.request_value().await.map_err(Error::FetchTokenPrice)?; - parse_service_response(&self.json_path, &raw_response) - } -} - -impl Metric for FloatJsonValueMetric { - fn register(&self, registry: &Registry) -> Result<(), PrometheusError> { - register(self.metric.clone(), registry).map(drop) - } -} - -#[async_trait] -impl StandaloneMetric for FloatJsonValueMetric { - fn update_interval(&self) -> Duration { - UPDATE_INTERVAL - } - - async fn update(&self) { - let value = self.read_value().await; - let maybe_ok = value.as_ref().ok().copied(); - crate::metrics::set_gauge_value(&self.metric, value.map(Some)); - *self.shared_value_ref.write().await = maybe_ok; - } -} - -/// Parse HTTP service response. -fn parse_service_response(json_path: &str, response: &str) -> error::Result { - let json = - serde_json::from_str(response).map_err(|err| Error::ParseHttp(err, response.to_owned()))?; - - let mut selector = jsonpath_lib::selector(&json); - let maybe_selected_value = - selector(json_path).map_err(|err| Error::SelectResponseValue(err, response.to_owned()))?; - let selected_value = maybe_selected_value - .first() - .and_then(|v| v.as_f64()) - .ok_or_else(|| Error::MissingResponseValue(response.to_owned()))?; - if !selected_value.is_normal() || selected_value < 0.0 { - return Err(Error::ParseFloat(selected_value)) - } - - Ok(selected_value) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn parse_service_response_works() { - assert_eq!( - parse_service_response("$.kusama.usd", r#"{"kusama":{"usd":433.05}}"#).map_err(drop), - Ok(433.05), - ); - } - - #[test] - fn parse_service_response_rejects_negative_numbers() { - assert!(parse_service_response("$.kusama.usd", r#"{"kusama":{"usd":-433.05}}"#).is_err()); - } - - #[test] - fn parse_service_response_rejects_zero_numbers() { - assert!(parse_service_response("$.kusama.usd", r#"{"kusama":{"usd":0.0}}"#).is_err()); - } - - #[test] - fn 
parse_service_response_rejects_nan() { - assert!(parse_service_response("$.kusama.usd", r#"{"kusama":{"usd":NaN}}"#).is_err()); - } -} diff --git a/relays/utils/src/metrics/global.rs b/relays/utils/src/metrics/global.rs deleted file mode 100644 index 9b22fb86ef0cbf11268b80e7c0ac0d8c2d68d129..0000000000000000000000000000000000000000 --- a/relays/utils/src/metrics/global.rs +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -//! Global system-wide Prometheus metrics exposed by relays. - -use crate::metrics::{ - metric_name, register, Gauge, GaugeVec, Metric, Opts, PrometheusError, Registry, - StandaloneMetric, F64, U64, -}; - -use async_std::sync::{Arc, Mutex}; -use async_trait::async_trait; -use std::time::Duration; -use sysinfo::{RefreshKind, System}; - -/// Global metrics update interval. -const UPDATE_INTERVAL: Duration = Duration::from_secs(10); - -/// Global Prometheus metrics. -#[derive(Debug, Clone)] -pub struct GlobalMetrics { - system: Arc>, - system_average_load: GaugeVec, - process_cpu_usage_percentage: Gauge, - process_memory_usage_bytes: Gauge, -} - -impl GlobalMetrics { - /// Create and register global metrics. 
- pub fn new() -> Result { - Ok(GlobalMetrics { - system: Arc::new(Mutex::new(System::new_with_specifics(RefreshKind::everything()))), - system_average_load: GaugeVec::new( - Opts::new(metric_name(None, "system_average_load"), "System load average"), - &["over"], - )?, - process_cpu_usage_percentage: Gauge::new( - metric_name(None, "process_cpu_usage_percentage"), - "Process CPU usage", - )?, - process_memory_usage_bytes: Gauge::new( - metric_name(None, "process_memory_usage_bytes"), - "Process memory (resident set size) usage", - )?, - }) - } -} - -impl Metric for GlobalMetrics { - fn register(&self, registry: &Registry) -> Result<(), PrometheusError> { - register(self.system_average_load.clone(), registry)?; - register(self.process_cpu_usage_percentage.clone(), registry)?; - register(self.process_memory_usage_bytes.clone(), registry)?; - Ok(()) - } -} - -#[async_trait] -impl StandaloneMetric for GlobalMetrics { - async fn update(&self) { - // update system-wide metrics - let mut system = self.system.lock().await; - let load = sysinfo::System::load_average(); - self.system_average_load.with_label_values(&["1min"]).set(load.one); - self.system_average_load.with_label_values(&["5min"]).set(load.five); - self.system_average_load.with_label_values(&["15min"]).set(load.fifteen); - - // update process-related metrics - let pid = sysinfo::get_current_pid().expect( - "only fails where pid is unavailable (os=unknown || arch=wasm32);\ - relay is not supposed to run in such MetricsParamss;\ - qed", - ); - let is_process_refreshed = system.refresh_process(pid); - match (is_process_refreshed, system.process(pid)) { - (true, Some(process_info)) => { - let cpu_usage = process_info.cpu_usage() as f64; - let memory_usage = process_info.memory() * 1024; - log::trace!( - target: "bridge-metrics", - "Refreshed process metrics: CPU={}, memory={}", - cpu_usage, - memory_usage, - ); - - self.process_cpu_usage_percentage.set(if cpu_usage.is_finite() { - cpu_usage - } else { - 0f64 - }); 
- self.process_memory_usage_bytes.set(memory_usage); - }, - _ => { - log::warn!( - target: "bridge-metrics", - "Failed to refresh process information. Metrics may show obsolete values", - ); - }, - } - } - - fn update_interval(&self) -> Duration { - UPDATE_INTERVAL - } -} diff --git a/relays/utils/src/relay_loop.rs b/relays/utils/src/relay_loop.rs deleted file mode 100644 index 7105190a45831f57ecb728ac0d354157b020db8a..0000000000000000000000000000000000000000 --- a/relays/utils/src/relay_loop.rs +++ /dev/null @@ -1,262 +0,0 @@ -// Copyright 2019-2021 Parity Technologies (UK) Ltd. -// This file is part of Parity Bridges Common. - -// Parity Bridges Common is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity Bridges Common is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity Bridges Common. If not, see . - -use crate::{ - error::Error, - metrics::{Metric, MetricsAddress, MetricsParams}, - FailedClient, MaybeConnectionError, -}; - -use async_trait::async_trait; -use std::{fmt::Debug, future::Future, net::SocketAddr, time::Duration}; -use substrate_prometheus_endpoint::{init_prometheus, Registry}; - -/// Default pause between reconnect attempts. -pub const RECONNECT_DELAY: Duration = Duration::from_secs(10); - -/// Basic blockchain client from relay perspective. -#[async_trait] -pub trait Client: 'static + Clone + Send + Sync { - /// Type of error these clients returns. - type Error: 'static + Debug + MaybeConnectionError + Send + Sync; - - /// Try to reconnect to source node. 
- async fn reconnect(&mut self) -> Result<(), Self::Error>; - - /// Try to reconnect to the source node in an infinite loop until it succeeds. - async fn reconnect_until_success(&mut self, delay: Duration) { - loop { - match self.reconnect().await { - Ok(()) => break, - Err(error) => { - log::warn!( - target: "bridge", - "Failed to reconnect to client. Going to retry in {}s: {:?}", - delay.as_secs(), - error, - ); - - async_std::task::sleep(delay).await; - }, - } - } - } -} - -#[async_trait] -impl Client for () { - type Error = crate::StringifiedMaybeConnectionError; - - async fn reconnect(&mut self) -> Result<(), Self::Error> { - Ok(()) - } -} - -/// Returns generic loop that may be customized and started. -pub fn relay_loop(source_client: SC, target_client: TC) -> Loop { - Loop { reconnect_delay: RECONNECT_DELAY, source_client, target_client, loop_metric: None } -} - -/// Returns generic relay loop metrics that may be customized and used in one or several relay -/// loops. -pub fn relay_metrics(params: MetricsParams) -> LoopMetrics<(), (), ()> { - LoopMetrics { - relay_loop: Loop { - reconnect_delay: RECONNECT_DELAY, - source_client: (), - target_client: (), - loop_metric: None, - }, - address: params.address, - registry: params.registry, - loop_metric: None, - } -} - -/// Generic relay loop. -pub struct Loop { - reconnect_delay: Duration, - source_client: SC, - target_client: TC, - loop_metric: Option, -} - -/// Relay loop metrics builder. -pub struct LoopMetrics { - relay_loop: Loop, - address: Option, - registry: Registry, - loop_metric: Option, -} - -impl Loop { - /// Customize delay between reconnect attempts. - #[must_use] - pub fn reconnect_delay(mut self, reconnect_delay: Duration) -> Self { - self.reconnect_delay = reconnect_delay; - self - } - - /// Start building loop metrics using given prefix. 
- pub fn with_metrics(self, params: MetricsParams) -> LoopMetrics { - LoopMetrics { - relay_loop: Loop { - reconnect_delay: self.reconnect_delay, - source_client: self.source_client, - target_client: self.target_client, - loop_metric: None, - }, - address: params.address, - registry: params.registry, - loop_metric: None, - } - } - - /// Run relay loop. - /// - /// This function represents an outer loop, which in turn calls provided `run_loop` function to - /// do actual job. When `run_loop` returns, this outer loop reconnects to failed client (source, - /// target or both) and calls `run_loop` again. - pub async fn run(mut self, loop_name: String, run_loop: R) -> Result<(), Error> - where - R: 'static + Send + Fn(SC, TC, Option) -> F, - F: 'static + Send + Future>, - SC: 'static + Client, - TC: 'static + Client, - LM: 'static + Send + Clone, - { - let run_loop_task = async move { - crate::initialize::initialize_loop(loop_name); - - loop { - let loop_metric = self.loop_metric.clone(); - let future_result = - run_loop(self.source_client.clone(), self.target_client.clone(), loop_metric); - let result = future_result.await; - - match result { - Ok(()) => break, - Err(failed_client) => { - log::debug!(target: "bridge", "Restarting relay loop"); - - reconnect_failed_client( - failed_client, - self.reconnect_delay, - &mut self.source_client, - &mut self.target_client, - ) - .await - }, - } - } - Ok(()) - }; - - async_std::task::spawn(run_loop_task).await - } -} - -impl LoopMetrics { - /// Add relay loop metrics. - /// - /// Loop metrics will be passed to the loop callback. - pub fn loop_metric( - self, - metric: NewLM, - ) -> Result, Error> { - metric.register(&self.registry)?; - - Ok(LoopMetrics { - relay_loop: self.relay_loop, - address: self.address, - registry: self.registry, - loop_metric: Some(metric), - }) - } - - /// Convert into `MetricsParams` structure so that metrics registry may be extended later. 
- pub fn into_params(self) -> MetricsParams { - MetricsParams { address: self.address, registry: self.registry } - } - - /// Expose metrics using address passed at creation. - /// - /// If passed `address` is `None`, metrics are not exposed. - pub async fn expose(self) -> Result, Error> { - if let Some(address) = self.address { - let socket_addr = SocketAddr::new( - address - .host - .parse() - .map_err(|err| Error::ExposingMetricsInvalidHost(address.host.clone(), err))?, - address.port, - ); - - let registry = self.registry; - async_std::task::spawn(async move { - let runtime = - match tokio::runtime::Builder::new_current_thread().enable_all().build() { - Ok(runtime) => runtime, - Err(err) => { - log::trace!( - target: "bridge-metrics", - "Failed to create tokio runtime. Prometheus metrics are not available: {:?}", - err, - ); - return - }, - }; - - runtime.block_on(async move { - log::trace!( - target: "bridge-metrics", - "Starting prometheus endpoint at: {:?}", - socket_addr, - ); - let result = init_prometheus(socket_addr, registry).await; - log::trace!( - target: "bridge-metrics", - "Prometheus endpoint has exited with result: {:?}", - result, - ); - }); - }); - } - - Ok(Loop { - reconnect_delay: self.relay_loop.reconnect_delay, - source_client: self.relay_loop.source_client, - target_client: self.relay_loop.target_client, - loop_metric: self.loop_metric, - }) - } -} - -/// Deal with the clients that have returned connection error. 
-pub async fn reconnect_failed_client( - failed_client: FailedClient, - reconnect_delay: Duration, - source_client: &mut impl Client, - target_client: &mut impl Client, -) { - if failed_client == FailedClient::Source || failed_client == FailedClient::Both { - source_client.reconnect_until_success(reconnect_delay).await; - } - - if failed_client == FailedClient::Target || failed_client == FailedClient::Both { - target_client.reconnect_until_success(reconnect_delay).await; - } -} diff --git a/scripts/regenerate_runtimes.sh b/scripts/regenerate_runtimes.sh index 700f4dc1c86e8d8dda4196c8f00aa8f77e45d6cd..0a97e2b6ce88bd411c2324803c2d5fd22931512b 100755 --- a/scripts/regenerate_runtimes.sh +++ b/scripts/regenerate_runtimes.sh @@ -13,6 +13,9 @@ cargo run --bin runtime-codegen -- --from-node-url "wss://rpc.polkadot.io:443" > # TODO: there is a bug, probably needs to update subxt, generates: `::sp_runtime::generic::Header<::core::primitive::u32>` withtout second `Hash` parameter. # cargo run --bin runtime-codegen -- --from-wasm-file ../../../polkadot-sdk/target/release/wbuild/bridge-hub-rococo-runtime/bridge_hub_rococo_runtime.compact.compressed.wasm > ../../relays/client-bridge-hub-rococo/src/codegen_runtime.rs # cargo run --bin runtime-codegen -- --from-wasm-file ../../../polkadot-sdk/target/release/wbuild/bridge-hub-westend-runtime/bridge_hub_westend_runtime.compact.compressed.wasm > ../../relays/client-bridge-hub-westend/src/codegen_runtime.rs +# OR for production runtimes: +# cargo run --bin runtime-codegen -- --from-node-url wss://kusama-bridge-hub-rpc.polkadot.io/ > ../../relay-clients/client-bridge-hub-kusama/src/codegen_runtime.rs +# cargo run --bin runtime-codegen -- --from-node-url wss://polkadot-bridge-hub-rpc.polkadot.io/ > ../../relay-clients/client-bridge-hub-polkadot/src/codegen_runtime.rs cd - cargo fmt --all diff --git a/scripts/verify-pallets-build.sh b/scripts/verify-pallets-build.sh deleted file mode 100755 index 
b96bbf1833b6b3ce2bb34d2dc34aa5b8f54eb528..0000000000000000000000000000000000000000 --- a/scripts/verify-pallets-build.sh +++ /dev/null @@ -1,138 +0,0 @@ -#!/bin/bash - -# A script to remove everything from bridges repository/subtree, except: -# -# - modules/grandpa; -# - modules/messages; -# - modules/parachains; -# - modules/relayers; -# - everything required from primitives folder. - -set -eux - -# show CLI help -function show_help() { - set +x - echo " " - echo Error: $1 - echo "Usage:" - echo " ./scripts/verify-pallets-build.sh Exit with code 0 if pallets code is well decoupled from the other code in the repo" - echo "Options:" - echo " --no-revert Leaves only runtime code on exit" - echo " --ignore-git-state Ignores git actual state" - exit 1 -} - -# parse CLI args -NO_REVERT= -IGNORE_GIT_STATE= -for i in "$@" -do - case $i in - --no-revert) - NO_REVERT=true - shift - ;; - --ignore-git-state) - IGNORE_GIT_STATE=true - shift - ;; - *) - show_help "Unknown option: $i" - ;; - esac -done - -# the script is able to work only on clean git copy, unless we want to ignore this check -[[ ! -z "${IGNORE_GIT_STATE}" ]] || [[ -z "$(git status --porcelain)" ]] || { echo >&2 "The git copy must be clean"; exit 1; } - -# let's avoid any restrictions on where this script can be called for - bridges repo may be -# plugged into any other repo folder. So the script (and other stuff that needs to be removed) -# may be located either in call dir, or one of it subdirs. -BRIDGES_FOLDER="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )/.." - -# let's leave repository/subtree in its original (clean) state if something fails below -function revert_to_clean_state { - [[ ! 
-z "${NO_REVERT}" ]] || { echo "Reverting to clean state..."; git checkout .; } -} -trap revert_to_clean_state EXIT - -# remove everything we think is not required for our needs -rm -rf $BRIDGES_FOLDER/.config -rm -rf $BRIDGES_FOLDER/.github -rm -rf $BRIDGES_FOLDER/.maintain -rm -rf $BRIDGES_FOLDER/deployments -rm -f $BRIDGES_FOLDER/docs/dockerhub-* -rm -rf $BRIDGES_FOLDER/fuzz -rm -rf $BRIDGES_FOLDER/modules/beefy -rm -rf $BRIDGES_FOLDER/modules/shift-session-manager -rm -rf $BRIDGES_FOLDER/primitives/beefy -rm -rf $BRIDGES_FOLDER/relays -rm -rf $BRIDGES_FOLDER/scripts/add_license.sh -rm -rf $BRIDGES_FOLDER/scripts/build-containers.sh -rm -rf $BRIDGES_FOLDER/scripts/ci-cache.sh -rm -rf $BRIDGES_FOLDER/scripts/dump-logs.sh -rm -rf $BRIDGES_FOLDER/scripts/license_header -rm -rf $BRIDGES_FOLDER/scripts/regenerate_runtimes.sh -rm -rf $BRIDGES_FOLDER/scripts/update-weights.sh -rm -rf $BRIDGES_FOLDER/scripts/update-weights-setup.sh -rm -rf $BRIDGES_FOLDER/scripts/update_substrate.sh -rm -rf $BRIDGES_FOLDER/tools -rm -f $BRIDGES_FOLDER/.dockerignore -rm -f $BRIDGES_FOLDER/local.Dockerfile.dockerignore -rm -f $BRIDGES_FOLDER/deny.toml -rm -f $BRIDGES_FOLDER/.gitlab-ci.yml -rm -f $BRIDGES_FOLDER/.editorconfig -rm -f $BRIDGES_FOLDER/Cargo.toml -rm -f $BRIDGES_FOLDER/ci.Dockerfile -rm -f $BRIDGES_FOLDER/local.Dockerfile -rm -f $BRIDGES_FOLDER/CODEOWNERS -rm -f $BRIDGES_FOLDER/Dockerfile -rm -f $BRIDGES_FOLDER/rustfmt.toml - -# let's fix Cargo.toml a bit (it'll be helpful if we are in the bridges repo) -if [[ ! 
-f "Cargo.toml" ]]; then - cat > Cargo.toml <<-CARGO_TOML - [workspace.package] - authors = ["Parity Technologies "] - edition = "2021" - repository = "https://github.com/paritytech/parity-bridges-common.git" - license = "GPL-3.0-only" - - [workspace] - resolver = "2" - - members = [ - "bin/runtime-common", - "modules/*", - "primitives/*", - ] - CARGO_TOML -fi - -# let's test if everything we need compiles - -cargo check -p pallet-bridge-grandpa -cargo check -p pallet-bridge-grandpa --features runtime-benchmarks -cargo check -p pallet-bridge-grandpa --features try-runtime -cargo check -p pallet-bridge-messages -cargo check -p pallet-bridge-messages --features runtime-benchmarks -cargo check -p pallet-bridge-messages --features try-runtime -cargo check -p pallet-bridge-parachains -cargo check -p pallet-bridge-parachains --features runtime-benchmarks -cargo check -p pallet-bridge-parachains --features try-runtime -cargo check -p pallet-bridge-relayers -cargo check -p pallet-bridge-relayers --features runtime-benchmarks -cargo check -p pallet-bridge-relayers --features try-runtime -cargo check -p pallet-xcm-bridge-hub-router -cargo check -p pallet-xcm-bridge-hub-router --features runtime-benchmarks -cargo check -p pallet-xcm-bridge-hub-router --features try-runtime -cargo check -p bridge-runtime-common -cargo check -p bridge-runtime-common --features runtime-benchmarks -cargo check -p bridge-runtime-common --features integrity-test - -# we're removing lock file after all chechs are done. 
Otherwise we may use different -# Substrate/Polkadot/Cumulus commits and our checks will fail -rm -f $BRIDGES_FOLDER/Cargo.lock - -echo "OK" diff --git a/substrate-relay/Cargo.toml b/substrate-relay/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..022296d92172b93732f6b00951d9f8f3f07f1950 --- /dev/null +++ b/substrate-relay/Cargo.toml @@ -0,0 +1,67 @@ +[package] +name = "substrate-relay" +version = "1.4.0" +authors.workspace = true +edition.workspace = true +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +anyhow = "1.0" +async-std = "1.9.0" +async-trait = "0.1.80" +codec = { package = "parity-scale-codec", version = "3.6.1" } +env_logger = "0.11" +futures = "0.3.30" +hex = "0.4" +log = { workspace = true } +num-format = "0.4" +num-traits = "0.2" +rbtag = "0.3" +structopt = "0.3" +signal-hook = "0.3.15" +signal-hook-async-std = "0.2.2" +strum = { version = "0.26.2", features = ["derive"] } + +# Bridge dependencies +bp-bridge-hub-polkadot = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-bridge-hub-rococo = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-header-chain = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-messages = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-parachains = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-polkadot-bulletin = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-polkadot = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-polkadot-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-rococo = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +bridge-runtime-common = { 
git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +pallet-bridge-parachains = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +parachains-relay = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +relay-bridge-hub-kusama-client = { path = "../relay-clients/client-bridge-hub-kusama" } +relay-bridge-hub-polkadot-client = { path = "../relay-clients/client-bridge-hub-polkadot" } +relay-bridge-hub-rococo-client = { path = "../relay-clients/client-bridge-hub-rococo" } +relay-bridge-hub-westend-client = { path = "../relay-clients/client-bridge-hub-westend" } +relay-kusama-client = { path = "../relay-clients/client-kusama" } +relay-polkadot-client = { path = "../relay-clients/client-polkadot" } +relay-polkadot-bulletin-client = { path = "../relay-clients/client-polkadot-bulletin" } +relay-rococo-client = { path = "../relay-clients/client-rococo" } +relay-substrate-client = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +relay-utils = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +relay-westend-client = { path = "../relay-clients/client-westend" } +substrate-relay-helper = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } + +# Substrate Dependencies + +frame-support = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } + +[dev-dependencies] +bp-test-utils = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +hex-literal = "0.4" +sp-keyring = { git = "https://github.com/paritytech/polkadot-sdk", branch = "master" } +tempfile = "3.10" +finality-grandpa = { version = "0.16.2" } diff --git a/relays/bin-substrate/src/bridges/kusama_polkadot/bridge_hub_kusama_messages_to_bridge_hub_polkadot.rs 
b/substrate-relay/src/bridges/kusama_polkadot/bridge_hub_kusama_messages_to_bridge_hub_polkadot.rs similarity index 94% rename from relays/bin-substrate/src/bridges/kusama_polkadot/bridge_hub_kusama_messages_to_bridge_hub_polkadot.rs rename to substrate-relay/src/bridges/kusama_polkadot/bridge_hub_kusama_messages_to_bridge_hub_polkadot.rs index e57302315560d09db32077beb52db9646534255e..fc239ca1ed387a89ffcc9915a32c8ab264fcd861 100644 --- a/relays/bin-substrate/src/bridges/kusama_polkadot/bridge_hub_kusama_messages_to_bridge_hub_polkadot.rs +++ b/substrate-relay/src/bridges/kusama_polkadot/bridge_hub_kusama_messages_to_bridge_hub_polkadot.rs @@ -16,10 +16,13 @@ //! BridgeHubKusama-to-BridgeHubPolkadot messages sync entrypoint. -use crate::cli::bridge::{CliBridgeBase, MessagesCliBridge}; use relay_bridge_hub_kusama_client::BridgeHubKusama; use relay_bridge_hub_polkadot_client::BridgeHubPolkadot; -use substrate_relay_helper::{messages_lane::SubstrateMessageLane, UtilityPalletBatchCallBuilder}; +use substrate_relay_helper::{ + cli::bridge::{CliBridgeBase, MessagesCliBridge}, + messages_lane::SubstrateMessageLane, + UtilityPalletBatchCallBuilder, +}; /// BridgeHubKusama-to-BridgeHubPolkadot messages bridge. 
pub struct BridgeHubKusamaToBridgeHubPolkadotMessagesCliBridge {} diff --git a/relays/bin-substrate/src/bridges/kusama_polkadot/bridge_hub_polkadot_messages_to_bridge_hub_kusama.rs b/substrate-relay/src/bridges/kusama_polkadot/bridge_hub_polkadot_messages_to_bridge_hub_kusama.rs similarity index 94% rename from relays/bin-substrate/src/bridges/kusama_polkadot/bridge_hub_polkadot_messages_to_bridge_hub_kusama.rs rename to substrate-relay/src/bridges/kusama_polkadot/bridge_hub_polkadot_messages_to_bridge_hub_kusama.rs index 0a1b21cd11243e2a39c8156a40174ec770f12f93..8d8e5e0c35e56dbcc5edbd2991ebd605e9f5150a 100644 --- a/relays/bin-substrate/src/bridges/kusama_polkadot/bridge_hub_polkadot_messages_to_bridge_hub_kusama.rs +++ b/substrate-relay/src/bridges/kusama_polkadot/bridge_hub_polkadot_messages_to_bridge_hub_kusama.rs @@ -16,10 +16,13 @@ //! BridgeHubPolkadot-to-BridgeHubKusama messages sync entrypoint. -use crate::cli::bridge::{CliBridgeBase, MessagesCliBridge}; use relay_bridge_hub_kusama_client::BridgeHubKusama; use relay_bridge_hub_polkadot_client::BridgeHubPolkadot; -use substrate_relay_helper::{messages_lane::SubstrateMessageLane, UtilityPalletBatchCallBuilder}; +use substrate_relay_helper::{ + cli::bridge::{CliBridgeBase, MessagesCliBridge}, + messages_lane::SubstrateMessageLane, + UtilityPalletBatchCallBuilder, +}; /// BridgeHubPolkadot-to-BridgeHubKusama messages bridge. 
pub struct BridgeHubPolkadotToBridgeHubKusamaMessagesCliBridge {} diff --git a/relays/bin-substrate/src/bridges/kusama_polkadot/kusama_headers_to_bridge_hub_polkadot.rs b/substrate-relay/src/bridges/kusama_polkadot/kusama_headers_to_bridge_hub_polkadot.rs similarity index 98% rename from relays/bin-substrate/src/bridges/kusama_polkadot/kusama_headers_to_bridge_hub_polkadot.rs rename to substrate-relay/src/bridges/kusama_polkadot/kusama_headers_to_bridge_hub_polkadot.rs index dafb5f568263d4bb5b791a3e8d19298b004e6099..196a22cd70d79564720339aa1c3e0d2a81cbf68c 100644 --- a/relays/bin-substrate/src/bridges/kusama_polkadot/kusama_headers_to_bridge_hub_polkadot.rs +++ b/substrate-relay/src/bridges/kusama_polkadot/kusama_headers_to_bridge_hub_polkadot.rs @@ -16,7 +16,7 @@ //! Kusama-to-BridgeHubPolkadot headers sync entrypoint. -use crate::cli::bridge::{ +use substrate_relay_helper::cli::bridge::{ CliBridgeBase, RelayToRelayEquivocationDetectionCliBridge, RelayToRelayHeadersCliBridge, }; diff --git a/relays/bin-substrate/src/bridges/kusama_polkadot/kusama_parachains_to_bridge_hub_polkadot.rs b/substrate-relay/src/bridges/kusama_polkadot/kusama_parachains_to_bridge_hub_polkadot.rs similarity index 93% rename from relays/bin-substrate/src/bridges/kusama_polkadot/kusama_parachains_to_bridge_hub_polkadot.rs rename to substrate-relay/src/bridges/kusama_polkadot/kusama_parachains_to_bridge_hub_polkadot.rs index 118ddfa11d55b5544510220aad82fed7b2623cba..82f7775576342fbc6952934ae9dcee6a73bc447e 100644 --- a/relays/bin-substrate/src/bridges/kusama_polkadot/kusama_parachains_to_bridge_hub_polkadot.rs +++ b/substrate-relay/src/bridges/kusama_polkadot/kusama_parachains_to_bridge_hub_polkadot.rs @@ -16,11 +16,11 @@ //! Kusama-to-BridgeHubPolkadot parachains sync entrypoint. 
-use crate::cli::bridge::{CliBridgeBase, MessagesCliBridge, ParachainToRelayHeadersCliBridge}; use bp_polkadot_core::parachains::{ParaHash, ParaHeadsProof, ParaId}; use relay_substrate_client::{CallOf, HeaderIdOf}; -use substrate_relay_helper::parachains::{ - SubmitParachainHeadsCallBuilder, SubstrateParachainsPipeline, +use substrate_relay_helper::{ + cli::bridge::{CliBridgeBase, MessagesCliBridge, ParachainToRelayHeadersCliBridge}, + parachains::{SubmitParachainHeadsCallBuilder, SubstrateParachainsPipeline}, }; /// Kusama-to-BridgeHubPolkadot parachain sync description. diff --git a/relays/bin-substrate/src/bridges/kusama_polkadot/mod.rs b/substrate-relay/src/bridges/kusama_polkadot/mod.rs similarity index 100% rename from relays/bin-substrate/src/bridges/kusama_polkadot/mod.rs rename to substrate-relay/src/bridges/kusama_polkadot/mod.rs diff --git a/relays/bin-substrate/src/bridges/kusama_polkadot/polkadot_headers_to_bridge_hub_kusama.rs b/substrate-relay/src/bridges/kusama_polkadot/polkadot_headers_to_bridge_hub_kusama.rs similarity index 98% rename from relays/bin-substrate/src/bridges/kusama_polkadot/polkadot_headers_to_bridge_hub_kusama.rs rename to substrate-relay/src/bridges/kusama_polkadot/polkadot_headers_to_bridge_hub_kusama.rs index 019afab0bb3d61bcffabbf029167909d54da85a0..d96326a288de9bb3bf536bb5de42af792b49f419 100644 --- a/relays/bin-substrate/src/bridges/kusama_polkadot/polkadot_headers_to_bridge_hub_kusama.rs +++ b/substrate-relay/src/bridges/kusama_polkadot/polkadot_headers_to_bridge_hub_kusama.rs @@ -16,7 +16,7 @@ //! Polkadot-to-KusamaBridgeHub headers sync entrypoint. 
-use crate::cli::bridge::{ +use substrate_relay_helper::cli::bridge::{ CliBridgeBase, RelayToRelayEquivocationDetectionCliBridge, RelayToRelayHeadersCliBridge, }; diff --git a/relays/bin-substrate/src/bridges/kusama_polkadot/polkadot_parachains_to_bridge_hub_kusama.rs b/substrate-relay/src/bridges/kusama_polkadot/polkadot_parachains_to_bridge_hub_kusama.rs similarity index 93% rename from relays/bin-substrate/src/bridges/kusama_polkadot/polkadot_parachains_to_bridge_hub_kusama.rs rename to substrate-relay/src/bridges/kusama_polkadot/polkadot_parachains_to_bridge_hub_kusama.rs index 848a239473f486280039ffaa950d4c1c7df9f752..17c7b29d9f4bd593c0f2faface5fdc586811409e 100644 --- a/relays/bin-substrate/src/bridges/kusama_polkadot/polkadot_parachains_to_bridge_hub_kusama.rs +++ b/substrate-relay/src/bridges/kusama_polkadot/polkadot_parachains_to_bridge_hub_kusama.rs @@ -16,11 +16,11 @@ //! Polkadot-to-BridgeHubKusama parachains sync entrypoint. -use crate::cli::bridge::{CliBridgeBase, MessagesCliBridge, ParachainToRelayHeadersCliBridge}; use bp_polkadot_core::parachains::{ParaHash, ParaHeadsProof, ParaId}; use relay_substrate_client::{CallOf, HeaderIdOf}; -use substrate_relay_helper::parachains::{ - SubmitParachainHeadsCallBuilder, SubstrateParachainsPipeline, +use substrate_relay_helper::{ + cli::bridge::{CliBridgeBase, MessagesCliBridge, ParachainToRelayHeadersCliBridge}, + parachains::{SubmitParachainHeadsCallBuilder, SubstrateParachainsPipeline}, }; /// Polkadot-to-BridgeHubKusama parachain sync description. 
diff --git a/relays/bin-substrate/src/bridges/mod.rs b/substrate-relay/src/bridges/mod.rs similarity index 100% rename from relays/bin-substrate/src/bridges/mod.rs rename to substrate-relay/src/bridges/mod.rs diff --git a/relays/bin-substrate/src/bridges/polkadot_bulletin/bridge_hub_polkadot_messages_to_polkadot_bulletin.rs b/substrate-relay/src/bridges/polkadot_bulletin/bridge_hub_polkadot_messages_to_polkadot_bulletin.rs similarity index 94% rename from relays/bin-substrate/src/bridges/polkadot_bulletin/bridge_hub_polkadot_messages_to_polkadot_bulletin.rs rename to substrate-relay/src/bridges/polkadot_bulletin/bridge_hub_polkadot_messages_to_polkadot_bulletin.rs index ba177271db15fc220deee77c6a11c6d379e300ee..8114d23296f0c47c852d7fe883de9deb49603e40 100644 --- a/relays/bin-substrate/src/bridges/polkadot_bulletin/bridge_hub_polkadot_messages_to_polkadot_bulletin.rs +++ b/substrate-relay/src/bridges/polkadot_bulletin/bridge_hub_polkadot_messages_to_polkadot_bulletin.rs @@ -16,10 +16,13 @@ //! BridgeHubPolkadot-to-PolkadotBulletin messages sync entrypoint. -use crate::cli::bridge::{CliBridgeBase, MessagesCliBridge}; use relay_bridge_hub_polkadot_client::BridgeHubPolkadot; use relay_polkadot_bulletin_client::PolkadotBulletin; -use substrate_relay_helper::{messages_lane::SubstrateMessageLane, UtilityPalletBatchCallBuilder}; +use substrate_relay_helper::{ + cli::bridge::{CliBridgeBase, MessagesCliBridge}, + messages_lane::SubstrateMessageLane, + UtilityPalletBatchCallBuilder, +}; /// BridgeHubPolkadot-to-PolkadotBulletin messages bridge. 
pub struct BridgeHubPolkadotToPolkadotBulletinMessagesCliBridge {} diff --git a/relays/bin-substrate/src/bridges/polkadot_bulletin/mod.rs b/substrate-relay/src/bridges/polkadot_bulletin/mod.rs similarity index 100% rename from relays/bin-substrate/src/bridges/polkadot_bulletin/mod.rs rename to substrate-relay/src/bridges/polkadot_bulletin/mod.rs diff --git a/relays/bin-substrate/src/bridges/polkadot_bulletin/polkadot_bulletin_headers_to_bridge_hub_polkadot.rs b/substrate-relay/src/bridges/polkadot_bulletin/polkadot_bulletin_headers_to_bridge_hub_polkadot.rs similarity index 98% rename from relays/bin-substrate/src/bridges/polkadot_bulletin/polkadot_bulletin_headers_to_bridge_hub_polkadot.rs rename to substrate-relay/src/bridges/polkadot_bulletin/polkadot_bulletin_headers_to_bridge_hub_polkadot.rs index 7019a6b55a36aef02ebf560931fe743df20e6506..eb63785d3bb3942f48e203d6afbe0ea9b931f1ff 100644 --- a/relays/bin-substrate/src/bridges/polkadot_bulletin/polkadot_bulletin_headers_to_bridge_hub_polkadot.rs +++ b/substrate-relay/src/bridges/polkadot_bulletin/polkadot_bulletin_headers_to_bridge_hub_polkadot.rs @@ -16,11 +16,6 @@ //! PolkadotBulletin-to-BridgeHubPolkadot headers sync entrypoint. -use crate::cli::bridge::{ - CliBridgeBase, MessagesCliBridge, RelayToRelayEquivocationDetectionCliBridge, - RelayToRelayHeadersCliBridge, -}; - use async_trait::async_trait; use substrate_relay_helper::{ equivocation::SubstrateEquivocationDetectionPipeline, @@ -28,6 +23,11 @@ use substrate_relay_helper::{ finality_base::{engine::Grandpa as GrandpaFinalityEngine, SubstrateFinalityPipeline}, }; +use substrate_relay_helper::cli::bridge::{ + CliBridgeBase, MessagesCliBridge, RelayToRelayEquivocationDetectionCliBridge, + RelayToRelayHeadersCliBridge, +}; + /// Description of `PolkadotBulletin` -> `PolkadotBridgeHub` finalized headers bridge. 
#[derive(Clone, Debug)] pub struct PolkadotBulletinFinalityToBridgeHubPolkadot; diff --git a/relays/bin-substrate/src/bridges/polkadot_bulletin/polkadot_bulletin_messages_to_bridge_hub_polkadot.rs b/substrate-relay/src/bridges/polkadot_bulletin/polkadot_bulletin_messages_to_bridge_hub_polkadot.rs similarity index 94% rename from relays/bin-substrate/src/bridges/polkadot_bulletin/polkadot_bulletin_messages_to_bridge_hub_polkadot.rs rename to substrate-relay/src/bridges/polkadot_bulletin/polkadot_bulletin_messages_to_bridge_hub_polkadot.rs index 1b5f3e5f69e84ffa79c8a6cacfdbd9d7ef0bdd38..1c04f8788101670237ceaf5df7ce885b89fe2e00 100644 --- a/relays/bin-substrate/src/bridges/polkadot_bulletin/polkadot_bulletin_messages_to_bridge_hub_polkadot.rs +++ b/substrate-relay/src/bridges/polkadot_bulletin/polkadot_bulletin_messages_to_bridge_hub_polkadot.rs @@ -16,10 +16,13 @@ //! PolkadotBulletin-to-BridgeHubPolkadot messages sync entrypoint. -use crate::cli::bridge::{CliBridgeBase, MessagesCliBridge}; use relay_bridge_hub_polkadot_client::BridgeHubPolkadot; use relay_polkadot_bulletin_client::PolkadotBulletin; -use substrate_relay_helper::{messages_lane::SubstrateMessageLane, UtilityPalletBatchCallBuilder}; +use substrate_relay_helper::{ + cli::bridge::{CliBridgeBase, MessagesCliBridge}, + messages_lane::SubstrateMessageLane, + UtilityPalletBatchCallBuilder, +}; /// PolkadotBulletin-to-BridgeHubPolkadot messages bridge. 
pub struct PolkadotBulletinToBridgeHubPolkadotMessagesCliBridge {} diff --git a/relays/bin-substrate/src/bridges/polkadot_bulletin/polkadot_headers_to_polkadot_bulletin.rs b/substrate-relay/src/bridges/polkadot_bulletin/polkadot_headers_to_polkadot_bulletin.rs similarity index 98% rename from relays/bin-substrate/src/bridges/polkadot_bulletin/polkadot_headers_to_polkadot_bulletin.rs rename to substrate-relay/src/bridges/polkadot_bulletin/polkadot_headers_to_polkadot_bulletin.rs index 897c2ac884f91c4e9166901d7db6b825fb4e5e55..7996d1613c8dd31390c1bc2667410d920d60f7f2 100644 --- a/relays/bin-substrate/src/bridges/polkadot_bulletin/polkadot_headers_to_polkadot_bulletin.rs +++ b/substrate-relay/src/bridges/polkadot_bulletin/polkadot_headers_to_polkadot_bulletin.rs @@ -16,10 +16,6 @@ //! Polkadot-to-PolkadotBulletin headers sync entrypoint. -use crate::cli::bridge::{ - CliBridgeBase, RelayToRelayEquivocationDetectionCliBridge, RelayToRelayHeadersCliBridge, -}; - use async_trait::async_trait; use substrate_relay_helper::{ equivocation::SubstrateEquivocationDetectionPipeline, @@ -27,6 +23,10 @@ use substrate_relay_helper::{ finality_base::{engine::Grandpa as GrandpaFinalityEngine, SubstrateFinalityPipeline}, }; +use substrate_relay_helper::cli::bridge::{ + CliBridgeBase, RelayToRelayEquivocationDetectionCliBridge, RelayToRelayHeadersCliBridge, +}; + /// Description of Polkadot -> `PolkadotBulletin` finalized headers bridge. 
#[derive(Clone, Debug)] pub struct PolkadotFinalityToPolkadotBulletin; diff --git a/relays/bin-substrate/src/bridges/polkadot_bulletin/polkadot_parachains_to_polkadot_bulletin.rs b/substrate-relay/src/bridges/polkadot_bulletin/polkadot_parachains_to_polkadot_bulletin.rs similarity index 97% rename from relays/bin-substrate/src/bridges/polkadot_bulletin/polkadot_parachains_to_polkadot_bulletin.rs rename to substrate-relay/src/bridges/polkadot_bulletin/polkadot_parachains_to_polkadot_bulletin.rs index 55b094e2128eb9c36118f388210ab7cb46372b91..94a635c7d31b1fffdef919d9e59bdc5ade6b2c23 100644 --- a/relays/bin-substrate/src/bridges/polkadot_bulletin/polkadot_parachains_to_polkadot_bulletin.rs +++ b/substrate-relay/src/bridges/polkadot_bulletin/polkadot_parachains_to_polkadot_bulletin.rs @@ -16,7 +16,9 @@ //! Polkadot-to-PolkadotBulletin parachains sync entrypoint. -use crate::cli::bridge::{CliBridgeBase, MessagesCliBridge, ParachainToRelayHeadersCliBridge}; +use substrate_relay_helper::cli::bridge::{ + CliBridgeBase, MessagesCliBridge, ParachainToRelayHeadersCliBridge, +}; use bp_polkadot_core::parachains::{ParaHash, ParaHeadsProof, ParaId}; use bp_runtime::Chain; diff --git a/relays/bin-substrate/src/bridges/rococo_bulletin/bridge_hub_rococo_messages_to_rococo_bulletin.rs b/substrate-relay/src/bridges/rococo_bulletin/bridge_hub_rococo_messages_to_rococo_bulletin.rs similarity index 94% rename from relays/bin-substrate/src/bridges/rococo_bulletin/bridge_hub_rococo_messages_to_rococo_bulletin.rs rename to substrate-relay/src/bridges/rococo_bulletin/bridge_hub_rococo_messages_to_rococo_bulletin.rs index a2de83831c9bf726de0392b3268fdda59baf22c9..b8e95556bff112de7279fe0eee53ae57db2ec10e 100644 --- a/relays/bin-substrate/src/bridges/rococo_bulletin/bridge_hub_rococo_messages_to_rococo_bulletin.rs +++ b/substrate-relay/src/bridges/rococo_bulletin/bridge_hub_rococo_messages_to_rococo_bulletin.rs @@ -17,9 +17,12 @@ //! BridgeHubRococo-to-RococoBulletin messages sync entrypoint. 
use super::BridgeHubRococoAsBridgeHubPolkadot; -use crate::cli::bridge::{CliBridgeBase, MessagesCliBridge}; use relay_polkadot_bulletin_client::PolkadotBulletin as RococoBulletin; -use substrate_relay_helper::{messages_lane::SubstrateMessageLane, UtilityPalletBatchCallBuilder}; +use substrate_relay_helper::{ + cli::bridge::{CliBridgeBase, MessagesCliBridge}, + messages_lane::SubstrateMessageLane, + UtilityPalletBatchCallBuilder, +}; /// BridgeHubRococo-to-RococoBulletin messages bridge. pub struct BridgeHubRococoToRococoBulletinMessagesCliBridge {} diff --git a/relays/bin-substrate/src/bridges/rococo_bulletin/mod.rs b/substrate-relay/src/bridges/rococo_bulletin/mod.rs similarity index 96% rename from relays/bin-substrate/src/bridges/rococo_bulletin/mod.rs rename to substrate-relay/src/bridges/rococo_bulletin/mod.rs index cc92c5de6290c09d0fc685c239463adea4dc2c03..f8dc0f6f2e67c181229df6dee5cb2884357058e4 100644 --- a/relays/bin-substrate/src/bridges/rococo_bulletin/mod.rs +++ b/substrate-relay/src/bridges/rococo_bulletin/mod.rs @@ -16,8 +16,6 @@ //! Declaration of all bridges between Rococo Bulletin Chain and Rococo Bridge Hub. 
-use crate::cli::CliChain; - use bp_messages::MessageNonce; use bp_runtime::{ AccountIdOf, BalanceOf, BlockNumberOf, ChainId, HashOf, HasherOf, HeaderOf, NonceOf, @@ -25,7 +23,8 @@ use bp_runtime::{ }; use frame_support::pallet_prelude::Weight; use relay_substrate_client::{ - Error as SubstrateError, SignParam, SimpleRuntimeVersion, UnsignedTransaction, + ChainWithRuntimeVersion, Error as SubstrateError, SignParam, SimpleRuntimeVersion, + UnsignedTransaction, }; use sp_core::storage::StorageKey; use std::time::Duration; @@ -65,8 +64,8 @@ impl bp_header_chain::ChainWithGrandpa for RococoBaseAsPolkadot { const WITH_CHAIN_GRANDPA_PALLET_NAME: &'static str = bp_polkadot::Polkadot::WITH_CHAIN_GRANDPA_PALLET_NAME; const MAX_AUTHORITIES_COUNT: u32 = bp_rococo::Rococo::MAX_AUTHORITIES_COUNT; - const REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY: u32 = - bp_rococo::Rococo::REASONABLE_HEADERS_IN_JUSTIFICATON_ANCESTRY; + const REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY: u32 = + bp_rococo::Rococo::REASONABLE_HEADERS_IN_JUSTIFICATION_ANCESTRY; const MAX_MANDATORY_HEADER_SIZE: u32 = bp_rococo::Rococo::MAX_MANDATORY_HEADER_SIZE; const AVERAGE_HEADER_SIZE: u32 = bp_rococo::Rococo::AVERAGE_HEADER_SIZE; } @@ -131,7 +130,7 @@ impl relay_substrate_client::ChainWithTransactions for RococoAsPolkadot { } } -impl CliChain for RococoAsPolkadot { +impl ChainWithRuntimeVersion for RococoAsPolkadot { const RUNTIME_VERSION: Option = None; } @@ -239,7 +238,7 @@ impl relay_substrate_client::ChainWithMessages for BridgeHubRococoAsBridgeHubPol relay_bridge_hub_polkadot_client::BridgeHubPolkadot::FROM_CHAIN_MESSAGE_DETAILS_METHOD; } -impl CliChain for BridgeHubRococoAsBridgeHubPolkadot { +impl ChainWithRuntimeVersion for BridgeHubRococoAsBridgeHubPolkadot { const RUNTIME_VERSION: Option = Some(SimpleRuntimeVersion { spec_version: 1_003_000, transaction_version: 3 }); } diff --git a/relays/bin-substrate/src/bridges/rococo_bulletin/rococo_bulletin_headers_to_bridge_hub_rococo.rs 
b/substrate-relay/src/bridges/rococo_bulletin/rococo_bulletin_headers_to_bridge_hub_rococo.rs similarity index 98% rename from relays/bin-substrate/src/bridges/rococo_bulletin/rococo_bulletin_headers_to_bridge_hub_rococo.rs rename to substrate-relay/src/bridges/rococo_bulletin/rococo_bulletin_headers_to_bridge_hub_rococo.rs index e897cd85967dfcb62f2c3832759891bf043b7517..0d54fd21018d168ed1e7dd7d62381b4f558a32e5 100644 --- a/relays/bin-substrate/src/bridges/rococo_bulletin/rococo_bulletin_headers_to_bridge_hub_rococo.rs +++ b/substrate-relay/src/bridges/rococo_bulletin/rococo_bulletin_headers_to_bridge_hub_rococo.rs @@ -17,10 +17,6 @@ //! RococoBulletin-to-BridgeHubRococo headers sync entrypoint. use super::BridgeHubRococoAsBridgeHubPolkadot; -use crate::cli::bridge::{ - CliBridgeBase, MessagesCliBridge, RelayToRelayEquivocationDetectionCliBridge, - RelayToRelayHeadersCliBridge, -}; use async_trait::async_trait; use substrate_relay_helper::{ @@ -29,6 +25,11 @@ use substrate_relay_helper::{ finality_base::{engine::Grandpa as GrandpaFinalityEngine, SubstrateFinalityPipeline}, }; +use substrate_relay_helper::cli::bridge::{ + CliBridgeBase, MessagesCliBridge, RelayToRelayEquivocationDetectionCliBridge, + RelayToRelayHeadersCliBridge, +}; + /// Description of `RococoBulletin` -> `RococoBridgeHub` finalized headers bridge. 
#[derive(Clone, Debug)] pub struct RococoBulletinFinalityToBridgeHubRococo; diff --git a/relays/bin-substrate/src/bridges/rococo_bulletin/rococo_bulletin_messages_to_bridge_hub_rococo.rs b/substrate-relay/src/bridges/rococo_bulletin/rococo_bulletin_messages_to_bridge_hub_rococo.rs similarity index 94% rename from relays/bin-substrate/src/bridges/rococo_bulletin/rococo_bulletin_messages_to_bridge_hub_rococo.rs rename to substrate-relay/src/bridges/rococo_bulletin/rococo_bulletin_messages_to_bridge_hub_rococo.rs index 856be9cf6f2a801c44655e968d5cf1d976322384..d192ec0381e58c80eb0108652b6066612166971d 100644 --- a/relays/bin-substrate/src/bridges/rococo_bulletin/rococo_bulletin_messages_to_bridge_hub_rococo.rs +++ b/substrate-relay/src/bridges/rococo_bulletin/rococo_bulletin_messages_to_bridge_hub_rococo.rs @@ -17,9 +17,12 @@ //! RococoBulletin-to-BridgeHubRococo messages sync entrypoint. use super::BridgeHubRococoAsBridgeHubPolkadot; -use crate::cli::bridge::{CliBridgeBase, MessagesCliBridge}; use relay_polkadot_bulletin_client::PolkadotBulletin as RococoBulletin; -use substrate_relay_helper::{messages_lane::SubstrateMessageLane, UtilityPalletBatchCallBuilder}; +use substrate_relay_helper::{ + cli::bridge::{CliBridgeBase, MessagesCliBridge}, + messages_lane::SubstrateMessageLane, + UtilityPalletBatchCallBuilder, +}; /// RococoBulletin-to-BridgeHubRococo messages bridge. 
pub struct RococoBulletinToBridgeHubRococoMessagesCliBridge {} diff --git a/relays/bin-substrate/src/bridges/rococo_bulletin/rococo_headers_to_rococo_bulletin.rs b/substrate-relay/src/bridges/rococo_bulletin/rococo_headers_to_rococo_bulletin.rs similarity index 98% rename from relays/bin-substrate/src/bridges/rococo_bulletin/rococo_headers_to_rococo_bulletin.rs rename to substrate-relay/src/bridges/rococo_bulletin/rococo_headers_to_rococo_bulletin.rs index 8a4b44eec27b94d5b742aadaed61b4cffc86cb07..45c890267ffbaea037ffcc2ced78bd245d6905a2 100644 --- a/relays/bin-substrate/src/bridges/rococo_bulletin/rococo_headers_to_rococo_bulletin.rs +++ b/substrate-relay/src/bridges/rococo_bulletin/rococo_headers_to_rococo_bulletin.rs @@ -17,9 +17,6 @@ //! Rococo-to-RococoBulletin headers sync entrypoint. use super::RococoAsPolkadot; -use crate::cli::bridge::{ - CliBridgeBase, RelayToRelayEquivocationDetectionCliBridge, RelayToRelayHeadersCliBridge, -}; use async_trait::async_trait; use substrate_relay_helper::{ @@ -28,6 +25,10 @@ use substrate_relay_helper::{ finality_base::{engine::Grandpa as GrandpaFinalityEngine, SubstrateFinalityPipeline}, }; +use substrate_relay_helper::cli::bridge::{ + CliBridgeBase, RelayToRelayEquivocationDetectionCliBridge, RelayToRelayHeadersCliBridge, +}; + /// Description of Rococo -> `RococoBulletin` finalized headers bridge. 
#[derive(Clone, Debug)] pub struct RococoFinalityToRococoBulletin; diff --git a/relays/bin-substrate/src/bridges/rococo_bulletin/rococo_parachains_to_rococo_bulletin.rs b/substrate-relay/src/bridges/rococo_bulletin/rococo_parachains_to_rococo_bulletin.rs similarity index 97% rename from relays/bin-substrate/src/bridges/rococo_bulletin/rococo_parachains_to_rococo_bulletin.rs rename to substrate-relay/src/bridges/rococo_bulletin/rococo_parachains_to_rococo_bulletin.rs index c86ef977ee6735e3436023fcd94898281efd8f63..2c67c83ad2a3d8199e3fb8c1822d16df7f6012c7 100644 --- a/relays/bin-substrate/src/bridges/rococo_bulletin/rococo_parachains_to_rococo_bulletin.rs +++ b/substrate-relay/src/bridges/rococo_bulletin/rococo_parachains_to_rococo_bulletin.rs @@ -17,12 +17,12 @@ //! Rococo-to-RococoBulletin parachains sync entrypoint. use super::{BridgeHubRococoAsBridgeHubPolkadot, RococoAsPolkadot}; -use crate::cli::bridge::{CliBridgeBase, MessagesCliBridge, ParachainToRelayHeadersCliBridge}; use bp_polkadot_core::parachains::{ParaHash, ParaHeadsProof, ParaId}; use bp_runtime::Chain; use relay_substrate_client::{CallOf, HeaderIdOf}; use substrate_relay_helper::{ + cli::bridge::{CliBridgeBase, MessagesCliBridge, ParachainToRelayHeadersCliBridge}, messages_lane::MessagesRelayLimits, parachains::{SubmitParachainHeadsCallBuilder, SubstrateParachainsPipeline}, }; diff --git a/relays/bin-substrate/src/bridges/rococo_westend/bridge_hub_rococo_messages_to_bridge_hub_westend.rs b/substrate-relay/src/bridges/rococo_westend/bridge_hub_rococo_messages_to_bridge_hub_westend.rs similarity index 94% rename from relays/bin-substrate/src/bridges/rococo_westend/bridge_hub_rococo_messages_to_bridge_hub_westend.rs rename to substrate-relay/src/bridges/rococo_westend/bridge_hub_rococo_messages_to_bridge_hub_westend.rs index cbf122a2d4b08347998ea6453e18c444b45e99ee..ec6b07d982afe18b011013af7ead8348ccbbbafe 100644 --- 
a/relays/bin-substrate/src/bridges/rococo_westend/bridge_hub_rococo_messages_to_bridge_hub_westend.rs +++ b/substrate-relay/src/bridges/rococo_westend/bridge_hub_rococo_messages_to_bridge_hub_westend.rs @@ -16,10 +16,13 @@ //! BridgeHubRococo-to-BridgeHubWestend messages sync entrypoint. -use crate::cli::bridge::{CliBridgeBase, MessagesCliBridge}; use relay_bridge_hub_rococo_client::BridgeHubRococo; use relay_bridge_hub_westend_client::BridgeHubWestend; -use substrate_relay_helper::{messages_lane::SubstrateMessageLane, UtilityPalletBatchCallBuilder}; +use substrate_relay_helper::{ + cli::bridge::{CliBridgeBase, MessagesCliBridge}, + messages_lane::SubstrateMessageLane, + UtilityPalletBatchCallBuilder, +}; pub struct BridgeHubRococoToBridgeHubWestendMessagesCliBridge {} diff --git a/relays/bin-substrate/src/bridges/rococo_westend/bridge_hub_westend_messages_to_bridge_hub_rococo.rs b/substrate-relay/src/bridges/rococo_westend/bridge_hub_westend_messages_to_bridge_hub_rococo.rs similarity index 94% rename from relays/bin-substrate/src/bridges/rococo_westend/bridge_hub_westend_messages_to_bridge_hub_rococo.rs rename to substrate-relay/src/bridges/rococo_westend/bridge_hub_westend_messages_to_bridge_hub_rococo.rs index bb823981bf89e279531c1ddcf265fd5cefc38b41..4e978cd8356cf0dd99d8a330a81c14eba6106bd1 100644 --- a/relays/bin-substrate/src/bridges/rococo_westend/bridge_hub_westend_messages_to_bridge_hub_rococo.rs +++ b/substrate-relay/src/bridges/rococo_westend/bridge_hub_westend_messages_to_bridge_hub_rococo.rs @@ -16,10 +16,13 @@ //! BridgeHubWestend-to-BridgeHubRococo messages sync entrypoint. 
-use crate::cli::bridge::{CliBridgeBase, MessagesCliBridge}; use relay_bridge_hub_rococo_client::BridgeHubRococo; use relay_bridge_hub_westend_client::BridgeHubWestend; -use substrate_relay_helper::{messages_lane::SubstrateMessageLane, UtilityPalletBatchCallBuilder}; +use substrate_relay_helper::{ + cli::bridge::{CliBridgeBase, MessagesCliBridge}, + messages_lane::SubstrateMessageLane, + UtilityPalletBatchCallBuilder, +}; pub struct BridgeHubWestendToBridgeHubRococoMessagesCliBridge {} diff --git a/relays/bin-substrate/src/bridges/rococo_westend/mod.rs b/substrate-relay/src/bridges/rococo_westend/mod.rs similarity index 100% rename from relays/bin-substrate/src/bridges/rococo_westend/mod.rs rename to substrate-relay/src/bridges/rococo_westend/mod.rs diff --git a/relays/bin-substrate/src/bridges/rococo_westend/rococo_headers_to_bridge_hub_westend.rs b/substrate-relay/src/bridges/rococo_westend/rococo_headers_to_bridge_hub_westend.rs similarity index 98% rename from relays/bin-substrate/src/bridges/rococo_westend/rococo_headers_to_bridge_hub_westend.rs rename to substrate-relay/src/bridges/rococo_westend/rococo_headers_to_bridge_hub_westend.rs index 6e6661d5417c8b2148613df97156641684ff51de..bf30a87bf2d4871b96e909d3c6a009301d77e58b 100644 --- a/relays/bin-substrate/src/bridges/rococo_westend/rococo_headers_to_bridge_hub_westend.rs +++ b/substrate-relay/src/bridges/rococo_westend/rococo_headers_to_bridge_hub_westend.rs @@ -16,10 +16,6 @@ //! Rococo-to-Westend bridge hubs headers sync entrypoint. 
-use crate::cli::bridge::{ - CliBridgeBase, RelayToRelayEquivocationDetectionCliBridge, RelayToRelayHeadersCliBridge, -}; - use async_trait::async_trait; use substrate_relay_helper::{ equivocation::SubstrateEquivocationDetectionPipeline, @@ -27,6 +23,10 @@ use substrate_relay_helper::{ finality_base::{engine::Grandpa as GrandpaFinalityEngine, SubstrateFinalityPipeline}, }; +use substrate_relay_helper::cli::bridge::{ + CliBridgeBase, RelayToRelayEquivocationDetectionCliBridge, RelayToRelayHeadersCliBridge, +}; + /// Description of Rococo -> Westend finalized headers bridge. #[derive(Clone, Debug)] pub struct RococoFinalityToBridgeHubWestend; diff --git a/relays/bin-substrate/src/bridges/rococo_westend/rococo_parachains_to_bridge_hub_westend.rs b/substrate-relay/src/bridges/rococo_westend/rococo_parachains_to_bridge_hub_westend.rs similarity index 93% rename from relays/bin-substrate/src/bridges/rococo_westend/rococo_parachains_to_bridge_hub_westend.rs rename to substrate-relay/src/bridges/rococo_westend/rococo_parachains_to_bridge_hub_westend.rs index 5b17de5cd6f7fcff1d57cd4528ef675af634efe0..be33bc7292786ddadbf89ea0dd62ca0f44071790 100644 --- a/relays/bin-substrate/src/bridges/rococo_westend/rococo_parachains_to_bridge_hub_westend.rs +++ b/substrate-relay/src/bridges/rococo_westend/rococo_parachains_to_bridge_hub_westend.rs @@ -16,11 +16,11 @@ //! Westend-to-Rococo parachains sync entrypoint. -use crate::cli::bridge::{CliBridgeBase, MessagesCliBridge, ParachainToRelayHeadersCliBridge}; use bp_polkadot_core::parachains::{ParaHash, ParaHeadsProof, ParaId}; use relay_substrate_client::{CallOf, HeaderIdOf}; -use substrate_relay_helper::parachains::{ - SubmitParachainHeadsCallBuilder, SubstrateParachainsPipeline, +use substrate_relay_helper::{ + cli::bridge::{CliBridgeBase, MessagesCliBridge, ParachainToRelayHeadersCliBridge}, + parachains::{SubmitParachainHeadsCallBuilder, SubstrateParachainsPipeline}, }; /// BridgeHub-to-BridgeHub parachain sync description. 
diff --git a/relays/bin-substrate/src/bridges/rococo_westend/westend_headers_to_bridge_hub_rococo.rs b/substrate-relay/src/bridges/rococo_westend/westend_headers_to_bridge_hub_rococo.rs similarity index 98% rename from relays/bin-substrate/src/bridges/rococo_westend/westend_headers_to_bridge_hub_rococo.rs rename to substrate-relay/src/bridges/rococo_westend/westend_headers_to_bridge_hub_rococo.rs index 6f4ebb84a834eed94220c8889ea3b520b651b203..4a1419f06dcdff8a22198b70d66144b82b9db4d6 100644 --- a/relays/bin-substrate/src/bridges/rococo_westend/westend_headers_to_bridge_hub_rococo.rs +++ b/substrate-relay/src/bridges/rococo_westend/westend_headers_to_bridge_hub_rococo.rs @@ -16,10 +16,6 @@ //! Westend-to-Rococo bridge hubs headers sync entrypoint. -use crate::cli::bridge::{ - CliBridgeBase, RelayToRelayEquivocationDetectionCliBridge, RelayToRelayHeadersCliBridge, -}; - use async_trait::async_trait; use substrate_relay_helper::{ equivocation::SubstrateEquivocationDetectionPipeline, @@ -27,6 +23,10 @@ use substrate_relay_helper::{ finality_base::{engine::Grandpa as GrandpaFinalityEngine, SubstrateFinalityPipeline}, }; +use substrate_relay_helper::cli::bridge::{ + CliBridgeBase, RelayToRelayEquivocationDetectionCliBridge, RelayToRelayHeadersCliBridge, +}; + /// Description of Westend -> Rococo finalized headers bridge. 
#[derive(Clone, Debug)] pub struct WestendFinalityToBridgeHubRococo; diff --git a/relays/bin-substrate/src/bridges/rococo_westend/westend_parachains_to_bridge_hub_rococo.rs b/substrate-relay/src/bridges/rococo_westend/westend_parachains_to_bridge_hub_rococo.rs similarity index 93% rename from relays/bin-substrate/src/bridges/rococo_westend/westend_parachains_to_bridge_hub_rococo.rs rename to substrate-relay/src/bridges/rococo_westend/westend_parachains_to_bridge_hub_rococo.rs index 45d3948b14da875fda03bc60cc453a6141e7ccf0..18d1d0716d136e20f34f87f03ee15fa8e5d6fc95 100644 --- a/relays/bin-substrate/src/bridges/rococo_westend/westend_parachains_to_bridge_hub_rococo.rs +++ b/substrate-relay/src/bridges/rococo_westend/westend_parachains_to_bridge_hub_rococo.rs @@ -16,11 +16,11 @@ //! Rococo-to-Westend parachains sync entrypoint. -use crate::cli::bridge::{CliBridgeBase, MessagesCliBridge, ParachainToRelayHeadersCliBridge}; use bp_polkadot_core::parachains::{ParaHash, ParaHeadsProof, ParaId}; use relay_substrate_client::{CallOf, HeaderIdOf}; -use substrate_relay_helper::parachains::{ - SubmitParachainHeadsCallBuilder, SubstrateParachainsPipeline, +use substrate_relay_helper::{ + cli::bridge::{CliBridgeBase, MessagesCliBridge, ParachainToRelayHeadersCliBridge}, + parachains::{SubmitParachainHeadsCallBuilder, SubstrateParachainsPipeline}, }; /// BridgeHub-to-BridgeHub parachain sync description. diff --git a/substrate-relay/src/cli/chain_schema.rs b/substrate-relay/src/cli/chain_schema.rs new file mode 100644 index 0000000000000000000000000000000000000000..4422332a593249478e2782a97ac148b1f345c593 --- /dev/null +++ b/substrate-relay/src/cli/chain_schema.rs @@ -0,0 +1,110 @@ +// Copyright 2019-2022 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+ +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +#[cfg(test)] +mod tests { + use sp_core::Pair; + use substrate_relay_helper::cli::chain_schema::TargetSigningParams; + + #[test] + fn reads_suri_from_file() { + const ALICE: &str = "//Alice"; + const BOB: &str = "//Bob"; + const ALICE_PASSWORD: &str = "alice_password"; + const BOB_PASSWORD: &str = "bob_password"; + + let alice: sp_core::sr25519::Pair = Pair::from_string(ALICE, Some(ALICE_PASSWORD)).unwrap(); + let bob: sp_core::sr25519::Pair = Pair::from_string(BOB, Some(BOB_PASSWORD)).unwrap(); + let bob_with_alice_password = + sp_core::sr25519::Pair::from_string(BOB, Some(ALICE_PASSWORD)).unwrap(); + + let temp_dir = tempfile::tempdir().unwrap(); + let mut suri_file_path = temp_dir.path().to_path_buf(); + let mut password_file_path = temp_dir.path().to_path_buf(); + suri_file_path.push("suri"); + password_file_path.push("password"); + std::fs::write(&suri_file_path, BOB.as_bytes()).unwrap(); + std::fs::write(&password_file_path, BOB_PASSWORD.as_bytes()).unwrap(); + + // when both seed and password are read from file + assert_eq!( + TargetSigningParams { + target_signer: Some(ALICE.into()), + target_signer_password: Some(ALICE_PASSWORD.into()), + + target_signer_file: None, + target_signer_password_file: None, + + target_transactions_mortality: None, + } + .to_keypair::() + .map(|p| p.public()) + .map_err(drop), + 
Ok(alice.public()), + ); + + // when both seed and password are read from file + assert_eq!( + TargetSigningParams { + target_signer: None, + target_signer_password: None, + + target_signer_file: Some(suri_file_path.clone()), + target_signer_password_file: Some(password_file_path.clone()), + + target_transactions_mortality: None, + } + .to_keypair::() + .map(|p| p.public()) + .map_err(drop), + Ok(bob.public()), + ); + + // when password are is overriden by cli option + assert_eq!( + TargetSigningParams { + target_signer: None, + target_signer_password: Some(ALICE_PASSWORD.into()), + + target_signer_file: Some(suri_file_path.clone()), + target_signer_password_file: Some(password_file_path.clone()), + + target_transactions_mortality: None, + } + .to_keypair::() + .map(|p| p.public()) + .map_err(drop), + Ok(bob_with_alice_password.public()), + ); + + // when both seed and password are overriden by cli options + assert_eq!( + TargetSigningParams { + target_signer: Some(ALICE.into()), + target_signer_password: Some(ALICE_PASSWORD.into()), + + target_signer_file: Some(suri_file_path), + target_signer_password_file: Some(password_file_path), + + target_transactions_mortality: None, + } + .to_keypair::() + .map(|p| p.public()) + .map_err(drop), + Ok(alice.public()), + ); + } +} diff --git a/relays/bin-substrate/src/cli/detect_equivocations.rs b/substrate-relay/src/cli/detect_equivocations.rs similarity index 54% rename from relays/bin-substrate/src/cli/detect_equivocations.rs rename to substrate-relay/src/cli/detect_equivocations.rs index a8f1ed35f3afbdbb6569c7fb5e1f97eba1f3495a..7717b501537543680614714fd4414a4482950f82 100644 --- a/relays/bin-substrate/src/cli/detect_equivocations.rs +++ b/substrate-relay/src/cli/detect_equivocations.rs @@ -14,25 +14,23 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . 
-use crate::{ - bridges::{ - kusama_polkadot::{ - kusama_headers_to_bridge_hub_polkadot::KusamaToBridgeHubPolkadotCliBridge, - polkadot_headers_to_bridge_hub_kusama::PolkadotToBridgeHubKusamaCliBridge, - }, - rococo_westend::{ - rococo_headers_to_bridge_hub_westend::RococoToBridgeHubWestendCliBridge, - westend_headers_to_bridge_hub_rococo::WestendToBridgeHubRococoCliBridge, - }, +use crate::bridges::{ + kusama_polkadot::{ + kusama_headers_to_bridge_hub_polkadot::KusamaToBridgeHubPolkadotCliBridge, + polkadot_headers_to_bridge_hub_kusama::PolkadotToBridgeHubKusamaCliBridge, + }, + rococo_westend::{ + rococo_headers_to_bridge_hub_westend::RococoToBridgeHubWestendCliBridge, + westend_headers_to_bridge_hub_rococo::WestendToBridgeHubRococoCliBridge, }, - cli::{bridge::*, chain_schema::*, PrometheusParams}, }; -use async_trait::async_trait; -use relay_substrate_client::ChainWithTransactions; use structopt::StructOpt; use strum::{EnumString, VariantNames}; -use substrate_relay_helper::{equivocation, equivocation::SubstrateEquivocationDetectionPipeline}; + +use substrate_relay_helper::cli::detect_equivocations::{ + DetectEquivocationsParams, EquivocationsDetector, +}; /// Start equivocation detection loop. 
#[derive(StructOpt)] @@ -40,13 +38,7 @@ pub struct DetectEquivocations { #[structopt(possible_values = DetectEquivocationsBridge::VARIANTS, case_insensitive = true)] bridge: DetectEquivocationsBridge, #[structopt(flatten)] - source: SourceConnectionParams, - #[structopt(flatten)] - source_sign: SourceSigningParams, - #[structopt(flatten)] - target: TargetConnectionParams, - #[structopt(flatten)] - prometheus_params: PrometheusParams, + params: DetectEquivocationsParams, } #[derive(Debug, EnumString, VariantNames)] @@ -59,29 +51,6 @@ pub enum DetectEquivocationsBridge { WestendToBridgeHubRococo, } -#[async_trait] -trait EquivocationsDetector: RelayToRelayEquivocationDetectionCliBridge -where - Self::Source: ChainWithTransactions, -{ - async fn start(data: DetectEquivocations) -> anyhow::Result<()> { - let source_client = data.source.into_client::().await?; - Self::Equivocation::start_relay_guards( - &source_client, - source_client.can_start_version_guard(), - ) - .await?; - - equivocation::run::( - source_client, - data.target.into_client::().await?, - data.source_sign.transaction_params::()?, - data.prometheus_params.into_metrics_params()?, - ) - .await - } -} - impl EquivocationsDetector for KusamaToBridgeHubPolkadotCliBridge {} impl EquivocationsDetector for PolkadotToBridgeHubKusamaCliBridge {} impl EquivocationsDetector for RococoToBridgeHubWestendCliBridge {} @@ -92,13 +61,13 @@ impl DetectEquivocations { pub async fn run(self) -> anyhow::Result<()> { match self.bridge { DetectEquivocationsBridge::KusamaToBridgeHubPolkadot => - KusamaToBridgeHubPolkadotCliBridge::start(self), + KusamaToBridgeHubPolkadotCliBridge::start(self.params), DetectEquivocationsBridge::PolkadotToBridgeHubKusama => - PolkadotToBridgeHubKusamaCliBridge::start(self), + PolkadotToBridgeHubKusamaCliBridge::start(self.params), DetectEquivocationsBridge::RococoToBridgeHubWestend => - RococoToBridgeHubWestendCliBridge::start(self), + RococoToBridgeHubWestendCliBridge::start(self.params), 
DetectEquivocationsBridge::WestendToBridgeHubRococo => - WestendToBridgeHubRococoCliBridge::start(self), + WestendToBridgeHubRococoCliBridge::start(self.params), } .await } diff --git a/relays/bin-substrate/src/cli/init_bridge.rs b/substrate-relay/src/cli/init_bridge.rs similarity index 66% rename from relays/bin-substrate/src/cli/init_bridge.rs rename to substrate-relay/src/cli/init_bridge.rs index 0b2f9aa7e1ec7b50b73bd1e980dc874d61c9057c..441487b35a9667bbd0811b37a4bb40c8e7cda9b6 100644 --- a/relays/bin-substrate/src/cli/init_bridge.rs +++ b/substrate-relay/src/cli/init_bridge.rs @@ -14,107 +14,31 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -use async_trait::async_trait; -use codec::Encode; - -use crate::{ - bridges::{ - kusama_polkadot::{ - kusama_headers_to_bridge_hub_polkadot::KusamaToBridgeHubPolkadotCliBridge, - polkadot_headers_to_bridge_hub_kusama::PolkadotToBridgeHubKusamaCliBridge, - }, - polkadot_bulletin::{ - polkadot_bulletin_headers_to_bridge_hub_polkadot::PolkadotBulletinToBridgeHubPolkadotCliBridge, - polkadot_headers_to_polkadot_bulletin::PolkadotToPolkadotBulletinCliBridge, - }, - rococo_bulletin::{ - rococo_bulletin_headers_to_bridge_hub_rococo::RococoBulletinToBridgeHubRococoCliBridge, - rococo_headers_to_rococo_bulletin::RococoToRococoBulletinCliBridge, - }, - rococo_westend::{ - rococo_headers_to_bridge_hub_westend::RococoToBridgeHubWestendCliBridge, - westend_headers_to_bridge_hub_rococo::WestendToBridgeHubRococoCliBridge, - }, +use crate::bridges::{ + kusama_polkadot::{ + kusama_headers_to_bridge_hub_polkadot::KusamaToBridgeHubPolkadotCliBridge, + polkadot_headers_to_bridge_hub_kusama::PolkadotToBridgeHubKusamaCliBridge, + }, + polkadot_bulletin::{ + polkadot_bulletin_headers_to_bridge_hub_polkadot::PolkadotBulletinToBridgeHubPolkadotCliBridge, + polkadot_headers_to_polkadot_bulletin::PolkadotToPolkadotBulletinCliBridge, + }, + rococo_bulletin::{ + 
rococo_bulletin_headers_to_bridge_hub_rococo::RococoBulletinToBridgeHubRococoCliBridge, + rococo_headers_to_rococo_bulletin::RococoToRococoBulletinCliBridge, + }, + rococo_westend::{ + rococo_headers_to_bridge_hub_westend::RococoToBridgeHubWestendCliBridge, + westend_headers_to_bridge_hub_rococo::WestendToBridgeHubRococoCliBridge, }, - cli::{bridge::CliBridgeBase, chain_schema::*}, }; -use bp_runtime::Chain as ChainBase; -use relay_substrate_client::{AccountKeyPairOf, Chain, UnsignedTransaction}; -use sp_core::Pair; +use relay_substrate_client::Chain; use structopt::StructOpt; use strum::{EnumString, VariantNames}; -use substrate_relay_helper::finality_base::engine::{Engine, Grandpa as GrandpaFinalityEngine}; - -/// Initialize bridge pallet. -#[derive(StructOpt)] -pub struct InitBridge { - /// A bridge instance to initialize. - #[structopt(possible_values = InitBridgeName::VARIANTS, case_insensitive = true)] - bridge: InitBridgeName, - #[structopt(flatten)] - source: SourceConnectionParams, - #[structopt(flatten)] - target: TargetConnectionParams, - #[structopt(flatten)] - target_sign: TargetSigningParams, - /// Generates all required data, but does not submit extrinsic - #[structopt(long)] - dry_run: bool, -} - -#[derive(Debug, EnumString, VariantNames)] -#[strum(serialize_all = "kebab_case")] -/// Bridge to initialize. -pub enum InitBridgeName { - KusamaToBridgeHubPolkadot, - PolkadotToBridgeHubKusama, - PolkadotToPolkadotBulletin, - PolkadotBulletinToBridgeHubPolkadot, - RococoToRococoBulletin, - RococoBulletinToBridgeHubRococo, - RococoToBridgeHubWestend, - WestendToBridgeHubRococo, -} - -#[async_trait] -trait BridgeInitializer: CliBridgeBase -where - ::AccountId: From< as Pair>::Public>, -{ - type Engine: Engine; - - /// Get the encoded call to init the bridge. - fn encode_init_bridge( - init_data: >::InitializationData, - ) -> ::Call; - - /// Initialize the bridge. 
- async fn init_bridge(data: InitBridge) -> anyhow::Result<()> { - let source_client = data.source.into_client::().await?; - let target_client = data.target.into_client::().await?; - let target_sign = data.target_sign.to_keypair::()?; - let dry_run = data.dry_run; - - substrate_relay_helper::finality::initialize::initialize::( - source_client, - target_client.clone(), - target_sign, - move |transaction_nonce, initialization_data| { - let call = Self::encode_init_bridge(initialization_data); - log::info!( - target: "bridge", - "Initialize bridge call encoded as hex string: {:?}", - format!("0x{}", hex::encode(call.encode())) - ); - Ok(UnsignedTransaction::new(call.into(), transaction_nonce)) - }, - dry_run, - ) - .await; - - Ok(()) - } -} +use substrate_relay_helper::{ + cli::init_bridge::{BridgeInitializer, InitBridgeParams}, + finality_base::engine::{Engine, Grandpa as GrandpaFinalityEngine}, +}; impl BridgeInitializer for RococoToBridgeHubWestendCliBridge { type Engine = GrandpaFinalityEngine; @@ -225,26 +149,50 @@ impl BridgeInitializer for RococoBulletinToBridgeHubRococoCliBridge { } } +/// Initialize bridge pallet. +#[derive(StructOpt)] +pub struct InitBridge { + /// A bridge instance to initialize. + #[structopt(possible_values = InitBridgeName::VARIANTS, case_insensitive = true)] + bridge: InitBridgeName, + #[structopt(flatten)] + params: InitBridgeParams, +} + +#[derive(Debug, EnumString, VariantNames)] +#[strum(serialize_all = "kebab_case")] +/// Bridge to initialize. +pub enum InitBridgeName { + KusamaToBridgeHubPolkadot, + PolkadotToBridgeHubKusama, + PolkadotToPolkadotBulletin, + PolkadotBulletinToBridgeHubPolkadot, + RococoToRococoBulletin, + RococoBulletinToBridgeHubRococo, + RococoToBridgeHubWestend, + WestendToBridgeHubRococo, +} + impl InitBridge { /// Run the command. 
pub async fn run(self) -> anyhow::Result<()> { match self.bridge { InitBridgeName::KusamaToBridgeHubPolkadot => - KusamaToBridgeHubPolkadotCliBridge::init_bridge(self), + KusamaToBridgeHubPolkadotCliBridge::init_bridge(self.params), InitBridgeName::PolkadotToBridgeHubKusama => - PolkadotToBridgeHubKusamaCliBridge::init_bridge(self), + PolkadotToBridgeHubKusamaCliBridge::init_bridge(self.params), InitBridgeName::PolkadotToPolkadotBulletin => - PolkadotToPolkadotBulletinCliBridge::init_bridge(self), + PolkadotToPolkadotBulletinCliBridge::init_bridge(self.params), InitBridgeName::PolkadotBulletinToBridgeHubPolkadot => - PolkadotBulletinToBridgeHubPolkadotCliBridge::init_bridge(self), + PolkadotBulletinToBridgeHubPolkadotCliBridge::init_bridge(self.params), InitBridgeName::RococoToRococoBulletin => - RococoToRococoBulletinCliBridge::init_bridge(self), + RococoToRococoBulletinCliBridge::init_bridge(self.params), InitBridgeName::RococoBulletinToBridgeHubRococo => - RococoBulletinToBridgeHubRococoCliBridge::init_bridge(self), + RococoBulletinToBridgeHubRococoCliBridge::init_bridge(self.params), InitBridgeName::RococoToBridgeHubWestend => - RococoToBridgeHubWestendCliBridge::init_bridge(self), + RococoToBridgeHubWestendCliBridge::init_bridge(self.params), InitBridgeName::WestendToBridgeHubRococo => - WestendToBridgeHubRococoCliBridge::init_bridge(self), + WestendToBridgeHubRococoCliBridge::init_bridge(self.params), } .await } diff --git a/substrate-relay/src/cli/mod.rs b/substrate-relay/src/cli/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..504058894c21f570c7c3a25a4a4de35d9d7408af --- /dev/null +++ b/substrate-relay/src/cli/mod.rs @@ -0,0 +1,131 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. 
+ +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Deal with CLI args of substrate-to-substrate relay. + +use async_std::prelude::*; +use futures::{select, FutureExt}; +use signal_hook::consts::*; +use signal_hook_async_std::Signals; +use structopt::StructOpt; + +mod chain_schema; +mod detect_equivocations; +mod init_bridge; +mod relay_headers; +mod relay_headers_and_messages; +mod relay_messages; +mod relay_parachains; + +/// The target that will be used when publishing logs related to this pallet. +pub const LOG_TARGET: &str = "bridge"; + +/// Parse relay CLI args. +pub fn parse_args() -> Command { + Command::from_args() +} + +/// Substrate-to-Substrate bridge utilities. +#[derive(StructOpt)] +#[structopt(about = "Substrate-to-Substrate relay")] +pub enum Command { + /// Initialize on-chain bridge pallet with current header data. + /// + /// Sends initialization transaction to bootstrap the bridge with current finalized block data. + InitBridge(init_bridge::InitBridge), + /// Start headers relay between two chains. + /// + /// The on-chain bridge component should have been already initialized with + /// `init-bridge` sub-command. + RelayHeaders(relay_headers::RelayHeaders), + /// Relay parachain heads. + RelayParachains(relay_parachains::RelayParachains), + /// Start messages relay between two chains. 
+ /// + /// Ties up to `Messages` pallets on both chains and starts relaying messages. + /// Requires the header relay to be already running. + RelayMessages(relay_messages::RelayMessages), + /// Start headers and messages relay between two Substrate chains. + /// + /// This high-level relay internally starts four low-level relays: two `RelayHeaders` + /// and two `RelayMessages` relays. Headers are only relayed when they are required by + /// the message relays - i.e. when there are messages or confirmations that needs to be + /// relayed between chains. + RelayHeadersAndMessages(Box), + /// Detect and report equivocations. + /// + /// Parses the source chain headers that were synchronized with the target chain looking for + /// equivocations. If any equivocation is found, it is reported to the source chain. + DetectEquivocations(detect_equivocations::DetectEquivocations), +} + +impl Command { + // Initialize logger depending on the command. + fn init_logger(&self) { + use relay_utils::initialize::{initialize_logger, initialize_relay}; + + match self { + Self::InitBridge(_) | + Self::RelayHeaders(_) | + Self::RelayMessages(_) | + Self::RelayHeadersAndMessages(_) => { + initialize_relay(); + }, + _ => { + initialize_logger(false); + }, + } + } + + /// Run the command. + async fn do_run(self) -> anyhow::Result<()> { + match self { + Self::InitBridge(arg) => arg.run().await?, + Self::RelayHeaders(arg) => arg.run().await?, + Self::RelayParachains(arg) => arg.run().await?, + Self::RelayMessages(arg) => arg.run().await?, + Self::RelayHeadersAndMessages(arg) => arg.run().await?, + Self::DetectEquivocations(arg) => arg.run().await?, + } + Ok(()) + } + + /// Run the command. 
+ pub async fn run(self) { + self.init_logger(); + + let exit_signals = match Signals::new([SIGINT, SIGTERM]) { + Ok(signals) => signals, + Err(e) => { + log::error!(target: LOG_TARGET, "Could not register exit signals: {}", e); + return + }, + }; + let run = self.do_run().fuse(); + futures::pin_mut!(exit_signals, run); + + select! { + signal = exit_signals.next().fuse() => { + log::info!(target: LOG_TARGET, "Received exit signal {:?}", signal); + }, + result = run => { + if let Err(e) = result { + log::error!(target: LOG_TARGET, "substrate-relay: {}", e); + } + }, + } + } +} diff --git a/relays/bin-substrate/src/cli/relay_headers.rs b/substrate-relay/src/cli/relay_headers.rs similarity index 63% rename from relays/bin-substrate/src/cli/relay_headers.rs rename to substrate-relay/src/cli/relay_headers.rs index 799bbebaa9a3cbffc5a0a4507be9b3a5f3b3dc89..f47090ca064a2b88e0529120c043493a2664c2fd 100644 --- a/relays/bin-substrate/src/cli/relay_headers.rs +++ b/substrate-relay/src/cli/relay_headers.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -use async_trait::async_trait; use structopt::StructOpt; use strum::{EnumString, VariantNames}; @@ -36,10 +35,8 @@ use crate::bridges::{ westend_headers_to_bridge_hub_rococo::WestendToBridgeHubRococoCliBridge, }, }; -use relay_utils::metrics::{GlobalMetrics, StandaloneMetric}; -use substrate_relay_helper::{finality::SubstrateFinalitySyncPipeline, HeadersToRelay}; -use crate::cli::{bridge::*, chain_schema::*, PrometheusParams}; +use substrate_relay_helper::cli::relay_headers::{HeadersRelayer, RelayHeadersParams}; /// Start headers relayer process. #[derive(StructOpt)] @@ -47,22 +44,8 @@ pub struct RelayHeaders { /// A bridge instance to relay headers for. 
#[structopt(possible_values = RelayHeadersBridge::VARIANTS, case_insensitive = true)] bridge: RelayHeadersBridge, - /// If passed, only mandatory headers (headers that are changing the GRANDPA authorities set) - /// are relayed. - #[structopt(long)] - only_mandatory_headers: bool, - /// If passed, only free headers (mandatory and every Nth header, if configured in runtime) - /// are relayed. Overrides `only_mandatory_headers`. - #[structopt(long)] - only_free_headers: bool, #[structopt(flatten)] - source: SourceConnectionParams, - #[structopt(flatten)] - target: TargetConnectionParams, - #[structopt(flatten)] - target_sign: TargetSigningParams, - #[structopt(flatten)] - prometheus_params: PrometheusParams, + params: RelayHeadersParams, } impl RelayHeaders { @@ -89,38 +72,6 @@ pub enum RelayHeadersBridge { RococoBulletinToBridgeHubRococo, } -#[async_trait] -trait HeadersRelayer: RelayToRelayHeadersCliBridge { - /// Relay headers. - async fn relay_headers(data: RelayHeaders) -> anyhow::Result<()> { - let headers_to_relay = data.headers_to_relay(); - let source_client = data.source.into_client::().await?; - let target_client = data.target.into_client::().await?; - let target_transactions_mortality = data.target_sign.target_transactions_mortality; - let target_sign = data.target_sign.to_keypair::()?; - - let metrics_params: relay_utils::metrics::MetricsParams = - data.prometheus_params.into_metrics_params()?; - GlobalMetrics::new()?.register_and_spawn(&metrics_params.registry)?; - - let target_transactions_params = substrate_relay_helper::TransactionParams { - signer: target_sign, - mortality: target_transactions_mortality, - }; - Self::Finality::start_relay_guards(&target_client, target_client.can_start_version_guard()) - .await?; - - substrate_relay_helper::finality::run::( - source_client, - target_client, - headers_to_relay, - target_transactions_params, - metrics_params, - ) - .await - } -} - impl HeadersRelayer for RococoToBridgeHubWestendCliBridge {} impl 
HeadersRelayer for WestendToBridgeHubRococoCliBridge {} impl HeadersRelayer for KusamaToBridgeHubPolkadotCliBridge {} @@ -139,17 +90,17 @@ impl RelayHeaders { RelayHeadersBridge::WestendToBridgeHubRococo => WestendToBridgeHubRococoCliBridge::relay_headers(self), RelayHeadersBridge::KusamaToBridgeHubPolkadot => - KusamaToBridgeHubPolkadotCliBridge::relay_headers(self), + KusamaToBridgeHubPolkadotCliBridge::relay_headers(self.params), RelayHeadersBridge::PolkadotToBridgeHubKusama => - PolkadotToBridgeHubKusamaCliBridge::relay_headers(self), + PolkadotToBridgeHubKusamaCliBridge::relay_headers(self.params), RelayHeadersBridge::PolkadotToPolkadotBulletin => - PolkadotToPolkadotBulletinCliBridge::relay_headers(self), + PolkadotToPolkadotBulletinCliBridge::relay_headers(self.params), RelayHeadersBridge::PolkadotBulletinToBridgeHubPolkadot => - PolkadotBulletinToBridgeHubPolkadotCliBridge::relay_headers(self), + PolkadotBulletinToBridgeHubPolkadotCliBridge::relay_headers(self.params), RelayHeadersBridge::RococoToRococoBulletin => - RococoToRococoBulletinCliBridge::relay_headers(self), + RococoToRococoBulletinCliBridge::relay_headers(self.params), RelayHeadersBridge::RococoBulletinToBridgeHubRococo => - RococoBulletinToBridgeHubRococoCliBridge::relay_headers(self), + RococoBulletinToBridgeHubRococoCliBridge::relay_headers(self.params), } .await } diff --git a/relays/bin-substrate/src/cli/relay_headers_and_messages/mod.rs b/substrate-relay/src/cli/relay_headers_and_messages.rs similarity index 51% rename from relays/bin-substrate/src/cli/relay_headers_and_messages/mod.rs rename to substrate-relay/src/cli/relay_headers_and_messages.rs index 969d7ac84075254897477b4bcaa4d91a50f30948..39877657884242246cb6e434ca0a23cdd55c3fbf 100644 --- a/relays/bin-substrate/src/cli/relay_headers_and_messages/mod.rs +++ b/substrate-relay/src/cli/relay_headers_and_messages.rs @@ -23,190 +23,48 @@ //! `declare_chain_to_parachain_bridge_schema` for the bridge. //! 
3) declare a new struct for the added bridge and implement the `Full2WayBridge` trait for it. -#[macro_use] -mod parachain_to_parachain; -#[macro_use] -mod relay_to_relay; -#[macro_use] -mod relay_to_parachain; - use async_trait::async_trait; -use std::{marker::PhantomData, sync::Arc}; use structopt::StructOpt; -use futures::{FutureExt, TryFutureExt}; -use relay_to_parachain::*; - -use crate::{ - bridges::{ - kusama_polkadot::{ - kusama_parachains_to_bridge_hub_polkadot::BridgeHubKusamaToBridgeHubPolkadotCliBridge, - polkadot_parachains_to_bridge_hub_kusama::BridgeHubPolkadotToBridgeHubKusamaCliBridge, - }, - polkadot_bulletin::{ - polkadot_bulletin_headers_to_bridge_hub_polkadot::PolkadotBulletinToBridgeHubPolkadotCliBridge, - polkadot_parachains_to_polkadot_bulletin::PolkadotToPolkadotBulletinCliBridge, - }, - rococo_bulletin::{ - rococo_bulletin_headers_to_bridge_hub_rococo::RococoBulletinToBridgeHubRococoCliBridge, - rococo_parachains_to_rococo_bulletin::RococoToRococoBulletinCliBridge, - BridgeHubRococoAsBridgeHubPolkadot, - }, - rococo_westend::{ - rococo_parachains_to_bridge_hub_westend::BridgeHubRococoToBridgeHubWestendCliBridge, - westend_parachains_to_bridge_hub_rococo::BridgeHubWestendToBridgeHubRococoCliBridge, - }, +use crate::bridges::{ + kusama_polkadot::{ + kusama_parachains_to_bridge_hub_polkadot::BridgeHubKusamaToBridgeHubPolkadotCliBridge, + polkadot_parachains_to_bridge_hub_kusama::BridgeHubPolkadotToBridgeHubKusamaCliBridge, + }, + polkadot_bulletin::{ + polkadot_bulletin_headers_to_bridge_hub_polkadot::PolkadotBulletinToBridgeHubPolkadotCliBridge, + polkadot_parachains_to_polkadot_bulletin::PolkadotToPolkadotBulletinCliBridge, + }, + rococo_bulletin::{ + rococo_bulletin_headers_to_bridge_hub_rococo::RococoBulletinToBridgeHubRococoCliBridge, + rococo_parachains_to_rococo_bulletin::RococoToRococoBulletinCliBridge, + BridgeHubRococoAsBridgeHubPolkadot, }, + rococo_westend::{ + 
rococo_parachains_to_bridge_hub_westend::BridgeHubRococoToBridgeHubWestendCliBridge, + westend_parachains_to_bridge_hub_rococo::BridgeHubWestendToBridgeHubRococoCliBridge, + }, +}; +use relay_substrate_client::{ + AccountKeyPairOf, ChainRuntimeVersion, ChainWithRuntimeVersion, ChainWithTransactions, + Parachain, SimpleRuntimeVersion, +}; +use substrate_relay_helper::{ cli::{ bridge::{ CliBridgeBase, MessagesCliBridge, ParachainToRelayHeadersCliBridge, RelayToRelayHeadersCliBridge, }, chain_schema::*, - relay_headers_and_messages::parachain_to_parachain::ParachainToParachainBridge, - CliChain, HexLaneId, PrometheusParams, + relay_headers_and_messages::{ + parachain_to_parachain::ParachainToParachainBridge, relay_to_parachain::*, + BridgeEndCommonParams, Full2WayBridge, Full2WayBridgeCommonParams, + HeadersAndMessagesSharedParams, + }, }, - declare_chain_cli_schema, + declare_chain_cli_schema, declare_parachain_to_parachain_bridge_schema, + declare_relay_to_parachain_bridge_schema, TransactionParams, }; -use bp_messages::LaneId; -use bp_runtime::BalanceOf; -use relay_substrate_client::{ - AccountIdOf, AccountKeyPairOf, Chain, ChainWithBalances, ChainWithMessages, - ChainWithTransactions, Client, Parachain, -}; -use relay_utils::metrics::MetricsParams; -use sp_core::Pair; -use substrate_relay_helper::{ - messages_lane::{MessagesRelayLimits, MessagesRelayParams}, - on_demand::OnDemandRelay, - HeadersToRelay, TaggedAccount, TransactionParams, -}; - -/// Parameters that have the same names across all bridges. -#[derive(Debug, PartialEq, StructOpt)] -pub struct HeadersAndMessagesSharedParams { - /// Hex-encoded lane identifiers that should be served by the complex relay. - #[structopt(long, default_value = "00000000")] - pub lane: Vec, - /// If passed, only mandatory headers (headers that are changing the GRANDPA authorities set) - /// are relayed. 
- #[structopt(long)] - pub only_mandatory_headers: bool, - /// If passed, only free headers (mandatory and every Nth header, if configured in runtime) - /// are relayed. Overrides `only_mandatory_headers`. - #[structopt(long)] - pub only_free_headers: bool, - #[structopt(flatten)] - pub prometheus_params: PrometheusParams, -} - -impl HeadersAndMessagesSharedParams { - fn headers_to_relay(&self) -> HeadersToRelay { - match (self.only_mandatory_headers, self.only_free_headers) { - (_, true) => HeadersToRelay::Free, - (true, false) => HeadersToRelay::Mandatory, - _ => HeadersToRelay::All, - } - } -} - -/// Bridge parameters, shared by all bridge types. -pub struct Full2WayBridgeCommonParams< - Left: ChainWithTransactions + CliChain, - Right: ChainWithTransactions + CliChain, -> { - /// Shared parameters. - pub shared: HeadersAndMessagesSharedParams, - /// Parameters of the left chain. - pub left: BridgeEndCommonParams, - /// Parameters of the right chain. - pub right: BridgeEndCommonParams, - - /// Common metric parameters. - pub metrics_params: MetricsParams, -} - -impl - Full2WayBridgeCommonParams -{ - /// Creates new bridge parameters from its components. - pub fn new>( - shared: HeadersAndMessagesSharedParams, - left: BridgeEndCommonParams, - right: BridgeEndCommonParams, - ) -> anyhow::Result { - // Create metrics registry. - let metrics_params = shared.prometheus_params.clone().into_metrics_params()?; - let metrics_params = relay_utils::relay_metrics(metrics_params).into_params(); - - Ok(Self { shared, left, right, metrics_params }) - } -} - -/// Parameters that are associated with one side of the bridge. -pub struct BridgeEndCommonParams { - /// Chain client. - pub client: Client, - /// Params used for sending transactions to the chain. - pub tx_params: TransactionParams>, - /// Accounts, which balances are exposed as metrics by the relay process. - pub accounts: Vec>>, -} - -/// All data of the bidirectional complex relay. 
-struct FullBridge< - 'a, - Source: ChainWithTransactions + CliChain, - Target: ChainWithTransactions + CliChain, - Bridge: MessagesCliBridge, -> { - source: &'a mut BridgeEndCommonParams, - target: &'a mut BridgeEndCommonParams, - metrics_params: &'a MetricsParams, - _phantom_data: PhantomData, -} - -impl< - 'a, - Source: ChainWithTransactions + CliChain, - Target: ChainWithTransactions + CliChain, - Bridge: MessagesCliBridge, - > FullBridge<'a, Source, Target, Bridge> -where - AccountIdOf: From< as Pair>::Public>, - AccountIdOf: From< as Pair>::Public>, - BalanceOf: TryFrom> + Into, -{ - /// Construct complex relay given it components. - fn new( - source: &'a mut BridgeEndCommonParams, - target: &'a mut BridgeEndCommonParams, - metrics_params: &'a MetricsParams, - ) -> Self { - Self { source, target, metrics_params, _phantom_data: Default::default() } - } - - /// Returns message relay parameters. - fn messages_relay_params( - &self, - source_to_target_headers_relay: Arc>, - target_to_source_headers_relay: Arc>, - lane_id: LaneId, - maybe_limits: Option, - ) -> MessagesRelayParams { - MessagesRelayParams { - source_client: self.source.client.clone(), - source_transaction_params: self.source.tx_params.clone(), - target_client: self.target.client.clone(), - target_transaction_params: self.target.tx_params.clone(), - source_to_target_headers_relay: Some(source_to_target_headers_relay), - target_to_source_headers_relay: Some(target_to_source_headers_relay), - lane_id, - limits: maybe_limits, - metrics_params: self.metrics_params.clone().disable(), - } - } -} // All supported chains. declare_chain_cli_schema!(Rococo, rococo); @@ -261,171 +119,6 @@ declare_parachain_to_parachain_bridge_schema!(BridgeHubKusama, Kusama, BridgeHub declare_relay_to_parachain_bridge_schema!(PolkadotBulletin, BridgeHubPolkadot, Polkadot); declare_relay_to_parachain_bridge_schema!(RococoBulletin, BridgeHubRococo, Rococo); -/// Base portion of the bidirectional complex relay. 
-/// -/// This main purpose of extracting this trait is that in different relays the implementation -/// of `start_on_demand_headers_relayers` method will be different. But the number of -/// implementations is limited to relay <> relay, parachain <> relay and parachain <> parachain. -/// This trait allows us to reuse these implementations in different bridges. -#[async_trait] -trait Full2WayBridgeBase: Sized + Send + Sync { - /// The CLI params for the bridge. - type Params; - /// The left relay chain. - type Left: ChainWithTransactions + CliChain; - /// The right destination chain (it can be a relay or a parachain). - type Right: ChainWithTransactions + CliChain; - - /// Reference to common relay parameters. - fn common(&self) -> &Full2WayBridgeCommonParams; - - /// Mutable reference to common relay parameters. - fn mut_common(&mut self) -> &mut Full2WayBridgeCommonParams; - - /// Start on-demand headers relays. - async fn start_on_demand_headers_relayers( - &mut self, - ) -> anyhow::Result<( - Arc>, - Arc>, - )>; -} - -/// Bidirectional complex relay. -#[async_trait] -trait Full2WayBridge: Sized + Sync -where - AccountIdOf: From< as Pair>::Public>, - AccountIdOf: From< as Pair>::Public>, - BalanceOf: TryFrom> + Into, - BalanceOf: TryFrom> + Into, -{ - /// Base portion of the bidirectional complex relay. - type Base: Full2WayBridgeBase; - - /// The left relay chain. - type Left: ChainWithTransactions + ChainWithBalances + ChainWithMessages + CliChain; - /// The right relay chain. - type Right: ChainWithTransactions + ChainWithBalances + ChainWithMessages + CliChain; - - /// Left to Right bridge. - type L2R: MessagesCliBridge; - /// Right to Left bridge - type R2L: MessagesCliBridge; - - /// Construct new bridge. - fn new(params: ::Params) -> anyhow::Result; - - /// Reference to the base relay portion. - fn base(&self) -> &Self::Base; - - /// Mutable reference to the base relay portion. 
- fn mut_base(&mut self) -> &mut Self::Base; - - /// Creates and returns Left to Right complex relay. - fn left_to_right(&mut self) -> FullBridge { - let common = self.mut_base().mut_common(); - FullBridge::<_, _, Self::L2R>::new( - &mut common.left, - &mut common.right, - &common.metrics_params, - ) - } - - /// Creates and returns Right to Left complex relay. - fn right_to_left(&mut self) -> FullBridge { - let common = self.mut_base().mut_common(); - FullBridge::<_, _, Self::R2L>::new( - &mut common.right, - &mut common.left, - &common.metrics_params, - ) - } - - /// Start complex relay. - async fn run(&mut self) -> anyhow::Result<()> { - // Register standalone metrics. - { - let common = self.mut_base().mut_common(); - common.left.accounts.push(TaggedAccount::Messages { - id: common.left.tx_params.signer.public().into(), - bridged_chain: Self::Right::NAME.to_string(), - }); - common.right.accounts.push(TaggedAccount::Messages { - id: common.right.tx_params.signer.public().into(), - bridged_chain: Self::Left::NAME.to_string(), - }); - } - - // start on-demand header relays - let (left_to_right_on_demand_headers, right_to_left_on_demand_headers) = - self.mut_base().start_on_demand_headers_relayers().await?; - - // add balance-related metrics - let lanes = self - .base() - .common() - .shared - .lane - .iter() - .cloned() - .map(Into::into) - .collect::>(); - { - let common = self.mut_base().mut_common(); - substrate_relay_helper::messages_metrics::add_relay_balances_metrics::<_, Self::Right>( - common.left.client.clone(), - &common.metrics_params, - &common.left.accounts, - &lanes, - ) - .await?; - substrate_relay_helper::messages_metrics::add_relay_balances_metrics::<_, Self::Left>( - common.right.client.clone(), - &common.metrics_params, - &common.right.accounts, - &lanes, - ) - .await?; - } - - // Need 2x capacity since we consider both directions for each lane - let mut message_relays = Vec::with_capacity(lanes.len() * 2); - for lane in lanes { - let 
left_to_right_messages = substrate_relay_helper::messages_lane::run::< - ::MessagesLane, - >(self.left_to_right().messages_relay_params( - left_to_right_on_demand_headers.clone(), - right_to_left_on_demand_headers.clone(), - lane, - Self::L2R::maybe_messages_limits(), - )) - .map_err(|e| anyhow::format_err!("{}", e)) - .boxed(); - message_relays.push(left_to_right_messages); - - let right_to_left_messages = substrate_relay_helper::messages_lane::run::< - ::MessagesLane, - >(self.right_to_left().messages_relay_params( - right_to_left_on_demand_headers.clone(), - left_to_right_on_demand_headers.clone(), - lane, - Self::R2L::maybe_messages_limits(), - )) - .map_err(|e| anyhow::format_err!("{}", e)) - .boxed(); - message_relays.push(right_to_left_messages); - } - - relay_utils::relay_metrics(self.base().common().metrics_params.clone()) - .expose() - .await - .map_err(|e| anyhow::format_err!("{}", e))?; - - futures::future::select_all(message_relays).await.0 - } -} - /// BridgeHubRococo <> BridgeHubWestend complex relay. 
pub struct BridgeHubRococoBridgeHubWestendFull2WayBridge { base: ::Base, @@ -570,6 +263,7 @@ impl RelayHeadersAndMessages { #[cfg(test)] mod tests { use super::*; + use substrate_relay_helper::cli::{HexLaneId, PrometheusParams}; #[test] fn should_parse_parachain_to_parachain_options() { @@ -623,8 +317,10 @@ mod tests { }, }, left_relay: KusamaConnectionParams { + kusama_uri: None, kusama_host: "kusama-alice".into(), kusama_port: 9944, + kusama_path: None, kusama_secure: false, kusama_runtime_version: KusamaRuntimeVersionParams { kusama_version_mode: RuntimeVersionType::Bundle, @@ -633,8 +329,10 @@ mod tests { }, }, left: BridgeHubKusamaConnectionParams { + bridge_hub_kusama_uri: None, bridge_hub_kusama_host: "bridge-hub-kusama-node-collator1".into(), bridge_hub_kusama_port: 9944, + bridge_hub_kusama_path: None, bridge_hub_kusama_secure: false, bridge_hub_kusama_runtime_version: BridgeHubKusamaRuntimeVersionParams { bridge_hub_kusama_version_mode: RuntimeVersionType::Bundle, @@ -650,8 +348,10 @@ mod tests { bridge_hub_kusama_transactions_mortality: Some(64), }, right: BridgeHubPolkadotConnectionParams { + bridge_hub_polkadot_uri: None, bridge_hub_polkadot_host: "bridge-hub-polkadot-collator1".into(), bridge_hub_polkadot_port: 9944, + bridge_hub_polkadot_path: None, bridge_hub_polkadot_secure: false, bridge_hub_polkadot_runtime_version: BridgeHubPolkadotRuntimeVersionParams { @@ -668,8 +368,10 @@ mod tests { bridge_hub_polkadot_transactions_mortality: Some(64), }, right_relay: PolkadotConnectionParams { + polkadot_uri: None, polkadot_host: "polkadot-alice".into(), polkadot_port: 9944, + polkadot_path: None, polkadot_secure: false, polkadot_runtime_version: PolkadotRuntimeVersionParams { polkadot_version_mode: RuntimeVersionType::Bundle, diff --git a/relays/bin-substrate/src/cli/relay_messages.rs b/substrate-relay/src/cli/relay_messages.rs similarity index 60% rename from relays/bin-substrate/src/cli/relay_messages.rs rename to 
substrate-relay/src/cli/relay_messages.rs index b20725b53c743255968dd5667a73f31ff53ea4f2..92b98f4d983a33a9b6278fb398ec7cee0da56b12 100644 --- a/relays/bin-substrate/src/cli/relay_messages.rs +++ b/substrate-relay/src/cli/relay_messages.rs @@ -14,10 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -use async_trait::async_trait; -use sp_core::Pair; use structopt::StructOpt; -use strum::VariantNames; +use strum::{EnumString, VariantNames}; use crate::bridges::{ kusama_polkadot::{ @@ -37,10 +35,21 @@ use crate::bridges::{ bridge_hub_westend_messages_to_bridge_hub_rococo::BridgeHubWestendToBridgeHubRococoMessagesCliBridge, }, }; -use relay_substrate_client::{AccountIdOf, AccountKeyPairOf, BalanceOf, ChainWithTransactions}; -use substrate_relay_helper::{messages_lane::MessagesRelayParams, TransactionParams}; +use substrate_relay_helper::cli::relay_messages::{MessagesRelayer, RelayMessagesParams}; -use crate::cli::{bridge::*, chain_schema::*, CliChain, HexLaneId, PrometheusParams}; +#[derive(Debug, PartialEq, Eq, EnumString, VariantNames)] +#[strum(serialize_all = "kebab_case")] +/// Supported full bridges (headers + messages). +pub enum FullBridge { + BridgeHubRococoToBridgeHubWestend, + BridgeHubWestendToBridgeHubRococo, + BridgeHubKusamaToBridgeHubPolkadot, + BridgeHubPolkadotToBridgeHubKusama, + PolkadotBulletinToBridgeHubPolkadot, + BridgeHubPolkadotToPolkadotBulletin, + RococoBulletinToBridgeHubRococo, + BridgeHubRococoToRococoBulletin, +} /// Start messages relayer process. #[derive(StructOpt)] @@ -48,57 +57,8 @@ pub struct RelayMessages { /// A bridge instance to relay messages for. #[structopt(possible_values = FullBridge::VARIANTS, case_insensitive = true)] bridge: FullBridge, - /// Hex-encoded lane id that should be served by the relay. Defaults to `00000000`. 
- #[structopt(long, default_value = "00000000")] - lane: HexLaneId, - #[structopt(flatten)] - source: SourceConnectionParams, - #[structopt(flatten)] - source_sign: SourceSigningParams, - #[structopt(flatten)] - target: TargetConnectionParams, - #[structopt(flatten)] - target_sign: TargetSigningParams, #[structopt(flatten)] - prometheus_params: PrometheusParams, -} - -#[async_trait] -trait MessagesRelayer: MessagesCliBridge -where - Self::Source: ChainWithTransactions + CliChain, - AccountIdOf: From< as Pair>::Public>, - AccountIdOf: From< as Pair>::Public>, - BalanceOf: TryFrom>, -{ - async fn relay_messages(data: RelayMessages) -> anyhow::Result<()> { - let source_client = data.source.into_client::().await?; - let source_sign = data.source_sign.to_keypair::()?; - let source_transactions_mortality = data.source_sign.transactions_mortality()?; - let target_client = data.target.into_client::().await?; - let target_sign = data.target_sign.to_keypair::()?; - let target_transactions_mortality = data.target_sign.transactions_mortality()?; - - substrate_relay_helper::messages_lane::run::(MessagesRelayParams { - source_client, - source_transaction_params: TransactionParams { - signer: source_sign, - mortality: source_transactions_mortality, - }, - target_client, - target_transaction_params: TransactionParams { - signer: target_sign, - mortality: target_transactions_mortality, - }, - source_to_target_headers_relay: None, - target_to_source_headers_relay: None, - lane_id: data.lane.into(), - limits: Self::maybe_messages_limits(), - metrics_params: data.prometheus_params.into_metrics_params()?, - }) - .await - .map_err(|e| anyhow::format_err!("{}", e)) - } + params: RelayMessagesParams, } impl MessagesRelayer for BridgeHubRococoToBridgeHubWestendMessagesCliBridge {} @@ -115,21 +75,21 @@ impl RelayMessages { pub async fn run(self) -> anyhow::Result<()> { match self.bridge { FullBridge::BridgeHubRococoToBridgeHubWestend => - 
BridgeHubRococoToBridgeHubWestendMessagesCliBridge::relay_messages(self), + BridgeHubRococoToBridgeHubWestendMessagesCliBridge::relay_messages(self.params), FullBridge::BridgeHubWestendToBridgeHubRococo => - BridgeHubWestendToBridgeHubRococoMessagesCliBridge::relay_messages(self), + BridgeHubWestendToBridgeHubRococoMessagesCliBridge::relay_messages(self.params), FullBridge::BridgeHubKusamaToBridgeHubPolkadot => - BridgeHubKusamaToBridgeHubPolkadotMessagesCliBridge::relay_messages(self), + BridgeHubKusamaToBridgeHubPolkadotMessagesCliBridge::relay_messages(self.params), FullBridge::BridgeHubPolkadotToBridgeHubKusama => - BridgeHubPolkadotToBridgeHubKusamaMessagesCliBridge::relay_messages(self), + BridgeHubPolkadotToBridgeHubKusamaMessagesCliBridge::relay_messages(self.params), FullBridge::PolkadotBulletinToBridgeHubPolkadot => - PolkadotBulletinToBridgeHubPolkadotMessagesCliBridge::relay_messages(self), + PolkadotBulletinToBridgeHubPolkadotMessagesCliBridge::relay_messages(self.params), FullBridge::BridgeHubPolkadotToPolkadotBulletin => - BridgeHubPolkadotToPolkadotBulletinMessagesCliBridge::relay_messages(self), + BridgeHubPolkadotToPolkadotBulletinMessagesCliBridge::relay_messages(self.params), FullBridge::RococoBulletinToBridgeHubRococo => - RococoBulletinToBridgeHubRococoMessagesCliBridge::relay_messages(self), + RococoBulletinToBridgeHubRococoMessagesCliBridge::relay_messages(self.params), FullBridge::BridgeHubRococoToRococoBulletin => - BridgeHubRococoToRococoBulletinMessagesCliBridge::relay_messages(self), + BridgeHubRococoToRococoBulletinMessagesCliBridge::relay_messages(self.params), } .await } diff --git a/relays/bin-substrate/src/cli/relay_parachains.rs b/substrate-relay/src/cli/relay_parachains.rs similarity index 55% rename from relays/bin-substrate/src/cli/relay_parachains.rs rename to substrate-relay/src/cli/relay_parachains.rs index 44ca8563dc4f23b2d986b43124e7f7c745d662bf..65382d1ca11b9cb68472615f15c15af9b5756b79 100644 --- 
a/relays/bin-substrate/src/cli/relay_parachains.rs +++ b/substrate-relay/src/cli/relay_parachains.rs @@ -26,24 +26,9 @@ use crate::bridges::{ westend_parachains_to_bridge_hub_rococo::BridgeHubWestendToBridgeHubRococoCliBridge, }, }; -use async_std::sync::Mutex; -use async_trait::async_trait; -use parachains_relay::parachains_loop::{AvailableHeader, SourceClient, TargetClient}; -use relay_substrate_client::Parachain; -use relay_utils::metrics::{GlobalMetrics, StandaloneMetric}; -use std::sync::Arc; use structopt::StructOpt; use strum::{EnumString, VariantNames}; -use substrate_relay_helper::{ - parachains::{source::ParachainsSource, target::ParachainsTarget, ParachainsPipelineAdapter}, - TransactionParams, -}; - -use crate::cli::{ - bridge::{CliBridgeBase, ParachainToRelayHeadersCliBridge}, - chain_schema::*, - PrometheusParams, -}; +use substrate_relay_helper::cli::relay_parachains::{ParachainsRelayer, RelayParachainsParams}; /// Start parachain heads relayer process. #[derive(StructOpt)] @@ -52,17 +37,7 @@ pub struct RelayParachains { #[structopt(possible_values = RelayParachainsBridge::VARIANTS, case_insensitive = true)] bridge: RelayParachainsBridge, #[structopt(flatten)] - source: SourceConnectionParams, - #[structopt(flatten)] - target: TargetConnectionParams, - #[structopt(flatten)] - target_sign: TargetSigningParams, - /// If passed, only free headers (those, available at "free" relay chain headers) - /// are relayed. - #[structopt(long)] - only_free_headers: bool, - #[structopt(flatten)] - prometheus_params: PrometheusParams, + params: RelayParachainsParams, } /// Parachain heads relay bridge. 
@@ -77,49 +52,6 @@ pub enum RelayParachainsBridge { WestendToBridgeHubRococo, } -#[async_trait] -trait ParachainsRelayer: ParachainToRelayHeadersCliBridge -where - ParachainsSource: - SourceClient>, - ParachainsTarget: - TargetClient>, - ::Source: Parachain, -{ - async fn relay_parachains(data: RelayParachains) -> anyhow::Result<()> { - let source_chain_client = data.source.into_client::().await?; - let source_client = ParachainsSource::::new( - source_chain_client.clone(), - Arc::new(Mutex::new(AvailableHeader::Missing)), - ); - - let target_transaction_params = TransactionParams { - signer: data.target_sign.to_keypair::()?, - mortality: data.target_sign.target_transactions_mortality, - }; - let target_chain_client = data.target.into_client::().await?; - let target_client = ParachainsTarget::::new( - source_chain_client, - target_chain_client, - target_transaction_params, - ); - - let metrics_params: relay_utils::metrics::MetricsParams = - data.prometheus_params.into_metrics_params()?; - GlobalMetrics::new()?.register_and_spawn(&metrics_params.registry)?; - - parachains_relay::parachains_loop::run( - source_client, - target_client, - metrics_params, - data.only_free_headers, - futures::future::pending(), - ) - .await - .map_err(|e| anyhow::format_err!("{}", e)) - } -} - impl ParachainsRelayer for BridgeHubRococoToBridgeHubWestendCliBridge {} impl ParachainsRelayer for BridgeHubWestendToBridgeHubRococoCliBridge {} impl ParachainsRelayer for BridgeHubKusamaToBridgeHubPolkadotCliBridge {} @@ -132,17 +64,17 @@ impl RelayParachains { pub async fn run(self) -> anyhow::Result<()> { match self.bridge { RelayParachainsBridge::RococoToBridgeHubWestend => - BridgeHubRococoToBridgeHubWestendCliBridge::relay_parachains(self), + BridgeHubRococoToBridgeHubWestendCliBridge::relay_parachains(self.params), RelayParachainsBridge::WestendToBridgeHubRococo => - BridgeHubWestendToBridgeHubRococoCliBridge::relay_parachains(self), + 
BridgeHubWestendToBridgeHubRococoCliBridge::relay_parachains(self.params), RelayParachainsBridge::KusamaToBridgeHubPolkadot => - BridgeHubKusamaToBridgeHubPolkadotCliBridge::relay_parachains(self), + BridgeHubKusamaToBridgeHubPolkadotCliBridge::relay_parachains(self.params), RelayParachainsBridge::PolkadotToBridgeHubKusama => - BridgeHubPolkadotToBridgeHubKusamaCliBridge::relay_parachains(self), + BridgeHubPolkadotToBridgeHubKusamaCliBridge::relay_parachains(self.params), RelayParachainsBridge::PolkadotToPolkadotBulletin => - PolkadotToPolkadotBulletinCliBridge::relay_parachains(self), + PolkadotToPolkadotBulletinCliBridge::relay_parachains(self.params), RelayParachainsBridge::RococoToRococoBulletin => - RococoToRococoBulletinCliBridge::relay_parachains(self), + RococoToRococoBulletinCliBridge::relay_parachains(self.params), } .await } diff --git a/relays/bin-substrate/src/main.rs b/substrate-relay/src/main.rs similarity index 98% rename from relays/bin-substrate/src/main.rs rename to substrate-relay/src/main.rs index 33a423b0766283ffad308cad6eb85eff138fee8a..214bfa60e24a4c06b3277476a383f756570b8c5b 100644 --- a/relays/bin-substrate/src/main.rs +++ b/substrate-relay/src/main.rs @@ -19,7 +19,6 @@ #![warn(missing_docs)] mod bridges; -mod chains; mod cli; fn main() { diff --git a/tools/runtime-codegen/Cargo.lock b/tools/runtime-codegen/Cargo.lock index 0307d37cb860bec0246d4a479d859f831746c48a..ded1a4cb43b915047f963661ffa407b44cdfc5b6 100644 --- a/tools/runtime-codegen/Cargo.lock +++ b/tools/runtime-codegen/Cargo.lock @@ -104,9 +104,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.4" +version = "0.6.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ab91ebe16eb252986481c5b62f6098f3b698a45e34b5b98200cf20dd2484a44" +checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb" dependencies = [ "anstyle", "anstyle-parse", @@ -756,9 +756,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.7" 
+version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac495e00dcec98c83465d5ad66c5c4fabd652fd6686e7c6269b117e729a6f17b" +checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0" dependencies = [ "clap_builder", "clap_derive", @@ -766,9 +766,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.4.7" +version = "4.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c77ed9a32a62e6ca27175d00d29d05ca32e396ea1eb5fb01d8256b669cec7663" +checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4" dependencies = [ "anstream", "anstyle", @@ -778,11 +778,11 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.4.7" +version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9804afaaf59a91e75b022a30fb7229a7901f60c755489cc61c9b423b836442" +checksum = "528131438037fd55894f62d6e9f068b8f45ac57ffa77517819645d10aed04f64" dependencies = [ - "heck", + "heck 0.5.0", "proc-macro2", "quote", "syn 2.0.38", @@ -790,9 +790,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1" +checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" [[package]] name = "color-eyre" @@ -1738,6 +1738,12 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + [[package]] name = "hermit-abi" version = "0.3.3" @@ -2920,9 +2926,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.8" +version = "0.21.11" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "446e14c5cda4f3f30fe71863c34ec70f5ac79d6087097ad0bb433e1be5edf04c" +checksum = "7fecbfb7b1444f477b345853b1fce097a2c6fb637b2bfb87e6bc5db0f043fae4" dependencies = [ "log", "ring 0.17.5", @@ -3955,9 +3961,9 @@ checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" [[package]] name = "strsim" -version = "0.10.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +checksum = "5ee073c9e4cd00e28217186dbe12796d692868f432bf2e97ee73bed0c56dfa01" [[package]] name = "subrpcer" @@ -4011,7 +4017,7 @@ version = "0.32.1" source = "git+https://github.com/paritytech/subxt?branch=master#40aca5ba65f1181e8496eb91615d73c0d3c01502" dependencies = [ "frame-metadata 16.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "heck", + "heck 0.4.1", "hex", "jsonrpsee", "parity-scale-codec", diff --git a/tools/runtime-codegen/Cargo.toml b/tools/runtime-codegen/Cargo.toml index 24fe717f30f157fd3c901966da27840d94906dbf..3787d173af31af9eaa645e716d7c8298938dfc35 100644 --- a/tools/runtime-codegen/Cargo.toml +++ b/tools/runtime-codegen/Cargo.toml @@ -5,14 +5,15 @@ description = "Tool for generating bridge runtime code from metadata" authors = ["Parity Technologies "] edition = "2021" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +repository = "https://github.com/paritytech/parity-bridges-common.git" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [workspace] [dependencies] -clap = { version = "4.4.6", features = ["derive", "cargo"] } -codec = { package = "parity-scale-codec", version = "3.1.5", default-features = false, features = ["derive"] } +clap = { version = "4.5.3", features = ["derive", "cargo"] } +codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false, features = ["derive"] } color-eyre = 
"0.6.1" proc-macro2 = "1.0.56" quote = { version = "1.0.33" } diff --git a/zombienet/README.md b/zombienet/README.md deleted file mode 100644 index b601154b624ce69ed921ea6c2453d17c4d37b6c8..0000000000000000000000000000000000000000 --- a/zombienet/README.md +++ /dev/null @@ -1,31 +0,0 @@ -# Bridges Tests for Local Rococo <> Westend Bridge - -This folder contains [zombienet](https://github.com/paritytech/zombienet/) based integration tests for both -onchain and offchain bridges code. Due to some -[technical difficulties](https://github.com/paritytech/parity-bridges-common/pull/2649#issue-1965339051), we -are using native zombienet provider, which means that you need to build some binaries locally. - -To start those tests, you need to: - -- download latest [zombienet release](https://github.com/paritytech/zombienet/releases); - -- build Polkadot binary by running `cargo build -p polkadot --release --features fast-runtime` command in the -[`polkadot-sdk`](https://github.com/paritytech/polkadot-sdk) repository clone; - -- build Polkadot Parachain binary by running `cargo build -p polkadot-parachain-bin --release` command in the -[`polkadot-sdk`](https://github.com/paritytech/polkadot-sdk) repository clone; - -- ensure that you have [`node`](https://nodejs.org/en) installed. Additionally, we'll need globally installed -`polkadot/api-cli` package (use `npm install -g @polkadot/api-cli@beta` to install it); - -- build Substrate relay by running `cargo build -p substrate-relay --release` command in the -[`parity-bridges-common`](https://github.com/paritytech/parity-bridges-common) repository clone. - -- copy fresh `substrate-relay` binary, built in previous point, to the `~/local_bridge_testing/bin/substrate-relay`; - -- change the `POLKADOT_SDK_FOLDER` and `ZOMBIENET_BINARY_PATH` (and ensure that the nearby variables -have correct values) in the `./run-tests.sh`. - -After that, you could run tests with the `./run-tests.sh` command. 
Hopefully, it'll show the -"All tests have completed successfully" message in the end. Otherwise, it'll print paths to zombienet -process logs, which, in turn, may be used to track locations of all spinned relay and parachain nodes. diff --git a/zombienet/helpers/best-finalized-header-at-bridged-chain.js b/zombienet/helpers/best-finalized-header-at-bridged-chain.js deleted file mode 100644 index f7e1eefc84b3fa3e799d7111608cfc39783f5e21..0000000000000000000000000000000000000000 --- a/zombienet/helpers/best-finalized-header-at-bridged-chain.js +++ /dev/null @@ -1,25 +0,0 @@ -async function run(nodeName, networkInfo, args) { - const {wsUri, userDefinedTypes} = networkInfo.nodesByName[nodeName]; - const api = await zombie.connect(wsUri, userDefinedTypes); - - // TODO: could be replaced with https://github.com/polkadot-js/api/issues/4930 (depends on metadata v15) later - const bridgedChainName = args[0]; - const expectedBridgedChainHeaderNumber = Number(args[1]); - const runtimeApiMethod = bridgedChainName + "FinalityApi_best_finalized"; - - while (true) { - const encodedBestFinalizedHeaderId = await api.rpc.state.call(runtimeApiMethod, []); - const bestFinalizedHeaderId = api.createType("Option", encodedBestFinalizedHeaderId); - if (bestFinalizedHeaderId.isSome) { - const bestFinalizedHeaderNumber = Number(bestFinalizedHeaderId.unwrap().toHuman()[0]); - if (bestFinalizedHeaderNumber > expectedBridgedChainHeaderNumber) { - return bestFinalizedHeaderNumber; - } - } - - // else sleep and retry - await new Promise((resolve) => setTimeout(resolve, 12000)); - } -} - -module.exports = { run } diff --git a/zombienet/helpers/chains/rococo-at-westend.js b/zombienet/helpers/chains/rococo-at-westend.js deleted file mode 100644 index bcce3b3a303f55a16e766c6558878650ed03ab80..0000000000000000000000000000000000000000 --- a/zombienet/helpers/chains/rococo-at-westend.js +++ /dev/null @@ -1,6 +0,0 @@ -module.exports = { - grandpaPalletName: "bridgeRococoGrandpa", - parachainsPalletName: 
"bridgeRococoParachains", - messagesPalletName: "bridgeRococoMessages", - bridgedBridgeHubParaId: 1013, -} diff --git a/zombienet/helpers/chains/westend-at-rococo.js b/zombienet/helpers/chains/westend-at-rococo.js deleted file mode 100644 index 6a15b64a23b7c28f2b66a6491caebafc4c93dff5..0000000000000000000000000000000000000000 --- a/zombienet/helpers/chains/westend-at-rococo.js +++ /dev/null @@ -1,6 +0,0 @@ -module.exports = { - grandpaPalletName: "bridgeWestendGrandpa", - parachainsPalletName: "bridgeWestendParachains", - messagesPalletName: "bridgeWestendMessages", - bridgedBridgeHubParaId: 1002, -} diff --git a/zombienet/helpers/native-assets-balance-increased.js b/zombienet/helpers/native-assets-balance-increased.js deleted file mode 100644 index 9ee1a769e9f2807ed7b73ca9c6aa4b89d5c135f9..0000000000000000000000000000000000000000 --- a/zombienet/helpers/native-assets-balance-increased.js +++ /dev/null @@ -1,20 +0,0 @@ -async function run(nodeName, networkInfo, args) { - const {wsUri, userDefinedTypes} = networkInfo.nodesByName[nodeName]; - const api = await zombie.connect(wsUri, userDefinedTypes); - - const accountAddress = args[0]; - const initialAccountData = await api.query.system.account(accountAddress); - const initialAccountBalance = initialAccountData.data['free']; - while (true) { - const accountData = await api.query.system.account(accountAddress); - const accountBalance = accountData.data['free']; - if (accountBalance > initialAccountBalance) { - return accountBalance; - } - - // else sleep and retry - await new Promise((resolve) => setTimeout(resolve, 12000)); - } -} - -module.exports = { run } diff --git a/zombienet/helpers/only-mandatory-headers-synced-when-idle.js b/zombienet/helpers/only-mandatory-headers-synced-when-idle.js deleted file mode 100644 index 3a3432cfaf38da93f3ea0e65657f266b66f84d74..0000000000000000000000000000000000000000 --- a/zombienet/helpers/only-mandatory-headers-synced-when-idle.js +++ /dev/null @@ -1,44 +0,0 @@ -const utils = 
require("./utils"); - -async function run(nodeName, networkInfo, args) { - const {wsUri, userDefinedTypes} = networkInfo.nodesByName[nodeName]; - const api = await zombie.connect(wsUri, userDefinedTypes); - - // parse arguments - const exitAfterSeconds = Number(args[0]); - const bridgedChain = require("./chains/" + args[1]); - - // start listening to new blocks - let totalGrandpaHeaders = 0; - let totalParachainHeaders = 0; - api.rpc.chain.subscribeNewHeads(async function (header) { - const apiAtParent = await api.at(header.parentHash); - const apiAtCurrent = await api.at(header.hash); - const currentEvents = await apiAtCurrent.query.system.events(); - - totalGrandpaHeaders += await utils.ensureOnlyMandatoryGrandpaHeadersImported( - bridgedChain, - apiAtParent, - apiAtCurrent, - currentEvents, - ); - totalParachainHeaders += await utils.ensureOnlyInitialParachainHeaderImported( - bridgedChain, - apiAtParent, - apiAtCurrent, - currentEvents, - ); - }); - - // wait given time - await new Promise(resolve => setTimeout(resolve, exitAfterSeconds * 1000)); - // if we haven't seen any new GRANDPA or parachain headers => fail - if (totalGrandpaHeaders == 0) { - throw new Error("No bridged relay chain headers imported"); - } - if (totalParachainHeaders == 0) { - throw new Error("No bridged parachain headers imported"); - } -} - -module.exports = { run } diff --git a/zombienet/helpers/only-required-headers-synced-when-idle.js b/zombienet/helpers/only-required-headers-synced-when-idle.js deleted file mode 100644 index 8c3130e4fd960601d377dde5101520c95531cdf6..0000000000000000000000000000000000000000 --- a/zombienet/helpers/only-required-headers-synced-when-idle.js +++ /dev/null @@ -1,81 +0,0 @@ -const utils = require("./utils"); - -async function run(nodeName, networkInfo, args) { - const {wsUri, userDefinedTypes} = networkInfo.nodesByName[nodeName]; - const api = await zombie.connect(wsUri, userDefinedTypes); - - // parse arguments - const exitAfterSeconds = Number(args[0]); 
- const bridgedChain = require("./chains/" + args[1]); - - // start listening to new blocks - let atLeastOneMessageReceived = false; - let atLeastOneMessageDelivered = false; - const unsubscribe = await api.rpc.chain.subscribeNewHeads(async function (header) { - const apiAtParent = await api.at(header.parentHash); - const apiAtCurrent = await api.at(header.hash); - const currentEvents = await apiAtCurrent.query.system.events(); - - const messagesReceived = currentEvents.find((record) => { - return record.event.section == bridgedChain.messagesPalletName - && record.event.method == "MessagesReceived"; - }) != undefined; - const messagesDelivered = currentEvents.find((record) => { - return record.event.section == bridgedChain.messagesPalletName && - record.event.method == "MessagesDelivered"; - }) != undefined; - const hasMessageUpdates = messagesReceived || messagesDelivered; - atLeastOneMessageReceived = atLeastOneMessageReceived || messagesReceived; - atLeastOneMessageDelivered = atLeastOneMessageDelivered || messagesDelivered; - - if (!hasMessageUpdates) { - // if there are no any message update transactions, we only expect mandatory GRANDPA - // headers and initial parachain headers - await utils.ensureOnlyMandatoryGrandpaHeadersImported( - bridgedChain, - apiAtParent, - apiAtCurrent, - currentEvents, - ); - await utils.ensureOnlyInitialParachainHeaderImported( - bridgedChain, - apiAtParent, - apiAtCurrent, - currentEvents, - ); - } else { - const messageTransactions = (messagesReceived ? 1 : 0) + (messagesDelivered ? 
1 : 0); - - // otherwise we only accept at most one GRANDPA header - const newGrandpaHeaders = utils.countGrandpaHeaderImports(bridgedChain, currentEvents); - if (newGrandpaHeaders > 1) { - utils.logEvents(currentEvents); - throw new Error("Unexpected relay chain header import: " + newGrandpaHeaders + " / " + messageTransactions); - } - - // ...and at most one parachain header - const newParachainHeaders = utils.countParachainHeaderImports(bridgedChain, currentEvents); - if (newParachainHeaders > 1) { - utils.logEvents(currentEvents); - throw new Error("Unexpected parachain header import: " + newParachainHeaders + " / " + messageTransactions); - } - } - }); - - // wait until we have received + delivered messages OR until timeout - await utils.pollUntil( - exitAfterSeconds, - () => { return atLeastOneMessageReceived && atLeastOneMessageDelivered; }, - () => { unsubscribe(); }, - () => { - if (!atLeastOneMessageReceived) { - throw new Error("No messages received from bridged chain"); - } - if (!atLeastOneMessageDelivered) { - throw new Error("No messages delivered to bridged chain"); - } - }, - ); -} - -module.exports = { run } diff --git a/zombienet/helpers/relayer-rewards.js b/zombienet/helpers/relayer-rewards.js deleted file mode 100644 index a5f567db797722e04d3bfae90745a728ff1abdff..0000000000000000000000000000000000000000 --- a/zombienet/helpers/relayer-rewards.js +++ /dev/null @@ -1,28 +0,0 @@ -async function run(nodeName, networkInfo, args) { - const {wsUri, userDefinedTypes} = networkInfo.nodesByName[nodeName]; - const api = await zombie.connect(wsUri, userDefinedTypes); - - // TODO: could be replaced with https://github.com/polkadot-js/api/issues/4930 (depends on metadata v15) later - const relayerAccountAddress = args[0]; - const laneId = args[1]; - const bridgedChainId = args[2]; - const relayerFundOwner = args[3]; - const expectedRelayerReward = BigInt(args[4]); - while (true) { - const relayerReward = await api.query.bridgeRelayers.relayerRewards( - 
relayerAccountAddress, - { laneId: laneId, bridgedChainId: bridgedChainId, owner: relayerFundOwner } - ); - if (relayerReward.isSome) { - const relayerRewardBalance = relayerReward.unwrap().toBigInt(); - if (relayerRewardBalance > expectedRelayerReward) { - return relayerRewardBalance; - } - } - - // else sleep and retry - await new Promise((resolve) => setTimeout(resolve, 12000)); - } -} - -module.exports = { run } diff --git a/zombienet/helpers/utils.js b/zombienet/helpers/utils.js deleted file mode 100644 index 5a5542b56dfc215a082fc6fbb8c1b9aa018de83e..0000000000000000000000000000000000000000 --- a/zombienet/helpers/utils.js +++ /dev/null @@ -1,103 +0,0 @@ -module.exports = { - logEvents: function(events) { - let stringifiedEvents = ""; - events.forEach((record) => { - if (stringifiedEvents != "") { - stringifiedEvents += ", "; - } - stringifiedEvents += record.event.section + "::" + record.event.method; - }); - console.log("Block events: " + stringifiedEvents); - }, - countGrandpaHeaderImports: function(bridgedChain, events) { - return events.reduce( - (count, record) => { - const { event } = record; - if (event.section == bridgedChain.grandpaPalletName && event.method == "UpdatedBestFinalizedHeader") { - count += 1; - } - return count; - }, - 0, - ); - }, - countParachainHeaderImports: function(bridgedChain, events) { - return events.reduce( - (count, record) => { - const { event } = record; - if (event.section == bridgedChain.parachainsPalletName && event.method == "UpdatedParachainHead") { - count += 1; - } - return count; - }, - 0, - ); - }, - pollUntil: async function( - timeoutInSecs, - predicate, - cleanup, - onFailure, - ) { - const begin = new Date().getTime(); - const end = begin + timeoutInSecs * 1000; - while (new Date().getTime() < end) { - if (predicate()) { - cleanup(); - return; - } - await new Promise(resolve => setTimeout(resolve, 100)); - } - - cleanup(); - onFailure(); - }, - ensureOnlyMandatoryGrandpaHeadersImported: async function( - 
bridgedChain, - apiAtParent, - apiAtCurrent, - currentEvents, - ) { - // remember id of bridged relay chain GRANDPA authorities set at parent block - const authoritySetAtParent = await apiAtParent.query[bridgedChain.grandpaPalletName].currentAuthoritySet(); - const authoritySetIdAtParent = authoritySetAtParent["setId"]; - - // now read the id of bridged relay chain GRANDPA authorities set at current block - const authoritySetAtCurrent = await apiAtCurrent.query[bridgedChain.grandpaPalletName].currentAuthoritySet(); - const authoritySetIdAtCurrent = authoritySetAtCurrent["setId"]; - - // we expect to see no more than `authoritySetIdAtCurrent - authoritySetIdAtParent` new GRANDPA headers - const maxNewGrandpaHeaders = authoritySetIdAtCurrent - authoritySetIdAtParent; - const newGrandpaHeaders = module.exports.countGrandpaHeaderImports(bridgedChain, currentEvents); - - // check that our assumptions are correct - if (newGrandpaHeaders > maxNewGrandpaHeaders) { - module.exports.logEvents(currentEvents); - throw new Error("Unexpected relay chain header import: " + newGrandpaHeaders + " / " + maxNewGrandpaHeaders); - } - - return newGrandpaHeaders; - }, - ensureOnlyInitialParachainHeaderImported: async function( - bridgedChain, - apiAtParent, - apiAtCurrent, - currentEvents, - ) { - // remember whether we already know bridged parachain header at a parent block - const bestBridgedParachainHeader = await apiAtParent.query[bridgedChain.parachainsPalletName].parasInfo(bridgedChain.bridgedBridgeHubParaId);; - const hasBestBridgedParachainHeader = bestBridgedParachainHeader.isSome; - - // we expect to see: no more than `1` bridged parachain header if there were no parachain header before. - const maxNewParachainHeaders = hasBestBridgedParachainHeader ? 
0 : 1; - const newParachainHeaders = module.exports.countParachainHeaderImports(bridgedChain, currentEvents); - - // check that our assumptions are correct - if (newParachainHeaders > maxNewParachainHeaders) { - module.exports.logEvents(currentEvents); - throw new Error("Unexpected parachain header import: " + newParachainHeaders + " / " + maxNewParachainHeaders); - } - - return newParachainHeaders; - }, -} diff --git a/zombienet/helpers/wait-hrmp-channel-opened.js b/zombienet/helpers/wait-hrmp-channel-opened.js deleted file mode 100644 index e700cab1d7481d77631e55492e4b0032f4382028..0000000000000000000000000000000000000000 --- a/zombienet/helpers/wait-hrmp-channel-opened.js +++ /dev/null @@ -1,22 +0,0 @@ -async function run(nodeName, networkInfo, args) { - const {wsUri, userDefinedTypes} = networkInfo.nodesByName[nodeName]; - const api = await zombie.connect(wsUri, userDefinedTypes); - - const sibling = args[0]; - - while (true) { - const messagingStateAsObj = await api.query.parachainSystem.relevantMessagingState(); - const messagingState = api.createType("Option", messagingStateAsObj); - if (messagingState.isSome) { - const egressChannels = messagingState.unwrap().egressChannels; - if (egressChannels.find(x => x[0] == sibling)) { - return; - } - } - - // else sleep and retry - await new Promise((resolve) => setTimeout(resolve, 12000)); - } -} - -module.exports = { run } diff --git a/zombienet/helpers/wrapped-assets-balance.js b/zombienet/helpers/wrapped-assets-balance.js deleted file mode 100644 index bb3cea8858a850e551ba0380b1557ccad0761717..0000000000000000000000000000000000000000 --- a/zombienet/helpers/wrapped-assets-balance.js +++ /dev/null @@ -1,26 +0,0 @@ -async function run(nodeName, networkInfo, args) { - const {wsUri, userDefinedTypes} = networkInfo.nodesByName[nodeName]; - const api = await zombie.connect(wsUri, userDefinedTypes); - - // TODO: could be replaced with https://github.com/polkadot-js/api/issues/4930 (depends on metadata v15) later - const 
accountAddress = args[0]; - const expectedForeignAssetBalance = BigInt(args[1]); - const bridgedNetworkName = args[2]; - while (true) { - const foreignAssetAccount = await api.query.foreignAssets.account( - { parents: 2, interior: { X1: { GlobalConsensus: bridgedNetworkName } } }, - accountAddress - ); - if (foreignAssetAccount.isSome) { - const foreignAssetAccountBalance = foreignAssetAccount.unwrap().balance.toBigInt(); - if (foreignAssetAccountBalance > expectedForeignAssetBalance) { - return foreignAssetAccountBalance; - } - } - - // else sleep and retry - await new Promise((resolve) => setTimeout(resolve, 12000)); - } -} - -module.exports = { run } diff --git a/zombienet/run-tests.sh b/zombienet/run-tests.sh deleted file mode 100755 index cf3b529e6a9d9823f875938d8603b363c6079136..0000000000000000000000000000000000000000 --- a/zombienet/run-tests.sh +++ /dev/null @@ -1,145 +0,0 @@ -#!/bin/bash -set -x -shopt -s nullglob - -trap "trap - SIGINT SIGTERM EXIT && killall -q -9 substrate-relay && kill -- -$$" SIGINT SIGTERM EXIT - -# run tests in range [TESTS_BEGIN; TESTS_END) -TESTS_BEGIN=1 -TESTS_END=1000 -# whether to use paths for zombienet+bridges tests container or for local testing -ZOMBIENET_DOCKER_PATHS=0 -while [ $# -ne 0 ] -do - arg="$1" - case "$arg" in - --docker) - ZOMBIENET_DOCKER_PATHS=1 - ;; - --test) - shift - TESTS_BEGIN="$1" - TESTS_END="$1" - ;; - esac - shift -done - -# assuming that we'll be using native provide && all processes will be executing locally -# (we need absolute paths here, because they're used when scripts are called by zombienet from tmp folders) -export POLKADOT_SDK_FOLDER=`realpath $(dirname "$0")/../..` -export BRIDGE_TESTS_FOLDER=$POLKADOT_SDK_FOLDER/bridges/zombienet/tests - -# set pathc to binaries -if [ "$ZOMBIENET_DOCKER_PATHS" -eq 1 ]; then - export POLKADOT_BINARY_PATH=/usr/local/bin/polkadot - export POLKADOT_PARACHAIN_BINARY_PATH=/usr/local/bin/polkadot-parachain - export 
POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_ROCOCO=/usr/local/bin/polkadot-parachain - export POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_WESTEND=/usr/local/bin/polkadot-parachain - - export SUBSTRATE_RELAY_PATH=/usr/local/bin/substrate-relay - export ZOMBIENET_BINARY_PATH=/usr/local/bin/zombie -else - export POLKADOT_BINARY_PATH=$POLKADOT_SDK_FOLDER/target/release/polkadot - export POLKADOT_PARACHAIN_BINARY_PATH=$POLKADOT_SDK_FOLDER/target/release/polkadot-parachain - export POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_ROCOCO=$POLKADOT_PARACHAIN_BINARY_PATH - export POLKADOT_PARACHAIN_BINARY_PATH_FOR_ASSET_HUB_WESTEND=$POLKADOT_PARACHAIN_BINARY_PATH - - export SUBSTRATE_RELAY_PATH=~/local_bridge_testing/bin/substrate-relay - export ZOMBIENET_BINARY_PATH=~/local_bridge_testing/bin/zombienet-linux -fi - -# check if `wait` supports -p flag -if [ `printf "$BASH_VERSION\n5.1" | sort -V | head -n 1` = "5.1" ]; then IS_BASH_5_1=1; else IS_BASH_5_1=0; fi - -# check if `wait` supports -p flag -if [ `printf "$BASH_VERSION\n5.1" | sort -V | head -n 1` = "5.1" ]; then IS_BASH_5_1=1; else IS_BASH_5_1=0; fi - -# bridge configuration -export LANE_ID="00000002" - -# tests configuration -ALL_TESTS_FOLDER=`mktemp -d /tmp/bridges-zombienet-tests.XXXXX` - -function start_coproc() { - local command=$1 - local name=$2 - local logname=`basename $name` - local coproc_log=`mktemp -p $TEST_FOLDER $logname.XXXXX` - coproc COPROC { - # otherwise zombienet uses some hardcoded paths - unset RUN_IN_CONTAINER - unset ZOMBIENET_IMAGE - - $command >$coproc_log 2>&1 - } - TEST_COPROCS[$COPROC_PID, 0]=$name - TEST_COPROCS[$COPROC_PID, 1]=$coproc_log - echo "Spawned $name coprocess. 
StdOut + StdErr: $coproc_log" - - return $COPROC_PID -} - -# execute every test from tests folder -TEST_INDEX=$TESTS_BEGIN -while true -do - declare -A TEST_COPROCS - TEST_COPROCS_COUNT=0 - TEST_PREFIX=$(printf "%04d" $TEST_INDEX) - - # it'll be used by the `sync-exit.sh` script - export TEST_FOLDER=`mktemp -d -p $ALL_TESTS_FOLDER test-$TEST_PREFIX.XXXXX` - - # check if there are no more tests - zndsl_files=($BRIDGE_TESTS_FOLDER/$TEST_PREFIX-*.zndsl) - if [ ${#zndsl_files[@]} -eq 0 ]; then - break - fi - - # start tests - for zndsl_file in "${zndsl_files[@]}"; do - start_coproc "$ZOMBIENET_BINARY_PATH --provider native test $zndsl_file" "$zndsl_file" - echo -n "1">>$TEST_FOLDER/exit-sync - ((TEST_COPROCS_COUNT++)) - done - # wait until all tests are completed - for n in `seq 1 $TEST_COPROCS_COUNT`; do - if [ "$IS_BASH_5_1" -eq 1 ]; then - wait -n -p COPROC_PID - exit_code=$? - coproc_name=${TEST_COPROCS[$COPROC_PID, 0]} - coproc_log=${TEST_COPROCS[$COPROC_PID, 1]} - coproc_stdout=$(cat $coproc_log) - else - wait -n - exit_code=$? - coproc_name="" - coproc_stdout="" - fi - echo "Process $coproc_name has finished with exit code: $exit_code" - - # if exit code is not zero, exit - if [ $exit_code -ne 0 ]; then - echo "=====================================================================" - echo "=== Shutting down. 
Log of failed process below ===" - echo "=====================================================================" - echo "$coproc_stdout" - - exit 1 - fi - done - - # proceed to next index - ((TEST_INDEX++)) - if [ "$TEST_INDEX" -ge "$TESTS_END" ]; then - break - fi - - # kill relay here - it is started manually by tests - killall substrate-relay -done - -echo "=====================================================================" -echo "=== All tests have completed successfully ===" -echo "=====================================================================" diff --git a/zombienet/scripts/invoke-script.sh b/zombienet/scripts/invoke-script.sh deleted file mode 100755 index 835b4fe500f01ea2968bcb8bff538491ec7149bc..0000000000000000000000000000000000000000 --- a/zombienet/scripts/invoke-script.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -INVOKE_LOG=`mktemp -p $TEST_FOLDER invoke.XXXXX` - -pushd $POLKADOT_SDK_FOLDER/cumulus/scripts -./bridges_rococo_westend.sh $1 >$INVOKE_LOG 2>&1 -popd diff --git a/zombienet/scripts/start-relayer.sh b/zombienet/scripts/start-relayer.sh deleted file mode 100755 index 2f72b5ee556bcc8a89b2de4c5d3c53db8ac072b1..0000000000000000000000000000000000000000 --- a/zombienet/scripts/start-relayer.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -RELAY_LOG=`mktemp -p $TEST_FOLDER relay.XXXXX` - -pushd $POLKADOT_SDK_FOLDER/cumulus/scripts -./bridges_rococo_westend.sh run-relay >$RELAY_LOG 2>&1& -popd diff --git a/zombienet/scripts/sync-exit.sh b/zombienet/scripts/sync-exit.sh deleted file mode 100755 index cc20b098e7830fc164f3a0a643840c1e8188b7f2..0000000000000000000000000000000000000000 --- a/zombienet/scripts/sync-exit.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash -set -e - -# every network adds a char to the file, let's remove ours -truncate -s -1 $TEST_FOLDER/exit-sync - -# when all chars are removed, then our test is done -while true -do - if [ `stat --printf="%s" $TEST_FOLDER/exit-sync` -eq 0 ]; then - exit - fi - sleep 100 -done diff 
--git a/zombienet/tests/0001-asset-transfer-works-rococo-to-westend.zndsl b/zombienet/tests/0001-asset-transfer-works-rococo-to-westend.zndsl deleted file mode 100644 index 82d1eee2f45cc12b60a85b829d4a4c17588fa9e7..0000000000000000000000000000000000000000 --- a/zombienet/tests/0001-asset-transfer-works-rococo-to-westend.zndsl +++ /dev/null @@ -1,39 +0,0 @@ -Description: User is able to transfer ROC from Rococo Asset Hub to Westend Asset Hub and back -Network: ../../../cumulus/zombienet/bridge-hubs/bridge_hub_westend_local_network.toml -Creds: config - -# step 0: start relayer -# (started by sibling 0001-asset-transfer-works-westend-to-rococo.zndsl test) - -# step 1: initialize Westend AH -asset-hub-westend-collator1: run ../scripts/invoke-script.sh with "init-asset-hub-westend-local" within 60 seconds - -# step 2: initialize Westend bridge hub -bridge-hub-westend-collator1: run ../scripts/invoke-script.sh with "init-bridge-hub-westend-local" within 60 seconds - -# step 3: ensure that initialization has completed -asset-hub-westend-collator1: js-script ../helpers/wait-hrmp-channel-opened.js with "1002" within 600 seconds - -# step 4: relay is already started - let's wait until with-Rococo GRANPDA pallet is initialized at Westend -bridge-hub-westend-collator1: js-script ../helpers/best-finalized-header-at-bridged-chain.js with "Rococo,0" within 400 seconds - -# step 5: send WND to //Alice on Rococo AH -# (that's a required part of a sibling 0001-asset-transfer-works-westend-to-rococo.zndsl test) -asset-hub-westend-collator1: run ../scripts/invoke-script.sh with "reserve-transfer-assets-from-asset-hub-westend-local" within 60 seconds - -# step 6: elsewhere Rococo has sent ROC to //Alice - let's wait for it -asset-hub-westend-collator1: js-script ../helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,0,Rococo" within 600 seconds - -# step 7: check that the relayer //Charlie is rewarded by both our AH and target AH 
-bridge-hub-westend-collator1: js-script ../helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x6268726f,BridgedChain,0" within 300 seconds -bridge-hub-westend-collator1: js-script ../helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x6268726F,ThisChain,0" within 300 seconds - -# step 8: send wROC back to Alice at Rococo AH -asset-hub-westend-collator1: run ../scripts/invoke-script.sh with "withdraw-reserve-assets-from-asset-hub-westend-local" within 60 seconds - -# step 9: elsewhere Rococo has sent wWND to //Alice - let's wait for it -# (we wait until //Alice account increases here - there are no other transactionc that may increase it) -asset-hub-westend-collator1: js-script ../helpers/native-assets-balance-increased.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY" within 600 seconds - -# wait until other network test has completed OR exit with an error too -asset-hub-westend-collator1: run ../scripts/sync-exit.sh within 600 seconds diff --git a/zombienet/tests/0001-asset-transfer-works-westend-to-rococo.zndsl b/zombienet/tests/0001-asset-transfer-works-westend-to-rococo.zndsl deleted file mode 100644 index acfe0df03d26779abf0dd3c2aa3dfc8f37c0e3aa..0000000000000000000000000000000000000000 --- a/zombienet/tests/0001-asset-transfer-works-westend-to-rococo.zndsl +++ /dev/null @@ -1,39 +0,0 @@ -Description: User is able to transfer WND from Westend Asset Hub to Rococo Asset Hub and back -Network: ../../../cumulus/zombienet/bridge-hubs/bridge_hub_rococo_local_network.toml -Creds: config - -# step 0: start relayer -bridge-hub-rococo-collator1: run ../scripts/start-relayer.sh within 60 seconds - -# step 1: initialize Rococo AH -asset-hub-rococo-collator1: run ../scripts/invoke-script.sh with "init-asset-hub-rococo-local" within 60 seconds - -# step 2: initialize Rococo bridge hub -bridge-hub-rococo-collator1: run ../scripts/invoke-script.sh with 
"init-bridge-hub-rococo-local" within 60 seconds - -# step 3: ensure that initialization has completed -asset-hub-rococo-collator1: js-script ../helpers/wait-hrmp-channel-opened.js with "1013" within 600 seconds - -# step 4: relay is already started - let's wait until with-Westend GRANPDA pallet is initialized at Rococo -bridge-hub-rococo-collator1: js-script ../helpers/best-finalized-header-at-bridged-chain.js with "Westend,0" within 400 seconds - -# step 5: send ROC to //Alice on Westend AH -# (that's a required part of a sibling 0001-asset-transfer-works-rococo-to-westend.zndsl test) -asset-hub-rococo-collator1: run ../scripts/invoke-script.sh with "reserve-transfer-assets-from-asset-hub-rococo-local" within 60 seconds - -# step 6: elsewhere Westend has sent WND to //Alice - let's wait for it -asset-hub-rococo-collator1: js-script ../helpers/wrapped-assets-balance.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY,0,Westend" within 600 seconds - -# step 7: check that the relayer //Charlie is rewarded by both our AH and target AH -bridge-hub-rococo-collator1: js-script ../helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x62687764,BridgedChain,0" within 300 seconds -bridge-hub-rococo-collator1: js-script ../helpers/relayer-rewards.js with "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y,0x00000002,0x62687764,ThisChain,0" within 300 seconds - -# step 8: send wWND back to Alice at Westend AH -asset-hub-rococo-collator1: run ../scripts/invoke-script.sh with "withdraw-reserve-assets-from-asset-hub-rococo-local" within 60 seconds - -# step 9: elsewhere Westend has sent wROC to //Alice - let's wait for it -# (we wait until //Alice account increases here - there are no other transactionc that may increase it) -asset-hub-rococo-collator1: js-script ../helpers/native-assets-balance-increased.js with "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY" within 600 seconds - -# wait until other network test has completed OR 
exit with an error too -asset-hub-rococo-collator1: run ../scripts/sync-exit.sh within 600 seconds diff --git a/zombienet/tests/0002-mandatory-headers-synced-while-idle-rococo-to-westend.zndsl b/zombienet/tests/0002-mandatory-headers-synced-while-idle-rococo-to-westend.zndsl deleted file mode 100644 index eb6a75c373c7add04f895c01e332d40195150370..0000000000000000000000000000000000000000 --- a/zombienet/tests/0002-mandatory-headers-synced-while-idle-rococo-to-westend.zndsl +++ /dev/null @@ -1,26 +0,0 @@ -Description: While relayer is idle, we only sync mandatory Rococo (and a single Rococo BH) headers to Westend BH. -Network: ../../../cumulus/zombienet/bridge-hubs/bridge_hub_westend_local_network.toml -Creds: config - -# step 1: initialize Westend bridge hub -bridge-hub-westend-collator1: run ../scripts/invoke-script.sh with "init-bridge-hub-westend-local" within 60 seconds - -# step 2: sleep some time before starting relayer. We want to sleep for at least 1 session, which is expected to -# be 60 seconds for test environment. -sleep 120 seconds - -# step 3: start relayer -# (it is started by the sibling 0002-mandatory-headers-synced-while-idle-westend-to-rococo.zndsl test file) - -# it also takes some time for relayer to initialize bridge, so let's sleep for 5 minutes to be sure that parachain -# header has been synced - -# step 4: ensure that relayer is only syncing mandatory headers while idle. This includes both headers that were -# born while relay was offline and those in the next 100 seconds while script is active. 
-bridge-hub-westend-collator1: js-script ../helpers/only-mandatory-headers-synced-when-idle.js with "300,rococo-at-westend" within 600 seconds - -# wait until other network test has completed OR exit with an error too -asset-hub-westend-collator1: run ../scripts/sync-exit.sh within 600 seconds - -# wait until other network test has completed OR exit with an error too -asset-hub-westend-collator1: run ../scripts/sync-exit.sh within 600 seconds diff --git a/zombienet/tests/0002-mandatory-headers-synced-while-idle-westend-to-rococo.zndsl b/zombienet/tests/0002-mandatory-headers-synced-while-idle-westend-to-rococo.zndsl deleted file mode 100644 index 728d54d586a9b46625e3db70251b68c6501db922..0000000000000000000000000000000000000000 --- a/zombienet/tests/0002-mandatory-headers-synced-while-idle-westend-to-rococo.zndsl +++ /dev/null @@ -1,26 +0,0 @@ -Description: While relayer is idle, we only sync mandatory Westend (and a single Westend BH) headers to Rococo BH. -Network: ../../../cumulus/zombienet/bridge-hubs/bridge_hub_rococo_local_network.toml -Creds: config - -# step 1: initialize Rococo bridge hub -bridge-hub-rococo-collator1: run ../scripts/invoke-script.sh with "init-bridge-hub-rococo-local" within 60 seconds - -# step 2: sleep some time before starting relayer. We want to sleep for at least 1 session, which is expected to -# be 60 seconds for test environment. -sleep 120 seconds - -# step 3: start relayer -bridge-hub-rococo-collator1: run ../scripts/start-relayer.sh within 60 seconds - -# it also takes some time for relayer to initialize bridge, so let's sleep for 5 minutes to be sure that parachain -# header has been synced - -# step 4: ensure that relayer is only syncing mandatory headers while idle. This includes both headers that were -# born while relay was offline and those in the next 100 seconds while script is active. 
-bridge-hub-rococo-collator1: js-script ../helpers/only-mandatory-headers-synced-when-idle.js with "300,westend-at-rococo" within 600 seconds - -# wait until other network test has completed OR exit with an error too -asset-hub-rococo-collator1: run ../scripts/sync-exit.sh within 600 seconds - -# wait until other network test has completed OR exit with an error too -asset-hub-rococo-collator1: run ../scripts/sync-exit.sh within 600 seconds diff --git a/zombienet/tests/0003-required-headers-synced-while-active-rococo-to-westend.zndsl b/zombienet/tests/0003-required-headers-synced-while-active-rococo-to-westend.zndsl deleted file mode 100644 index a4960344f0a03265d2accfa52cd9a4ab1d7117d6..0000000000000000000000000000000000000000 --- a/zombienet/tests/0003-required-headers-synced-while-active-rococo-to-westend.zndsl +++ /dev/null @@ -1,26 +0,0 @@ -Description: While relayer is active, we only sync mandatory and required Rococo (and Rococo BH) headers to Westend BH. -Network: ../../../cumulus/zombienet/bridge-hubs/bridge_hub_westend_local_network.toml -Creds: config - -# step 1: initialize Westend AH -asset-hub-westend-collator1: run ../scripts/invoke-script.sh with "init-asset-hub-westend-local" within 60 seconds - -# step 2: initialize Westend bridge hub -bridge-hub-westend-collator1: run ../scripts/invoke-script.sh with "init-bridge-hub-westend-local" within 60 seconds - -# step 3: ensure that initialization has completed -asset-hub-westend-collator1: js-script ../helpers/wait-hrmp-channel-opened.js with "1002" within 600 seconds - -# step 4: send message from Westend to Rococo -asset-hub-westend-collator1: run ../scripts/invoke-script.sh with "reserve-transfer-assets-from-asset-hub-westend-local" within 60 seconds - -# step 5: start relayer -# (we are starting it after sending the message to be sure that relayer won't relay messages before our js script -# will be started at step 6) -# (it is started by sibling 
0003-required-headers-synced-while-active-westend-to-rococo.zndsl) - -# step 6: ensure that relayer won't sync any extra headers while delivering messages and confirmations -bridge-hub-westend-collator1: js-script ../helpers/only-required-headers-synced-when-active.js with "500,rococo-at-westend" within 600 seconds - -# wait until other network test has completed OR exit with an error too -asset-hub-westend-collator1: run ../scripts/sync-exit.sh within 600 seconds diff --git a/zombienet/tests/0003-required-headers-synced-while-active-westend-to-rococo.zndsl b/zombienet/tests/0003-required-headers-synced-while-active-westend-to-rococo.zndsl deleted file mode 100644 index 33c3ceebcf844cc6029d41deb289b1a1d8103132..0000000000000000000000000000000000000000 --- a/zombienet/tests/0003-required-headers-synced-while-active-westend-to-rococo.zndsl +++ /dev/null @@ -1,26 +0,0 @@ -Description: While relayer is active, we only sync mandatory and required Westend (and Westend BH) headers to Rococo BH. 
-Network: ../../../cumulus/zombienet/bridge-hubs/bridge_hub_rococo_local_network.toml -Creds: config - -# step 1: initialize Rococo AH -asset-hub-rococo-collator1: run ../scripts/invoke-script.sh with "init-asset-hub-rococo-local" within 60 seconds - -# step 2: initialize Rococo bridge hub -bridge-hub-rococo-collator1: run ../scripts/invoke-script.sh with "init-bridge-hub-rococo-local" within 60 seconds - -# step 3: ensure that initialization has completed -asset-hub-rococo-collator1: js-script ../helpers/wait-hrmp-channel-opened.js with "1013" within 600 seconds - -# step 4: send message from Rococo to Westend -asset-hub-rococo-collator1: run ../scripts/invoke-script.sh with "reserve-transfer-assets-from-asset-hub-rococo-local" within 60 seconds - -# step 5: start relayer -# (we are starting it after sending the message to be sure that relayer won't relay messages before our js script -# will be started at step 6) -bridge-hub-rococo-collator1: run ../scripts/start-relayer.sh within 60 seconds - -# step 6: ensure that relayer won't sync any extra headers while delivering messages and confirmations -bridge-hub-rococo-collator1: js-script ../helpers/only-required-headers-synced-when-active.js with "500,westend-at-rococo" within 600 seconds - -# wait until other network test has completed OR exit with an error too -asset-hub-rococo-collator1: run ../scripts/sync-exit.sh within 600 seconds