diff --git a/.github/ISSUE_TEMPLATE/release.md b/.github/ISSUE_TEMPLATE/release.md index ad2120ae092225d36f988b787dd09796661d98c8..7dcf9ed1908e3c747278fd8f7e129ea0ac889889 100644 --- a/.github/ISSUE_TEMPLATE/release.md +++ b/.github/ISSUE_TEMPLATE/release.md @@ -8,23 +8,31 @@ title: Polkadot {{ env.VERSION }} Release checklist This is the release checklist for Polkadot {{ env.VERSION }}. **All** following checks should be completed before publishing a new release of the Polkadot/Kusama/Westend runtime or client. The current release candidate can be -checked out with `git checkout {{ env.VERSION }}` +checked out with `git checkout release-{{ env.VERSION }}` ### Runtime Releases +These checks should be performed on the codebase prior to forking to a release- +candidate branch. + - [ ] Verify [`spec_version`](#spec-version) has been incremented since the last release for any native runtimes from any existing use on public (non-private/test) networks. -- [ ] Verify [new migrations](#new-migrations) complete successfully, and the - runtime state is correctly updated. - [ ] Verify previously [completed migrations](#old-migrations-removed) are - removed. + removed for any public (non-private/test) networks. - [ ] Verify pallet and [extrinsic ordering](#extrinsic-ordering) has stayed the same. Bump `transaction_version` if not. - [ ] Verify new extrinsics have been correctly whitelisted/blacklisted for [proxy filters](#proxy-filtering). - [ ] Verify [benchmarks](#benchmarks) have been updated for any modified runtime logic. + +The following checks can be performed after we have forked off to the release- +candidate branch. + +- [ ] Verify [new migrations](#new-migrations) complete successfully, and the + runtime state is correctly updated for any public (non-private/test) + networks. - [ ] Verify [Polkadot JS API](#polkadot-js) are up to date with the latest runtime changes. @@ -59,7 +67,8 @@ Add any necessary assets to the release. 
They should include: The release notes should list: -- The priority of the release (i.e., how quickly users should upgrade) +- The priority of the release (i.e., how quickly users should upgrade) - this is + based on the max priority of any *client* changes. - Which native runtimes and their versions are included - The proposal hashes of the runtimes as built with [srtool](https://gitlab.com/chevdor/srtool) @@ -77,16 +86,17 @@ A runtime upgrade must bump the spec number. This may follow a pattern with the client release (e.g. runtime v12 corresponds to v0.8.12, even if the current runtime is not v11). +### Old Migrations Removed + +Any previous `on_runtime_upgrade` functions from old upgrades must be removed +to prevent them from executing a second time. The `on_runtime_upgrade` function +can be found in `runtime//src/lib.rs`. + ### New Migrations Ensure that any migrations that are required due to storage or logic changes are included in the `on_runtime_upgrade` function of the appropriate pallets. -### Old Migrations Removed - -Any previous `on_runtime_upgrade` functions from old upgrades must be removed -to prevent them from executing a second time. - ### Extrinsic Ordering Offline signing libraries depend on a consistent ordering of call indices and @@ -94,6 +104,23 @@ functions. Compare the metadata of the current and new runtimes and ensure that the `module index, call index` tuples map to the same set of functions. In case of a breaking change, increase `transaction_version`. +To verify the order has not changed: + +1. Download the latest release-candidate binary either from the draft-release +on Github, or +[AWS](https://releases.parity.io/polkadot/x86_64-debian:stretch/{{ env.VERSION }}-rc1/polkadot) +(adjust the rc in this URL as necessary). +2. Run the release-candidate binary using a local chain: +`./polkadot --chain=polkadot-local` or `./polkadot --chain=kusama.local` +3. 
Use [`polkadot-js-tools`](https://github.com/polkadot-js/tools) to compare +the metadata: + - For Polkadot: `docker run --network host jacogr/polkadot-js-tools metadata wss://rpc.polkadot.io ws://localhost:9944` + - For Kusama: `docker run --network host jacogr/polkadot-js-tools metadata wss://kusama-rpc.polkadot.io ws://localhost:9944` +4. Things to look for in the output are lines like: + - `[Identity] idx 28 -> 25 (calls 15)` - indicates the index for `Identity` has changed + - `[+] Society, Recovery` - indicates the new version includes 2 additional modules/pallets. + - If no indices have changed, every module's line should look something like `[Identity] idx 25 (calls 15)` + Note: Adding new functions to the runtime does not constitute a breaking change as long as they are added to the end of a pallet (i.e., does not break any other call index). diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000000000000000000000000000000000000..f2b6dfc4f0ec1f01a54f2933ca561532e94ba14a --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,15 @@ +version: 2 +updates: + - package-ecosystem: "cargo" + directory: "/" + labels: ["A2-insubstantial", "B0-silent", "C1-low"] + # Handle updates for crates from github.com/paritytech/substrate manually. 
+ ignore: + - dependency-name: "substrate-*" + - dependency-name: "sc-*" + - dependency-name: "sp-*" + - dependency-name: "frame-*" + - dependency-name: "fork-tree" + - dependency-name: "pallet-*" + schedule: + interval: "daily" diff --git a/.github/workflows/publish-docker-release.yml b/.github/workflows/publish-docker-release.yml index 8ccc605d9a6b0f5699c690a6a4e13f4238e8aade..811849c561a5bb5d813f9069eca89ab75d165247 100644 --- a/.github/workflows/publish-docker-release.yml +++ b/.github/workflows/publish-docker-release.yml @@ -14,12 +14,12 @@ jobs: uses: docker/setup-buildx-action@v1 - name: Cache Docker layers uses: actions/cache@v2 - - name: Login to Dockerhub with: path: /tmp/.buildx-cache key: ${{ runner.os }}-buildx-${{ github.sha }} restore-keys: | ${{ runner.os }}-buildx- + - name: Login to Dockerhub uses: docker/login-action@v1 with: username: ${{ secrets.DOCKERHUB_USERNAME }} @@ -29,10 +29,14 @@ jobs: uses: docker/build-push-action@v2 with: push: true - file: scripts/docker/Dockerfile.release + file: scripts/docker/release.Dockerfile tags: | parity/polkadot:latest parity/polkadot:${{ github.event.release.tag_name }} + build-args: | + POLKADOT_VERSION=${{ github.event.release.tag_name }} + VCS_REF=${{ github.ref }} + BUILD_DATE=${{ github.event.release.published_at }} cache-from: type=local,src=/tmp/.buildx-cache cache-to: type=local,dest=/tmp/.buildx-cache - name: Image digest diff --git a/.github/workflows/publish-draft-release.yml b/.github/workflows/publish-draft-release.yml index f091a177f5082a4df705aeb91b311fb8a9385d41..05b5dc652ea63f54405d1a98e4fd89932c61f610 100644 --- a/.github/workflows/publish-draft-release.yml +++ b/.github/workflows/publish-draft-release.yml @@ -13,12 +13,12 @@ jobs: matrix: runtime: ['polkadot', 'kusama'] container: - image: chevdor/srtool:nightly-2020-07-20 + image: paritytech/srtool:nightly-2020-10-27 volumes: - ${{ github.workspace }}:/build env: PACKAGE: ${{ matrix.runtime }}-runtime - RUSTC_VERSION: nightly-2020-07-20 
+ RUSTC_VERSION: nightly-2020-10-27 steps: - uses: actions/checkout@v2 - name: Cache target dir @@ -139,5 +139,5 @@ jobs: with: room_id: ${{ secrets.INTERNAL_POLKADOT_MATRIX_ROOM_ID }} access_token: ${{ secrets.MATRIX_ACCESS_TOKEN }} - message: "**New version of polkadot tagged**: ${{ github.ref }}
Gav: Draft release created: ${{ needs.publish-draft-release.outputs.release_url }}" + message: "**New version of polkadot tagged**: ${{ github.ref }}
Draft release created: ${{ needs.publish-draft-release.outputs.release_url }}" server: "matrix.parity.io" diff --git a/.github/workflows/release-candidate.yml b/.github/workflows/release-candidate.yml index acbd7dbabebab8e74f704ed6e01a670c2413f83c..515d9a143b4f0bd2d2caa75ccb1ca5705c5de4c8 100644 --- a/.github/workflows/release-candidate.yml +++ b/.github/workflows/release-candidate.yml @@ -45,7 +45,7 @@ jobs: if: steps.compute_tag.outputs.first_rc == 'true' env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - BRANCH: ${{ steps.compute_tag.outputs.version }} + VERSION: ${{ steps.compute_tag.outputs.version }} with: filename: .github/ISSUE_TEMPLATE/release.md - uses: s3krit/matrix-message-action@v0.0.2 diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 913c9e43c84e5f55465d130747e507eba0be9e55..07438a8c8f974af37eab80599c57875eb94d1e2c 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -116,6 +116,9 @@ test-linux-stable: &test # but still want to have debug assertions. RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" TARGET: native + artifacts: + paths: + - ./target/release/polkadot script: - ./scripts/gitlab/test_linux_stable.sh - sccache -s @@ -141,6 +144,16 @@ check-runtime-benchmarks: &test - ./scripts/gitlab/check_runtime_benchmarks.sh - sccache -s +check-transaction-versions: + image: node:15 + stage: build + needs: + - job: test-linux-stable + before_script: + - npm install -g @polkadot/metadata-cmp + - git fetch origin release + script: "scripts/gitlab/check_extrinsics_ordering.sh" + build-wasm-release: stage: build <<: *collect-artifacts diff --git a/Cargo.lock b/Cargo.lock index 67a77c834efdd53dd4cc23497c2f912fe701b619..cbad792eabf85f81e6da7170e80b8d0b21fe4b62 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -31,7 +31,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7fc95d1bdb8e6666b2b217308eeeb09f2d6728d104be3e31916cc74d15420331" dependencies = [ - "generic-array 0.14.2", + "generic-array 0.14.4", ] [[package]] @@ -42,7 
+42,7 @@ checksum = "f7001367fde4c768a19d1029f0a8be5abd9308e1119846d5bd9ad26297b8faf5" dependencies = [ "aes-soft", "aesni", - "block-cipher", + "block-cipher 0.7.1", ] [[package]] @@ -53,7 +53,7 @@ checksum = "86f5007801316299f922a6198d1d09a0bae95786815d066d5880d13f7c45ead1" dependencies = [ "aead", "aes", - "block-cipher", + "block-cipher 0.7.1", "ghash", "subtle 2.2.3", ] @@ -64,8 +64,8 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4925647ee64e5056cf231608957ce7c81e12d6d6e316b9ce1404778cc1d35fa7" dependencies = [ - "block-cipher", - "byteorder 1.3.4", + "block-cipher 0.7.1", + "byteorder", "opaque-debug 0.2.3", ] @@ -75,24 +75,21 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d050d39b0b7688b3a3254394c3e30a9d66c41dcf9b05b0e2dbdc623f6505d264" dependencies = [ - "block-cipher", + "block-cipher 0.7.1", "opaque-debug 0.2.3", ] [[package]] name = "ahash" -version = "0.2.18" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f33b5018f120946c1dcf279194f238a9f146725593ead1c08fa47ff22b0b5d3" -dependencies = [ - "const-random", -] +checksum = "e8fd72866655d1904d6b0997d0b07ba561047d070fbe29de039031c641b61217" [[package]] name = "ahash" -version = "0.3.8" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8fd72866655d1904d6b0997d0b07ba561047d070fbe29de039031c641b61217" +checksum = "f6789e291be47ace86a60303502173d84af8327e3627ecf334356ee0f87a164c" [[package]] name = "aho-corasick" @@ -103,17 +100,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "alga" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f823d037a7ec6ea2197046bafd4ae150e6bc36f9ca347404f46a46823fa84f2" -dependencies = [ - "approx", - "num-complex", - "num-traits 0.2.12", -] - [[package]] name = "ansi_term" version = "0.11.0" @@ -134,9 +120,9 @@ dependencies = [ 
[[package]] name = "anyhow" -version = "1.0.31" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85bb70cc08ec97ca5450e6eba421deeea5f172c0fc61f78b5357b2a8e8be195f" +checksum = "bf8dcb5b4bbaa28653b647d8c77bd4ed40183b48882e130c1f1ffb73de069fd7" [[package]] name = "approx" @@ -170,9 +156,9 @@ dependencies = [ [[package]] name = "arrayvec" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" +checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" [[package]] name = "asn1_der" @@ -190,17 +176,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d0864d84b8e07b145449be9a8537db86bf9de5ce03b913214694643b4743502" dependencies = [ "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", ] [[package]] name = "assert_cmd" -version = "0.12.2" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "936fcf2c692b37c696cd0002c57752b2d9478402450c9ca4a463f6afae16d6f5" +checksum = "3dc1679af9a1ab4bea16f228b05d18f8363f8327b1fa8db00d2760cfafc6b61e" dependencies = [ "doc-comment", - "escargot", "predicates", "predicates-core", "predicates-tree", @@ -209,9 +194,9 @@ dependencies = [ [[package]] name = "assert_matches" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7deb0a829ca7bcfaf5da70b073a8d128619259a7be8216a355e23f00763059e5" +checksum = "695579f0f2520f3774bb40461e5adb066459d4e0af4d59d20175484fb8e9edf1" [[package]] name = "async-channel" @@ -234,7 +219,7 @@ dependencies = [ "concurrent-queue", "fastrand", "futures-lite", - "once_cell 1.4.1", + "once_cell", "vec-arena", ] @@ -248,28 +233,26 @@ dependencies = [ "async-io", "futures-lite", "num_cpus", - "once_cell 1.4.1", + "once_cell", ] [[package]] name = "async-io" -version = "1.1.0" +version = "1.3.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "38628c78a34f111c5a6b98fc87dfc056cd1590b61afe748b145be4623c56d194" +checksum = "9315f8f07556761c3e48fec2e6b276004acf426e6dc068b2c2251854d65ee0fd" dependencies = [ - "cfg-if", "concurrent-queue", "fastrand", "futures-lite", "libc", - "log 0.4.11", - "once_cell 1.4.1", + "log", + "nb-connect", + "once_cell", "parking", "polling", - "socket2", "vec-arena", "waker-fn", - "wepoll-sys-stjepang", "winapi 0.3.9", ] @@ -299,11 +282,11 @@ dependencies = [ "futures-lite", "gloo-timers", "kv-log-macro", - "log 0.4.11", + "log", "memchr", "num_cpus", - "once_cell 1.4.1", - "pin-project-lite", + "once_cell", + "pin-project-lite 0.1.7", "pin-utils", "slab", "wasm-bindgen-futures", @@ -317,32 +300,36 @@ checksum = "e91831deabf0d6d7ec49552e489aed63b7456a7a3c46cff62adad428110b0af0" [[package]] name = "async-tls" -version = "0.8.0" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df097e3f506bec0e1a24f06bb3c962c228f36671de841ff579cb99f371772634" +checksum = "dd0d8b6fc362bebff7502479fb5e9aed00c8cc3abc5af755536e73a128f0cb88" dependencies = [ - "futures 0.3.5", - "rustls", + "futures-core", + "futures-io", + "rustls 0.19.0", "webpki", - "webpki-roots 0.19.0", + "webpki-roots", ] [[package]] name = "async-trait" -version = "0.1.36" +version = "0.1.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a265e3abeffdce30b2e26b7a11b222fe37c6067404001b434101457d0385eb92" +checksum = "8d3a45e77e34375a7923b1e8febb049bb011f064714a8e17a1a616fef01da13d" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", ] [[package]] name = "atomic" -version = "0.4.6" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64f46ca51dca4837f1520754d1c8c36636356b81553d928dc9c177025369a06e" +checksum = "c3410529e8288c463bedb5930f82833bc0c90e5d2fe639a56582a4d09220b281" 
+dependencies = [ + "autocfg 1.0.0", +] [[package]] name = "atomic-waker" @@ -380,7 +367,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "46254cf2fdcdf1badb5934448c1bcbe046a56537b3987d96c51a7afc5d03f293" dependencies = [ "addr2line", - "cfg-if", + "cfg-if 0.1.10", "libc", "miniz_oxide", "object 0.20.0", @@ -405,13 +392,19 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" +[[package]] +name = "base64" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" + [[package]] name = "bincode" version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f30d3a39baa26f9651f17b375061f3233dde33424a8b72b0dbe93a68a0bc896d" dependencies = [ - "byteorder 1.3.4", + "byteorder", "serde", ] @@ -423,15 +416,15 @@ checksum = "66c0bb6167449588ff70803f4127f0684f9063097eca5016f37eb52b92c2cf36" dependencies = [ "bitflags", "cexpr", - "cfg-if", + "cfg-if 0.1.10", "clang-sys", "clap", "env_logger 0.7.1", "lazy_static", "lazycell", - "log 0.4.11", + "log", "peeking_take_while", - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", "regex", "rustc-hash", @@ -439,33 +432,12 @@ dependencies = [ "which", ] -[[package]] -name = "bip39" -version = "0.6.0-beta.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7059804e226b3ac116519a252d7f5fb985a5ccc0e93255e036a5f7e7283323f4" -dependencies = [ - "failure", - "hashbrown 0.1.8", - "hmac", - "once_cell 0.1.8", - "pbkdf2", - "rand 0.6.5", - "sha2 0.8.2", -] - [[package]] name = "bitflags" version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" -[[package]] -name = "bitmask" -version = "0.5.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5da9b3d9f6f585199287a473f4f8dfab6566cf827d15c00c219f53c645687ead" - [[package]] name = "bitvec" version = "0.17.4" @@ -483,7 +455,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "84ce5b6108f8e154604bd4eb76a2f726066c3464d5a552a4229262a18c9bb471" dependencies = [ "byte-tools", - "byteorder 1.3.4", + "byteorder", "crypto-mac 0.8.0", "digest 0.9.0", "opaque-debug 0.2.3", @@ -506,18 +478,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d8fb2d74254a3a0b5cac33ac9f8ed0e44aa50378d9dbb2e5d83bd21ed1dc2c8a" dependencies = [ "arrayref", - "arrayvec 0.5.1", - "constant_time_eq", -] - -[[package]] -name = "blake2s_simd" -version = "0.5.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab9e07352b829279624ceb7c64adb4f585dacdb81d35cafae81139ccd617cf44" -dependencies = [ - "arrayref", - "arrayvec 0.5.1", + "arrayvec 0.5.2", "constant_time_eq", ] @@ -527,9 +488,9 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" dependencies = [ - "block-padding", + "block-padding 0.1.5", "byte-tools", - "byteorder 1.3.4", + "byteorder", "generic-array 0.12.3", ] @@ -539,7 +500,8 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "generic-array 0.14.2", + "block-padding 0.2.1", + "generic-array 0.14.4", ] [[package]] @@ -548,7 +510,16 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa136449e765dc7faa244561ccae839c394048667929af599b5d931ebe7b7f10" dependencies = [ - "generic-array 0.14.2", + "generic-array 0.14.4", +] + +[[package]] +name = "block-cipher" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "f337a3e6da609650eb74e02bc9fac7b735049f7623ab12f2e4c719316fcc7e80" +dependencies = [ + "generic-array 0.14.4", ] [[package]] @@ -560,6 +531,12 @@ dependencies = [ "byte-tools", ] +[[package]] +name = "block-padding" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" + [[package]] name = "blocking" version = "1.0.2" @@ -571,14 +548,14 @@ dependencies = [ "atomic-waker", "fastrand", "futures-lite", - "once_cell 1.4.1", + "once_cell", ] [[package]] name = "bs58" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "476e9cd489f9e121e02ffa6014a8ef220ecb15c05ed23fc34cca13925dc283fb" +checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" [[package]] name = "bstr" @@ -589,6 +566,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "build-helper" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bdce191bf3fa4995ce948c8c83b4640a1745457a149e73c6db75b4ffe36aad5f" +dependencies = [ + "semver 0.6.0", +] + [[package]] name = "bumpalo" version = "3.4.0" @@ -607,12 +593,6 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" -[[package]] -name = "byteorder" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fc10e8cc6b2580fda3f36eb6dc5316657f812a3df879a44a66fc9f0fdbc4855" - [[package]] name = "byteorder" version = "1.3.4" @@ -625,7 +605,7 @@ version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "206fdffcfa2df7cbe15601ef46c813fce0965eb3286db6b56c583b814b51c81c" dependencies = [ - "byteorder 1.3.4", + "byteorder", "either", "iovec", ] @@ -637,16 +617,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" [[package]] -name = "c_linked_list" +name = "cache-padded" version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4964518bd3b4a8190e832886cdc0da9794f12e8e6c1613a9e90ff331c4c8724b" +checksum = "631ae5198c9be5e753e5cc215e1bd73c2b466a3565173db433f52bb9d3e66dba" [[package]] -name = "cache-padded" -version = "1.1.1" +name = "cargo_metadata" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "631ae5198c9be5e753e5cc215e1bd73c2b466a3565173db433f52bb9d3e66dba" +checksum = "83f95cf4bf0dda0ac2e65371ae7215d0dce3c187613a9dbf23aaa9374186f97a" +dependencies = [ + "semver 0.11.0", + "semver-parser 0.10.0", + "serde", + "serde_json", +] [[package]] name = "cc" @@ -672,6 +658,12 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + [[package]] name = "chacha20" version = "0.4.3" @@ -754,26 +746,27 @@ dependencies = [ [[package]] name = "color-eyre" -version = "0.5.6" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2a5123db5af8349c41c43ed0e5dca1cd56c911ea0c4ce6e6ff30f159fa5d27e" +checksum = "7b29030875fd8376e4a28ef497790d5b4a7843d8d1396bf08ce46f5eec562c5c" dependencies = [ "backtrace", "color-spantrace", "eyre", "indenter", - "once_cell 1.4.1", + "once_cell", "owo-colors", "tracing-error", ] [[package]] name = "color-spantrace" -version = "0.1.4" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a99aa4aa18448eef4c7d3f86d2720d2d8cad5c860fe9ff9b279293efdc8f5be" +checksum = 
"b6eee477a4a8a72f4addd4de416eb56d54bc307b284d6601bafdee1f4ea462d1" dependencies = [ - "ansi_term 0.11.0", + "once_cell", + "owo-colors", "tracing-core", "tracing-error", ] @@ -793,40 +786,20 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8d976903543e0c48546a91908f21588a680a8c8f984df9a5d69feccb2b2a211" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "wasm-bindgen", ] [[package]] name = "console_log" -version = "0.1.2" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e7871d2947441b0fdd8e2bd1ce2a2f75304f896582c0d572162d48290683c48" +checksum = "501a375961cef1a0d44767200e66e4a559283097e91d0730b1d75dfb2f8a1494" dependencies = [ - "log 0.4.11", + "log", "web-sys", ] -[[package]] -name = "const-random" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f1af9ac737b2dd2d577701e59fd09ba34822f6f2ebdb30a7647405d9e55e16a" -dependencies = [ - "const-random-macro", - "proc-macro-hack", -] - -[[package]] -name = "const-random-macro" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25e4c606eb459dd29f7c57b2e0879f2b6f14ee130918c2b78ccb58a9624e6c7a" -dependencies = [ - "getrandom", - "proc-macro-hack", -] - [[package]] name = "constant_time_eq" version = "0.1.5" @@ -870,16 +843,16 @@ version = "0.66.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d9badfe36176cb653506091693bc2bb1970c9bddfcd6ec7fac404f7eaec6f38" dependencies = [ - "byteorder 1.3.4", + "byteorder", "cranelift-bforest", "cranelift-codegen-meta", "cranelift-codegen-shared", "cranelift-entity", "gimli 0.21.0", - "log 0.4.11", + "log", "regalloc", "serde", - "smallvec 1.4.2", + "smallvec 1.5.1", "target-lexicon", "thiserror", ] @@ -916,8 +889,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2ef419efb4f94ecc02e5d9fbcc910d2bb7f0040e2de570e63a454f883bc891d6" 
dependencies = [ "cranelift-codegen", - "log 0.4.11", - "smallvec 1.4.2", + "log", + "smallvec 1.5.1", "target-lexicon", ] @@ -941,7 +914,7 @@ dependencies = [ "cranelift-codegen", "cranelift-entity", "cranelift-frontend", - "log 0.4.11", + "log", "serde", "thiserror", "wasmparser 0.59.0", @@ -953,7 +926,7 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba125de2af0df55319f41944744ad91c71113bf74a4646efff39afe1f6842db1" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", ] [[package]] @@ -974,12 +947,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" dependencies = [ "autocfg 1.0.0", - "cfg-if", + "cfg-if 0.1.10", "crossbeam-utils", "lazy_static", "maybe-uninit", "memoffset", - "scopeguard 1.1.0", + "scopeguard", ] [[package]] @@ -988,7 +961,7 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "crossbeam-utils", "maybe-uninit", ] @@ -1000,7 +973,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" dependencies = [ "autocfg 1.0.0", - "cfg-if", + "cfg-if 0.1.10", "lazy_static", ] @@ -1026,7 +999,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" dependencies = [ - "generic-array 0.14.2", + "generic-array 0.14.4", "subtle 2.2.3", ] @@ -1039,14 +1012,25 @@ dependencies = [ "sct", ] +[[package]] +name = "ctor" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fbaabec2c953050352311293be5c6aba8e141ba19d6811862b232d6fd020484" +dependencies = [ + "quote 1.0.7", + "syn 1.0.48", +] + [[package]] name = "cuckoofilter" 
-version = "0.3.2" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dd43f7cfaffe0a386636a10baea2ee05cc50df3b77bea4a456c9572a939bf1f" +checksum = "b810a8449931679f64cd7eef1bbd0fa315801b6d5d9cdc1ace2804d6529eee18" dependencies = [ - "byteorder 0.5.3", - "rand 0.3.23", + "byteorder", + "fnv", + "rand 0.7.3", ] [[package]] @@ -1055,18 +1039,31 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d85653f070353a16313d0046f173f70d1aadd5b42600a14de626f0dfb3473a5" dependencies = [ - "byteorder 1.3.4", + "byteorder", "digest 0.8.1", "rand_core 0.5.1", "subtle 2.2.3", "zeroize", ] +[[package]] +name = "curve25519-dalek" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8492de420e9e60bc9a1d66e2dbb91825390b738a388606600663fc529b4b307" +dependencies = [ + "byteorder", + "digest 0.9.0", + "rand_core 0.5.1", + "subtle 2.2.3", + "zeroize", +] + [[package]] name = "data-encoding" -version = "2.2.1" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72aa14c04dfae8dd7d8a2b1cb7ca2152618cd01336dbfe704b8dcbf8d41dbd69" +checksum = "993a608597367c6377b258c25d7120740f00ed23a2252b729b1932dd7866f908" [[package]] name = "derive_more" @@ -1074,9 +1071,9 @@ version = "0.99.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41cb0e6161ad61ed084a36ba71fbba9e3ac5aee3606fb607fe08da6acbcf3d8c" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", ] [[package]] @@ -1100,7 +1097,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "generic-array 0.14.2", + "generic-array 0.14.4", ] [[package]] @@ -1109,7 +1106,16 @@ version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "551a778172a450d7fc12e629ca3b0428d00f6afa9a43da1b630d54604e97371c" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", + "dirs-sys", +] + +[[package]] +name = "directories" +version = "3.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8fed639d60b58d0f53498ab13d26f621fd77569cc6edb031f4cc36a2ad9da0f" +dependencies = [ "dirs-sys", ] @@ -1126,9 +1132,9 @@ dependencies = [ [[package]] name = "dlmalloc" -version = "0.1.4" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35055b1021724f4eb5262eb49130eebff23fc59fc5a14160e05faad8eeb36673" +checksum = "332570860c2edf2d57914987bf9e24835425f75825086b6ba7d1e6a3e4f1f254" dependencies = [ "libc", ] @@ -1139,7 +1145,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4d33be9473d06f75f58220f71f7a9317aca647dc061dbd3c361b0bef505fbea" dependencies = [ - "byteorder 1.3.4", + "byteorder", "quick-error 1.2.3", ] @@ -1165,9 +1171,9 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "558e40ea573c374cf53507fd240b7ee2f5477df7cfebdb97323ec61c719399c5" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", ] [[package]] @@ -1187,15 +1193,15 @@ dependencies = [ [[package]] name = "ed25519-dalek" -version = "1.0.0-pre.4" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21a8a37f4e8b35af971e6db5e3897e7a6344caa3f92f6544f88125a1f5f0035a" +checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" dependencies = [ - "curve25519-dalek", + "curve25519-dalek 3.0.0", "ed25519", "rand 0.7.3", "serde", - "sha2 0.8.2", + "sha2 0.9.1", "zeroize", ] @@ -1229,9 +1235,9 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "946ee94e3dbf58fdd324f9ce245c7b238d46a66f00e86a020b71996349e46cce" dependencies = [ - 
"proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", ] [[package]] @@ -1242,20 +1248,20 @@ checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" dependencies = [ "atty", "humantime 1.3.0", - "log 0.4.11", + "log", "regex", "termcolor", ] [[package]] name = "env_logger" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54532e3223c5af90a6a757c90b5c5521564b07e5e7a958681bcd2afad421cdcd" +checksum = "f26ecb66b4bdca6c1409b40fb255eefc2bd4f6d135dab3c3124f80ffa2a9661e" dependencies = [ "atty", "humantime 2.0.1", - "log 0.4.11", + "log", "regex", "termcolor", ] @@ -1296,18 +1302,6 @@ dependencies = [ "libc", ] -[[package]] -name = "escargot" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74cf96bec282dcdb07099f7e31d9fed323bca9435a09aba7b6d99b7617bca96d" -dependencies = [ - "lazy_static", - "log 0.4.11", - "serde", - "serde_json", -] - [[package]] name = "event-listener" version = "2.5.1" @@ -1320,17 +1314,17 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e43f2f1833d64e33f15592464d6fdd70f349dda7b1a53088eb83cd94014008c5" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", ] [[package]] name = "eyre" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c5cb4dc433c59f09df4b4450f649cbfed61e8a3505abe32e4154066439157e" +checksum = "534ce924bff9118be8b28b24ede6bf7e96a00b53e4ded25050aa7b526e051e1a" dependencies = [ "indenter", - "once_cell 1.4.1", + "once_cell", ] [[package]] @@ -1349,9 +1343,9 @@ version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", "synstructure", ] @@ 
-1391,9 +1385,9 @@ version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2af1a24f391a5a94d756db5092c6576aad494b88a71a5a36b20c67b63e0df034" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "js-sys", - "log 0.4.11", + "log", "serde", "serde_derive", "serde_json", @@ -1408,7 +1402,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b3937f028664bd0e13df401ba49a4567ccda587420365823242977f06609ed1" dependencies = [ "env_logger 0.7.1", - "log 0.4.11", + "log", ] [[package]] @@ -1418,9 +1412,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8feb87a63249689640ac9c011742c33139204e3c134293d3054022276869133b" dependencies = [ "either", - "futures 0.3.5", + "futures 0.3.8", "futures-timer 2.0.2", - "log 0.4.11", + "log", "num-traits 0.2.12", "parity-scale-codec", "parking_lot 0.9.0", @@ -1432,7 +1426,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11498d382790b7a8f2fd211780bec78619bba81cdad3a283997c0c41f836759c" dependencies = [ - "byteorder 1.3.4", + "byteorder", "rand 0.7.3", "rustc-hex", "static_assertions", @@ -1450,7 +1444,7 @@ version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68c90b0fc46cf89d227cc78b40e494ff81287a92dd07631e5af0d06fe3cf885e" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "crc32fast", "libc", "libz-sys", @@ -1466,7 +1460,7 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "fork-tree" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "parity-scale-codec", ] @@ -1474,7 +1468,7 @@ dependencies = [ [[package]] name = "frame-benchmarking" version = "2.0.0" -source = 
"git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "frame-support", "frame-system", @@ -1492,7 +1486,7 @@ dependencies = [ [[package]] name = "frame-benchmarking-cli" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "chrono", "frame-benchmarking", @@ -1514,7 +1508,7 @@ dependencies = [ [[package]] name = "frame-executive" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "frame-support", "frame-system", @@ -1530,7 +1524,7 @@ dependencies = [ [[package]] name = "frame-metadata" version = "12.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "parity-scale-codec", "serde", @@ -1541,18 +1535,18 @@ dependencies = [ [[package]] name = "frame-support" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ - "bitmask", + "bitflags", "frame-metadata", "frame-support-procedural", - "impl-trait-for-tuples", - "log 0.4.11", - "once_cell 1.4.1", + "impl-trait-for-tuples 0.1.3", + "log", + "once_cell", "parity-scale-codec", "paste", "serde", - "smallvec 1.4.2", + "smallvec 1.5.1", 
"sp-arithmetic", "sp-core", "sp-inherents", @@ -1566,43 +1560,43 @@ dependencies = [ [[package]] name = "frame-support-procedural" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "frame-support-procedural-tools", - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", ] [[package]] name = "frame-support-procedural-tools" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate", - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", ] [[package]] name = "frame-support-procedural-tools-derive" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", ] [[package]] name = "frame-system" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "frame-support", - "impl-trait-for-tuples", + "impl-trait-for-tuples 0.1.3", "parity-scale-codec", "serde", "sp-core", @@ -1615,7 +1609,7 @@ dependencies = [ [[package]] name = "frame-system-benchmarking" version = "2.0.0" -source = 
"git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "frame-benchmarking", "frame-support", @@ -1629,7 +1623,7 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "parity-scale-codec", "sp-api", @@ -1683,9 +1677,9 @@ checksum = "1b980f2816d6ee8673b6517b52cb0e808a180efc92e5c19d02cdda79066703ef" [[package]] name = "futures" -version = "0.3.5" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e05b85ec287aac0dc34db7d4a569323df697f9c55b99b15d6b4ef8cde49f613" +checksum = "9b3b0c040a1fe6529d30b3c5944b280c7f0dcb2930d2c3062bca967b602583d0" dependencies = [ "futures-channel", "futures-core", @@ -1698,34 +1692,19 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.5" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f366ad74c28cca6ba456d95e6422883cfb4b252a83bed929c83abfdbbf2967d5" +checksum = "4b7109687aa4e177ef6fe84553af6280ef2778bdb7783ba44c9dc3399110fe64" dependencies = [ "futures-core", "futures-sink", ] -[[package]] -name = "futures-channel-preview" -version = "0.3.0-alpha.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5e5f4df964fa9c1c2f8bddeb5c3611631cacd93baf810fc8bb2fb4b495c263a" -dependencies = [ - "futures-core-preview", -] - [[package]] name = "futures-core" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59f5fff90fd5d971f936ad674802482ba441b6f09ba5e15fd8b39145582ca399" - -[[package]] -name = "futures-core-preview" 
-version = "0.3.0-alpha.19" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b35b6263fb1ef523c3056565fa67b1d16f0a8604ff12b11b08c25f28a734c60a" +checksum = "847ce131b72ffb13b6109a221da9ad97a64cbe48feb1028356b836b47b8f1748" [[package]] name = "futures-cpupool" @@ -1744,20 +1723,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdcef58a173af8148b182684c9f2d5250875adbcaff7b5794073894f9d8634a9" dependencies = [ "futures 0.1.29", - "futures 0.3.5", + "futures 0.3.8", "lazy_static", - "log 0.4.11", + "log", "parking_lot 0.9.0", - "pin-project", + "pin-project 0.4.23", "serde", "serde_json", ] [[package]] name = "futures-executor" -version = "0.3.5" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10d6bb888be1153d3abeb9006b11b02cf5e9b209fda28693c31ae1e4e012e314" +checksum = "4caa2b2b68b880003057c1dd49f1ed937e38f22fcf6c212188a121f08cf40a65" dependencies = [ "futures-core", "futures-task", @@ -1767,9 +1746,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.5" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de27142b013a8e869c14957e6d2edeef89e97c289e69d042ee3a49acd8b51789" +checksum = "611834ce18aaa1bd13c4b374f5d653e1027cf99b6b502584ff8c9a64413b30bb" [[package]] name = "futures-lite" @@ -1782,35 +1761,35 @@ dependencies = [ "futures-io", "memchr", "parking", - "pin-project-lite", + "pin-project-lite 0.1.7", "waker-fn", ] [[package]] name = "futures-macro" -version = "0.3.5" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0b5a30a4328ab5473878237c447333c093297bded83a4983d10f4deea240d39" +checksum = "77408a692f1f97bcc61dc001d752e00643408fbc922e4d634c655df50d595556" dependencies = [ "proc-macro-hack", - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", ] [[package]] name = "futures-sink" -version 
= "0.3.5" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f2032893cb734c7a05d85ce0cc8b8c4075278e93b24b66f9de99d6eb0fa8acc" +checksum = "f878195a49cee50e006b02b93cf7e0a95a38ac7b776b4c4d9cc1207cd20fcb3d" [[package]] name = "futures-task" -version = "0.3.5" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdb66b5f09e22019b1ab0830f7785bcea8e7a42148683f99214f73f8ec21a626" +checksum = "7c554eb5bf48b2426c4771ab68c6b14468b6e76cc90996f528c3338d761a4d0d" dependencies = [ - "once_cell 1.4.1", + "once_cell", ] [[package]] @@ -1831,9 +1810,9 @@ dependencies = [ [[package]] name = "futures-util" -version = "0.3.5" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8764574ff08b701a084482c3c7031349104b07ac897393010494beaa18ce32c6" +checksum = "d304cff4a7b99cfb7986f7d43fbe93d175e72e704a8860787cc95e9ffd85cbd2" dependencies = [ "futures 0.1.29", "futures-channel", @@ -1843,25 +1822,13 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project", + "pin-project 1.0.2", "pin-utils", "proc-macro-hack", "proc-macro-nested", "slab", ] -[[package]] -name = "futures-util-preview" -version = "0.3.0-alpha.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ce968633c17e5f97936bd2797b6e38fb56cf16a7422319f7ec2e30d3c470e8d" -dependencies = [ - "futures-channel-preview", - "futures-core-preview", - "pin-utils", - "slab", -] - [[package]] name = "futures_codec" version = "0.4.1" @@ -1869,9 +1836,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce54d63f8b0c75023ed920d46fd71d0cbbb830b0ee012726b5b4f506fb6dea5b" dependencies = [ "bytes 0.5.6", - "futures 0.3.5", + "futures 0.3.8", "memchr", - "pin-project", + "pin-project 0.4.23", ] [[package]] @@ -1881,44 +1848,44 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2" [[package]] -name = "generic-array" -version = "0.12.3" +name = "generator" +version = "0.6.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" +checksum = "8cdc09201b2e8ca1b19290cf7e65de2246b8e91fb6874279722189c4de7b94dc" dependencies = [ - "typenum", + "cc", + "libc", + "log", + "rustc_version", + "winapi 0.3.9", ] [[package]] name = "generic-array" -version = "0.14.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac746a5f3bbfdadd6106868134545e684693d54d9d44f6e9588a7d54af0bf980" +checksum = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" dependencies = [ "typenum", - "version_check", ] [[package]] -name = "get_if_addrs" -version = "0.5.3" +name = "generic-array" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abddb55a898d32925f3148bd281174a68eeb68bbfd9a5938a57b18f506ee4ef7" +checksum = "0ed1e761351b56f54eb9dcd0cfaca9fd0daecf93918e1cfc01c8a3d26ee7adcd" dependencies = [ - "c_linked_list", - "get_if_addrs-sys", - "libc", - "winapi 0.2.8", + "typenum", ] [[package]] -name = "get_if_addrs-sys" -version = "0.1.1" +name = "generic-array" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d04f9fb746cf36b191c00f3ede8bde9c8e64f9f4b05ae2694a9ccf5e3f5ab48" +checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" dependencies = [ - "gcc", - "libc", + "typenum", + "version_check", ] [[package]] @@ -1927,7 +1894,7 @@ version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "libc", "wasi", "wasm-bindgen", @@ -1974,7 +1941,7 @@ dependencies = [ "aho-corasick", "bstr", "fnv", - "log 
0.4.11", + "log", "regex", ] @@ -1997,13 +1964,13 @@ version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a5b34c246847f938a410a03c5458c7fee2274436675e76d8b903c08efc29c462" dependencies = [ - "byteorder 1.3.4", + "byteorder", "bytes 0.4.12", "fnv", "futures 0.1.29", "http 0.1.21", "indexmap", - "log 0.4.11", + "log", "slab", "string", "tokio-io", @@ -2022,7 +1989,7 @@ dependencies = [ "futures-util", "http 0.2.1", "indexmap", - "log 0.4.11", + "log", "slab", "tokio 0.2.21", "tokio-util", @@ -2034,7 +2001,7 @@ version = "3.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2764f9796c0ddca4b82c07f25dd2cb3db30b9a8f47940e78e1c883d9e95c3db9" dependencies = [ - "log 0.4.11", + "log", "pest", "pest_derive", "quick-error 2.0.0", @@ -2059,32 +2026,21 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bae29b6653b3412c2e71e9d486db9f9df5d701941d86683005efb9f2d28e3da" -dependencies = [ - "byteorder 1.3.4", - "scopeguard 0.3.3", -] - -[[package]] -name = "hashbrown" -version = "0.6.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e6073d0ca812575946eb5f35ff68dbe519907b25c42530389ff946dc84c6ead" +checksum = "ab9b7860757ce258c89fd48d28b68c41713e597a7b09e793f6c6a6e2ea37c827" dependencies = [ - "ahash 0.2.18", - "autocfg 0.1.7", + "ahash 0.3.8", + "autocfg 1.0.0", ] [[package]] name = "hashbrown" -version = "0.8.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab9b7860757ce258c89fd48d28b68c41713e597a7b09e793f6c6a6e2ea37c827" +checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" dependencies = [ - "ahash 0.3.8", - "autocfg 1.0.0", + "ahash 0.4.6", ] [[package]] @@ -2111,31 +2067,12 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"644f9158b2f133fd50f5fb3242878846d9eb792e445c893805ff0e3824006e35" -[[package]] -name = "hex-literal" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "961de220ec9a91af2e1e5bd80d02109155695e516771762381ef8581317066e0" -dependencies = [ - "hex-literal-impl", - "proc-macro-hack", -] - [[package]] name = "hex-literal" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5af1f635ef1bc545d78392b136bfe1c9809e029023c84a3638a864a10b8819c8" -[[package]] -name = "hex-literal-impl" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "853f769599eb31de176303197b7ba4973299c38c7a7604a6bc88c3eef05b9b46" -dependencies = [ - "proc-macro-hack", -] - [[package]] name = "hex_fmt" version = "0.3.0" @@ -2152,6 +2089,16 @@ dependencies = [ "digest 0.8.1", ] +[[package]] +name = "hmac" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" +dependencies = [ + "crypto-mac 0.8.0", + "digest 0.9.0", +] + [[package]] name = "hmac-drbg" version = "0.2.0" @@ -2160,7 +2107,7 @@ checksum = "c6e570451493f10f6581b48cdd530413b63ea9e780f544bfd3bdcaa0d89d1a7b" dependencies = [ "digest 0.8.1", "generic-array 0.12.3", - "hmac", + "hmac 0.7.1", ] [[package]] @@ -2243,13 +2190,13 @@ dependencies = [ "httparse", "iovec", "itoa", - "log 0.4.11", + "log", "net2", "rustc_version", "time", "tokio 0.1.22", "tokio-buf", - "tokio-executor 0.1.10", + "tokio-executor", "tokio-io", "tokio-reactor", "tokio-tcp", @@ -2273,8 +2220,8 @@ dependencies = [ "http-body 0.3.1", "httparse", "itoa", - "log 0.4.11", - "pin-project", + "log", + "pin-project 0.4.23", "socket2", "time", "tokio 0.2.21", @@ -2292,8 +2239,8 @@ dependencies = [ "ct-logs", "futures-util", "hyper 0.13.6", - "log 0.4.11", - "rustls", + "log", + "rustls 0.18.0", "rustls-native-certs", "tokio 0.2.21", "tokio-rustls", 
@@ -2322,6 +2269,43 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "if-addrs" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28538916eb3f3976311f5dfbe67b5362d0add1293d0a9cad17debf86f8e3aa48" +dependencies = [ + "if-addrs-sys", + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "if-addrs-sys" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de74b9dd780476e837e5eb5ab7c88b49ed304126e412030a0adba99c8efe79ea" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "if-watch" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16d7c5e361e6b05c882b4847dd98992534cebc6fcde7f4bc98225bcf10fd6d0d" +dependencies = [ + "async-io", + "futures 0.3.8", + "futures-lite", + "if-addrs", + "ipnet", + "libc", + "log", + "winapi 0.3.9", +] + [[package]] name = "impl-codec" version = "0.4.2" @@ -2346,9 +2330,20 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ef5550a42e3740a0e71f909d4c861056a284060af885ae7aa6242820f920d9d" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", + "quote 1.0.7", + "syn 1.0.48", +] + +[[package]] +name = "impl-trait-for-tuples" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f65a8ecf74feeacdab8d38cb129e550ca871cccaa7d1921d8636ecd75534903" +dependencies = [ + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", ] [[package]] @@ -2359,11 +2354,12 @@ checksum = "e0bd112d44d9d870a6819eb505d04dd92b5e4d94bb8c304924a0872ae7016fb5" [[package]] name = "indexmap" -version = "1.4.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c398b2b113b55809ceb9ee3e753fcbac793f1956663f3c36549c1346015c2afe" +checksum = "55e2e4c765aa53a0424761bf9f41aa7a6ac1efa87238f59560640e27fca028f2" dependencies = [ "autocfg 1.0.0", + "hashbrown 
0.9.1", "serde", ] @@ -2385,7 +2381,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64fa110ec7b8f493f416eed552740d10e7030ad5f63b2308f82c9608ec2df275" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "futures-timer 2.0.2", ] @@ -2466,24 +2462,24 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.41" +version = "0.3.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4b9172132a62451e56142bff9afc91c8e4a4500aa5b847da36815b63bfda916" +checksum = "cf3d7383929f7c9c7c2d0fa596f325832df98c3704f2c60553080f7127a58175" dependencies = [ "wasm-bindgen", ] [[package]] name = "jsonrpc-client-transports" -version = "15.0.0" +version = "15.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6f7b1cdf66312002e15682a24430728bd13036c641163c016bc53fb686a7c2d" +checksum = "489b9c612e60c766f751ab40fcb43cbb55a1e10bb44a9b4307ed510ca598cbd7" dependencies = [ "failure", "futures 0.1.29", "jsonrpc-core", "jsonrpc-pubsub", - "log 0.4.11", + "log", "serde", "serde_json", "url 1.7.2", @@ -2491,12 +2487,12 @@ dependencies = [ [[package]] name = "jsonrpc-core" -version = "15.0.0" +version = "15.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f30b12567a31d48588a65b6cf870081e6ba1d7b2ae353977cb9820d512e69c70" +checksum = "0745a6379e3edc893c84ec203589790774e4247420033e71a76d3ab4687991fa" dependencies = [ "futures 0.1.29", - "log 0.4.11", + "log", "serde", "serde_derive", "serde_json", @@ -2504,35 +2500,35 @@ dependencies = [ [[package]] name = "jsonrpc-core-client" -version = "15.0.0" +version = "15.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d175ca0cf77439b5495612bf216c650807d252d665b4b70ab2eebd895a88fac1" +checksum = "6f764902d7b891344a0acb65625f32f6f7c6db006952143bd650209fbe7d94db" dependencies = [ "jsonrpc-client-transports", ] [[package]] name = "jsonrpc-derive" -version = "15.0.0" +version = 
"15.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2cc6ea7f785232d9ca8786a44e9fa698f92149dcdc1acc4aa1fc69c4993d79e" +checksum = "99a847f9ec7bb52149b2786a17c9cb260d6effc6b8eeb8c16b343a487a7563a3" dependencies = [ "proc-macro-crate", - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", ] [[package]] name = "jsonrpc-http-server" -version = "15.0.0" +version = "15.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9996b26c0c7a59626d0ed6c5ec8bf06218e62ce1474bd2849f9b9fd38a0158c0" +checksum = "4fb5c4513b7b542f42da107942b7b759f27120b5cc894729f88254b28dff44b7" dependencies = [ "hyper 0.12.35", "jsonrpc-core", "jsonrpc-server-utils", - "log 0.4.11", + "log", "net2", "parking_lot 0.10.2", "unicase", @@ -2540,13 +2536,13 @@ dependencies = [ [[package]] name = "jsonrpc-ipc-server" -version = "15.0.0" +version = "15.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8e8f2278fb2b277175b6e21b23e7ecf30e78daff5ee301d0a2a411d9a821a0a" +checksum = "cf50e53e4eea8f421a7316c5f63e395f7bc7c4e786a6dc54d76fab6ff7aa7ce7" dependencies = [ "jsonrpc-core", "jsonrpc-server-utils", - "log 0.4.11", + "log", "parity-tokio-ipc", "parking_lot 0.10.2", "tokio-service", @@ -2554,12 +2550,12 @@ dependencies = [ [[package]] name = "jsonrpc-pubsub" -version = "15.0.0" +version = "15.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f389c5cd1f3db258a99296892c21047e21ae73ff4c0e2d39650ea86fe994b4c7" +checksum = "639558e0604013be9787ae52f798506ae42bf4220fe587bdc5625871cc8b9c77" dependencies = [ "jsonrpc-core", - "log 0.4.11", + "log", "parking_lot 0.10.2", "rand 0.7.3", "serde", @@ -2567,15 +2563,15 @@ dependencies = [ [[package]] name = "jsonrpc-server-utils" -version = "15.0.0" +version = "15.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c623e1895d0d9110cb0ea7736cfff13191ff52335ad33b21bd5c775ea98b27af" +checksum = "72f1f3990650c033bd8f6bd46deac76d990f9bbfb5f8dc8c4767bf0a00392176" dependencies = [ "bytes 0.4.12", "globset", "jsonrpc-core", "lazy_static", - "log 0.4.11", + "log", "tokio 0.1.22", "tokio-codec", "unicase", @@ -2583,13 +2579,13 @@ dependencies = [ [[package]] name = "jsonrpc-ws-server" -version = "15.0.0" +version = "15.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "436a92034d0137ab3e3c64a7a6350b428f31cb4d7d1a89f284bcdbcd98a7bc56" +checksum = "6596fe75209b73a2a75ebe1dce4e60e03b88a2b25e8807b667597f6315150d22" dependencies = [ "jsonrpc-core", "jsonrpc-server-utils", - "log 0.4.11", + "log", "parity-ws", "parking_lot 0.10.2", "slab", @@ -2613,7 +2609,7 @@ dependencies = [ [[package]] name = "kusama-runtime" -version = "0.8.26" +version = "0.8.27" dependencies = [ "bitvec", "frame-benchmarking", @@ -2622,9 +2618,9 @@ dependencies = [ "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", - "hex-literal 0.2.1", + "hex-literal", "libsecp256k1", - "log 0.3.9", + "log", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", @@ -2664,7 +2660,7 @@ dependencies = [ "serde", "serde_derive", "serde_json", - "smallvec 1.4.2", + "smallvec 1.5.1", "sp-api", "sp-authority-discovery", "sp-block-builder", @@ -2682,8 +2678,8 @@ dependencies = [ "sp-trie", "sp-version", "static_assertions", - "substrate-wasm-builder-runner", - "tiny-keccak 1.5.0", + "substrate-wasm-builder", + "tiny-keccak", ] [[package]] @@ -2692,7 +2688,7 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" dependencies = [ - "log 0.4.11", + "log", ] [[package]] @@ -2702,7 +2698,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0315ef2f688e33844400b31f11c263f2b3dc21d8b9355c6891c5f185fae43f9a" dependencies = [ "parity-util-mem", - 
"smallvec 1.4.2", + "smallvec 1.5.1", ] [[package]] @@ -2724,14 +2720,14 @@ checksum = "44947dd392f09475af614d740fe0320b66d01cb5b977f664bbbb5e45a70ea4c1" dependencies = [ "fs-swap", "kvdb", - "log 0.4.11", + "log", "num_cpus", "owning_ref", "parity-util-mem", "parking_lot 0.10.2", "regex", "rocksdb", - "smallvec 1.4.2", + "smallvec 1.5.1", ] [[package]] @@ -2740,11 +2736,11 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2701a1369d6ea4f1b9f606db46e5e2a4a8e47f22530a07823d653f85ab1f6c34" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "js-sys", "kvdb", "kvdb-memorydb", - "log 0.4.11", + "log", "parity-util-mem", "send_wrapper 0.3.0", "wasm-bindgen", @@ -2771,9 +2767,9 @@ checksum = "3576a87f2ba00f6f106fdfcd16db1d698d648a26ad8e0573cad8537c3c362d2a" [[package]] name = "libc" -version = "0.2.79" +version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2448f6066e80e3bfc792e9c98bf705b4b0fc6e8ef5b43e5889aff0eaa9c58743" +checksum = "1482821306169ec4d07f6aca392a4681f66c75c9918aa49641a2595db64053cb" [[package]] name = "libloading" @@ -2793,13 +2789,13 @@ checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" [[package]] name = "libp2p" -version = "0.28.1" +version = "0.32.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "571f5a4604c1a40d75651da141dfde29ad15329f537a779528803297d2220274" +checksum = "941af10b45fd27d15e94aea83002c4a21521849fad8aad78d1cdbf00a60b0a17" dependencies = [ "atomic", "bytes 0.5.6", - "futures 0.3.5", + "futures 0.3.8", "lazy_static", "libp2p-core", "libp2p-core-derive", @@ -2822,236 +2818,237 @@ dependencies = [ "libp2p-wasm-ext", "libp2p-websocket", "libp2p-yamux", - "multihash", "parity-multiaddr", - "parking_lot 0.10.2", - "pin-project", - "smallvec 1.4.2", + "parking_lot 0.11.1", + "pin-project 1.0.2", + "smallvec 1.5.1", "wasm-timer", ] [[package]] name = "libp2p-core" -version = "0.22.1" +version = 
"0.25.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52f13ba8c7df0768af2eb391696d562c7de88cc3a35122531aaa6a7d77754d25" +checksum = "cc9c96d3a606a696a3a6c0ad3c3352c57bda2082ec9090930f1bd9daf787039f" dependencies = [ "asn1_der", "bs58", + "bytes 0.5.6", "ed25519-dalek", "either", "fnv", - "futures 0.3.5", + "futures 0.3.8", "futures-timer 3.0.2", "lazy_static", "libsecp256k1", - "log 0.4.11", + "log", "multihash", "multistream-select", "parity-multiaddr", - "parking_lot 0.10.2", - "pin-project", + "parking_lot 0.11.1", + "pin-project 1.0.2", "prost", "prost-build", "rand 0.7.3", "ring", "rw-stream-sink", - "sha2 0.8.2", - "smallvec 1.4.2", + "sha2 0.9.1", + "smallvec 1.5.1", "thiserror", - "unsigned-varint 0.4.0", + "unsigned-varint", "void", "zeroize", ] [[package]] name = "libp2p-core-derive" -version = "0.20.2" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f753d9324cd3ec14bf04b8a8cd0d269c87f294153d6bf2a84497a63a5ad22213" +checksum = "f4bc40943156e42138d22ed3c57ff0e1a147237742715937622a99b10fbe0156" dependencies = [ "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", ] [[package]] name = "libp2p-deflate" -version = "0.22.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74029ae187f35f4b8ddf26b9779a68b340045d708528a103917cdca49a296db5" +checksum = "5a579d7dd506d0620ba88ccc1754436b7de35ed6c884234f9a226bbfce382640" dependencies = [ "flate2", - "futures 0.3.5", + "futures 0.3.8", "libp2p-core", ] [[package]] name = "libp2p-dns" -version = "0.22.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cf319822e08dd65c8e060d2354e9f952895bbc433f5706c75ed010c152aee5e" +checksum = "15dea5933f570844d7b5222b12b58f7bd52e9ca38cd65a1bd4f35341f053f012" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "libp2p-core", - "log 0.4.11", + "log", ] [[package]] name = "libp2p-floodsub" -version = "0.22.0" 
+version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8a9acb43a3e4a4e413e0c4abe0fa49308df7c6335c88534757b647199cb8a51" +checksum = "23070a0838bd9a8adb27e6eba477eeb650c498f9d139383dd0135d20a8170253" dependencies = [ "cuckoofilter", "fnv", - "futures 0.3.5", + "futures 0.3.8", "libp2p-core", "libp2p-swarm", + "log", "prost", "prost-build", "rand 0.7.3", - "smallvec 1.4.2", + "smallvec 1.5.1", ] [[package]] name = "libp2p-gossipsub" -version = "0.22.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab20fcb60edebe3173bbb708c6ac3444afdf1e3152dc2866b10c4f5497f17467" +checksum = "65e8f3aa0906fbad435dac23c177eef3cdfaaf62609791bd7f54f8553edcfdf9" dependencies = [ - "base64 0.11.0", - "byteorder 1.3.4", + "base64 0.13.0", + "byteorder", "bytes 0.5.6", "fnv", - "futures 0.3.5", + "futures 0.3.8", "futures_codec", "hex_fmt", "libp2p-core", "libp2p-swarm", - "log 0.4.11", + "log", "lru_time_cache", "prost", "prost-build", "rand 0.7.3", - "sha2 0.8.2", - "smallvec 1.4.2", - "unsigned-varint 0.4.0", + "sha2 0.9.1", + "smallvec 1.5.1", + "unsigned-varint", "wasm-timer", ] [[package]] name = "libp2p-identify" -version = "0.22.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56396ee63aa9164eacf40c2c5d2bda8c4133c2f57e1b0425d51d3a4e362583b1" +checksum = "802fb973a7e0dde3fb9a2113a62bad90338ebe01983b706e1d576d0c2af93cda" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "libp2p-core", "libp2p-swarm", - "log 0.4.11", + "log", "prost", "prost-build", - "smallvec 1.4.2", + "smallvec 1.5.1", "wasm-timer", ] [[package]] name = "libp2p-kad" -version = "0.23.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc7fa9047f8b8f544278a35c2d9d45d3b2c1785f2d86d4e1629d6edf97be3955" +checksum = "6506b7b7982f7626fc96a91bc61be4b1fe7ae9ac23824f0ecefcce21cb39238c" dependencies = [ - "arrayvec 0.5.1", + 
"arrayvec 0.5.2", "bytes 0.5.6", "either", "fnv", - "futures 0.3.5", + "futures 0.3.8", "futures_codec", "libp2p-core", "libp2p-swarm", - "log 0.4.11", - "multihash", + "log", "prost", "prost-build", "rand 0.7.3", - "sha2 0.8.2", - "smallvec 1.4.2", + "sha2 0.9.1", + "smallvec 1.5.1", "uint", - "unsigned-varint 0.4.0", + "unsigned-varint", "void", "wasm-timer", ] [[package]] name = "libp2p-mdns" -version = "0.22.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3173b5a6b2f690c29ae07798d85b9441a131ac76ddae9015ef22905b623d0c69" +checksum = "7b934ee03a361f317df7d75defa4177b285534c58f49d5e6e240278e13ef3f65" dependencies = [ - "async-std", + "async-io", "data-encoding", "dns-parser", - "either", - "futures 0.3.5", + "futures 0.3.8", + "if-watch", "lazy_static", "libp2p-core", "libp2p-swarm", - "log 0.4.11", - "net2", + "log", "rand 0.7.3", - "smallvec 1.4.2", + "smallvec 1.5.1", + "socket2", "void", - "wasm-timer", ] [[package]] name = "libp2p-mplex" -version = "0.22.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a73a799cc8410b36e40b8f4c4b6babbcb9efd3727111bf517876e4acfa612d3" +checksum = "ae2132b14045009b0f8e577a06e1459592ef0a89dedc58f3d4baf4eac956837b" dependencies = [ "bytes 0.5.6", - "fnv", - "futures 0.3.5", + "futures 0.3.8", "futures_codec", "libp2p-core", - "log 0.4.11", - "parking_lot 0.10.2", - "unsigned-varint 0.4.0", + "log", + "nohash-hasher", + "parking_lot 0.11.1", + "rand 0.7.3", + "smallvec 1.5.1", + "unsigned-varint", ] [[package]] name = "libp2p-noise" -version = "0.24.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ef6c490042f549fb1025f2892dfe6083d97a77558f450c1feebe748ca9eb15a" +checksum = "b9610a524bef4db383cd96b4ec3ec4722eafa72c7242fa89990b74166760583d" dependencies = [ "bytes 0.5.6", - "curve25519-dalek", - "futures 0.3.5", + "curve25519-dalek 3.0.0", + "futures 0.3.8", "lazy_static", 
"libp2p-core", - "log 0.4.11", + "log", "prost", "prost-build", "rand 0.7.3", - "sha2 0.8.2", + "sha2 0.9.1", "snow", "static_assertions", - "x25519-dalek", + "x25519-dalek 1.1.0", "zeroize", ] [[package]] name = "libp2p-ping" -version = "0.22.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad063c21dfcea4518ac9e8bd4119d33a5b26c41e674f602f41f05617a368a5c8" +checksum = "659adf89356e04f65398bb74ee791b269e63da9e41b37f8dc19eaacd12487bfe" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "libp2p-core", "libp2p-swarm", - "log 0.4.11", + "log", "rand 0.7.3", "void", "wasm-timer", @@ -3059,31 +3056,30 @@ dependencies = [ [[package]] name = "libp2p-plaintext" -version = "0.22.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "903a12e99c72dbebefea258de887982adeacc7025baa1ceb10b7fa9928f54791" +checksum = "96dfe26270c91d4ff095030d1fcadd602f3fd84968ebd592829916d0715798a6" dependencies = [ "bytes 0.5.6", - "futures 0.3.5", + "futures 0.3.8", "futures_codec", "libp2p-core", - "log 0.4.11", + "log", "prost", "prost-build", - "rw-stream-sink", - "unsigned-varint 0.4.0", + "unsigned-varint", "void", ] [[package]] name = "libp2p-pnet" -version = "0.19.1" +version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37d0db10e139d22d7af0b23ed7949449ec86262798aa0fd01595abdbcb02dc87" +checksum = "96b3c2d5d26a9500e959a0e19743897239a6c4be78dadf99b70414301a70c006" dependencies = [ - "futures 0.3.5", - "log 0.4.11", - "pin-project", + "futures 0.3.8", + "log", + "pin-project 0.4.23", "rand 0.7.3", "salsa20", "sha3", @@ -3091,75 +3087,75 @@ dependencies = [ [[package]] name = "libp2p-request-response" -version = "0.3.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c0c9e8a4cd69d97e9646c54313d007512f411aba8c5226cfcda16df6a6e84a3" +checksum = 
"bd96c3580fe59a9379ac7906c2f61c7f5ad3b7515362af0e72153a7cc9a45550" dependencies = [ "async-trait", "bytes 0.5.6", - "futures 0.3.5", + "futures 0.3.8", "libp2p-core", "libp2p-swarm", - "log 0.4.11", - "lru 0.6.0", + "log", + "lru", "minicbor", "rand 0.7.3", - "smallvec 1.4.2", - "unsigned-varint 0.5.1", + "smallvec 1.5.1", + "unsigned-varint", "wasm-timer", ] [[package]] name = "libp2p-swarm" -version = "0.22.0" +version = "0.25.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7193e444210132237b81b755ec7fe53f1c4bd2f53cf719729b94c0c72eb6eaa1" +checksum = "a6ecee54e85513a7301eb4681b3a6aac5b6d11f60d43097cf7624fd4450d7dfe" dependencies = [ "either", - "futures 0.3.5", + "futures 0.3.8", "libp2p-core", - "log 0.4.11", + "log", "rand 0.7.3", - "smallvec 1.4.2", + "smallvec 1.5.1", "void", "wasm-timer", ] [[package]] name = "libp2p-tcp" -version = "0.22.0" +version = "0.25.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44f42ec130d7a37a7e47bf4398026b7ad9185c08ed26972e2720f8b94112796f" +checksum = "bc28c9ad6dc43f4c3950411cf808639d90307a076330e7996e5e94e70279bde0" dependencies = [ "async-std", - "futures 0.3.5", + "futures 0.3.8", "futures-timer 3.0.2", - "get_if_addrs", + "if-addrs", "ipnet", "libp2p-core", - "log 0.4.11", + "log", "socket2", ] [[package]] name = "libp2p-uds" -version = "0.22.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dea7acb0a034f70d7db94c300eba3f65c0f6298820105624088a9609c9974d77" +checksum = "9d821208d4b9af4b293a56dde470edd9f9fac8bb94a51f4f5327cc29a471b3f3" dependencies = [ "async-std", - "futures 0.3.5", + "futures 0.3.8", "libp2p-core", - "log 0.4.11", + "log", ] [[package]] name = "libp2p-wasm-ext" -version = "0.22.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34c1faac6f92c21fbe155417957863ea822fba9e9fd5eb24c0912336a100e63f" +checksum = 
"1e6ef400b231ba78e866b860445480ca21ee447e03034138c6d57cf2969d6bf4" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "js-sys", "libp2p-core", "parity-send-wrapper", @@ -3169,33 +3165,33 @@ dependencies = [ [[package]] name = "libp2p-websocket" -version = "0.23.0" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d650534ebd99f48f6fa292ed5db10d30df2444943afde4407ceeddab8e513fca" +checksum = "d8a0af4ea43104a01c634ee1b8026ce11f9ee3766a894a44f9e1da5a0eb74fc0" dependencies = [ "async-tls", "either", - "futures 0.3.5", + "futures 0.3.8", "libp2p-core", - "log 0.4.11", + "log", "quicksink", - "rustls", + "rustls 0.19.0", "rw-stream-sink", "soketto", "url 2.1.1", "webpki", - "webpki-roots 0.18.0", + "webpki-roots", ] [[package]] name = "libp2p-yamux" -version = "0.25.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "781d9b9f043dcdabc40640807125368596b849fd4d96cdca2dcf052fdf6f33fd" +checksum = "3be7ac000fa3e42ac09a6e658e48de34ac8ef9fff64a4e6e6b08dcc8f4b0e5f6" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "libp2p-core", - "parking_lot 0.11.0", + "parking_lot 0.11.1", "thiserror", "yamux", ] @@ -3257,31 +3253,21 @@ dependencies = [ [[package]] name = "linregress" -version = "0.1.7" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9290cf6f928576eeb9c096c6fad9d8d452a0a1a70a2bbffa6e36064eedc0aac9" +checksum = "0d0ad4b5cc8385a881c561fac3501353d63d2a2b7a357b5064d71815c9a92724" dependencies = [ - "failure", "nalgebra", "statrs", ] -[[package]] -name = "lock_api" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62ebf1391f6acad60e5c8b43706dde4582df75c06698ab44511d15016bc2442c" -dependencies = [ - "scopeguard 0.3.3", -] - [[package]] name = "lock_api" version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75" dependencies = [ - "scopeguard 1.1.0", + "scopeguard", ] [[package]] @@ -3290,16 +3276,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28247cc5a5be2f05fbcd76dd0cf2c7d3b5400cb978a28042abcd4fa0b3f8261c" dependencies = [ - "scopeguard 1.1.0", -] - -[[package]] -name = "log" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b" -dependencies = [ - "log 0.4.11", + "scopeguard", ] [[package]] @@ -3308,32 +3285,36 @@ version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", ] [[package]] -name = "lru" -version = "0.4.3" +name = "loom" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0609345ddee5badacf857d4f547e0e5a2e987db77085c24cd887f73573a04237" +checksum = "a0e8460f2f2121162705187214720353c517b97bdfb3494c0b1e33d83ebe4bed" dependencies = [ - "hashbrown 0.6.3", + "cfg-if 0.1.10", + "generator", + "scoped-tls", + "serde", + "serde_json", ] [[package]] name = "lru" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "111b945ac72ec09eb7bc62a0fbdc3cc6e80555a7245f52a69d3921a75b53b153" +checksum = "be716eb6878ca2263eb5d00a781aa13264a794f519fe6af4fbb2668b2d5441c0" dependencies = [ - "hashbrown 0.8.0", + "hashbrown 0.9.1", ] [[package]] name = "lru_time_cache" -version = "0.10.0" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adb241df5c4caeb888755363fc95f8a896618dc0d435e9e775f7930cb099beab" +checksum = "ebac060fafad3adedd0c66a80741a92ff4bc8e94a273df2ba3770ab206f2e29a" [[package]] name = "mach" @@ -3437,7 +3418,7 @@ version = "2.0.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "c6feca46f4fa3443a01769d768727f10c10a20fdb65e52dc16a81f0c8269bb78" dependencies = [ - "byteorder 1.3.4", + "byteorder", "keccak", "rand_core 0.5.1", "zeroize", @@ -3445,22 +3426,22 @@ dependencies = [ [[package]] name = "minicbor" -version = "0.5.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fc03ad6f8f548db7194a5ff5a6f96342ecae4e3ef67d2bf18bacc0e245cd041" +checksum = "0164190d1771b1458c3742075b057ed55d25cd9dfb930aade99315a1eb1fe12d" dependencies = [ "minicbor-derive", ] [[package]] name = "minicbor-derive" -version = "0.4.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c214bf3d90099b52f3e4b328ae0fe34837fd0fab683ad1e10fceb4629106df48" +checksum = "2e071b3159835ee91df62dbdbfdd7ec366b7ea77c838f43aff4acda6b61bcfb9" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", ] [[package]] @@ -3478,13 +3459,13 @@ version = "0.6.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fce347092656428bc8eaf6201042cb551b8d67855af7374542a92a0fbfcac430" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "fuchsia-zircon", "fuchsia-zircon-sys", "iovec", "kernel32-sys", "libc", - "log 0.4.11", + "log", "miow 0.2.1", "net2", "slab", @@ -3498,7 +3479,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52403fe290012ce777c4626790c8951324a2b9e3316b3143779c72b029742f19" dependencies = [ "lazycell", - "log 0.4.11", + "log", "mio", "slab", ] @@ -3509,7 +3490,7 @@ version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0840c1c50fd55e521b247f949c241c9997709f23bd7f023b9762cd561e935656" dependencies = [ - "log 0.4.11", + "log", "mio", "miow 0.3.5", "winapi 0.3.9", @@ -3556,17 +3537,29 @@ checksum = "0debeb9fcf88823ea64d64e4a815ab1643f33127d995978e099942ce38f25238" [[package]] name 
= "multihash" -version = "0.11.2" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f75db05d738947aa5389863aadafbcf2e509d7ba099dc2ddcdf4fc66bf7a9e03" +checksum = "fb63389ee5fcd4df3f8727600f4a0c3df53c541f0ed4e8b50a9ae51a80fc1efe" dependencies = [ - "blake2b_simd", - "blake2s_simd", - "digest 0.8.1", - "sha-1", - "sha2 0.8.2", - "sha3", - "unsigned-varint 0.3.3", + "digest 0.9.0", + "generic-array 0.14.4", + "multihash-derive", + "sha2 0.9.1", + "unsigned-varint", +] + +[[package]] +name = "multihash-derive" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f5653449cd45d502a53480ee08d7a599e8f4893d2bacb33c63d65bc20af6c1a" +dependencies = [ + "proc-macro-crate", + "proc-macro-error", + "proc-macro2 1.0.24", + "quote 1.0.7", + "syn 1.0.48", + "synstructure", ] [[package]] @@ -3577,32 +3570,33 @@ checksum = "d8883adfde9756c1d30b0f519c9b8c502a94b41ac62f696453c37c7fc0a958ce" [[package]] name = "multistream-select" -version = "0.8.2" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9157e87afbc2ef0d84cc0345423d715f445edde00141c93721c162de35a05e5" +checksum = "dda822043bba2d6da31c4e14041f9794f8fb130a5959289038d0b809d8888614" dependencies = [ "bytes 0.5.6", - "futures 0.3.5", - "log 0.4.11", - "pin-project", - "smallvec 1.4.2", - "unsigned-varint 0.4.0", + "futures 0.3.8", + "log", + "pin-project 1.0.2", + "smallvec 1.5.1", + "unsigned-varint", ] [[package]] name = "nalgebra" -version = "0.18.1" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aaa9fddbc34c8c35dd2108515587b8ce0cab396f17977b8c738568e4edb521a2" +checksum = "d6b6147c3d50b4f3cdabfe2ecc94a0191fd3d6ad58aefd9664cf396285883486" dependencies = [ - "alga", "approx", - "generic-array 0.12.3", + "generic-array 0.13.2", "matrixmultiply", "num-complex", "num-rational", "num-traits 0.2.12", - "rand 0.6.5", + "rand 0.7.3", + 
"rand_distr", + "simba", "typenum", ] @@ -3615,13 +3609,23 @@ dependencies = [ "rand 0.3.23", ] +[[package]] +name = "nb-connect" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8123a81538e457d44b933a02faf885d3fe8408806b23fa700e8f01c6c3a98998" +dependencies = [ + "libc", + "winapi 0.3.9", +] + [[package]] name = "net2" version = "0.2.34" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2ba7c918ac76704fb42afcbbb43891e72731f3dcca3bef2a19786297baf14af7" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "libc", "winapi 0.3.9", ] @@ -3634,7 +3638,7 @@ checksum = "b7fd5681d13fda646462cfbd4e5f2051279a89a544d50eb98c365b507246839f" dependencies = [ "bitflags", "bytes 0.4.12", - "cfg-if", + "cfg-if 0.1.10", "gcc", "libc", "void", @@ -3642,15 +3646,14 @@ dependencies = [ [[package]] name = "nix" -version = "0.17.0" +version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50e4785f2c3b7589a0d0c1dd60285e1188adac4006e8abd6dd578e1567027363" +checksum = "b2ccba0cfe4fdf15982d1674c69b1fd80bad427d293849982668dfe454bd61f2" dependencies = [ "bitflags", "cc", - "cfg-if", + "cfg-if 1.0.0", "libc", - "void", ] [[package]] @@ -3764,22 +3767,13 @@ dependencies = [ "wasmparser 0.57.0", ] -[[package]] -name = "once_cell" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "532c29a261168a45ce28948f9537ddd7a5dd272cc513b3017b1e82a88f962c37" -dependencies = [ - "parking_lot 0.7.1", -] - [[package]] name = "once_cell" version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "260e51e7efe62b592207e9e13a68e43692a7a279171d6ba57abd208bf23645ad" dependencies = [ - "parking_lot 0.11.0", + "parking_lot 0.11.1", ] [[package]] @@ -3800,6 +3794,15 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" 
+[[package]] +name = "output_vt100" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53cdc5b785b7a58c5aad8216b3dfa114df64b0b06ae6e1501cef91df2fbdf8f9" +dependencies = [ + "winapi 0.3.9", +] + [[package]] name = "owning_ref" version = "0.4.1" @@ -3811,14 +3814,14 @@ dependencies = [ [[package]] name = "owo-colors" -version = "1.1.3" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a1250cdd103eef6bd542b5ae82989f931fc00a41a27f60377338241594410f3" +checksum = "13370dae44474229701bb69b90b4f4dca6404cb0357a2d50d635f1171dc3aa7b" [[package]] name = "pallet-authority-discovery" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "frame-support", "frame-system", @@ -3834,11 +3837,11 @@ dependencies = [ [[package]] name = "pallet-authorship" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "frame-support", "frame-system", - "impl-trait-for-tuples", + "impl-trait-for-tuples 0.1.3", "parity-scale-codec", "sp-authorship", "sp-inherents", @@ -3849,7 +3852,7 @@ dependencies = [ [[package]] name = "pallet-babe" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "frame-benchmarking", "frame-support", @@ -3874,7 +3877,7 @@ dependencies = [ [[package]] name = "pallet-balances" version = "2.0.0" -source = 
"git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "frame-benchmarking", "frame-support", @@ -3888,7 +3891,7 @@ dependencies = [ [[package]] name = "pallet-collective" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "frame-benchmarking", "frame-support", @@ -3904,7 +3907,7 @@ dependencies = [ [[package]] name = "pallet-democracy" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "frame-benchmarking", "frame-support", @@ -3919,7 +3922,7 @@ dependencies = [ [[package]] name = "pallet-elections-phragmen" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "frame-benchmarking", "frame-support", @@ -3934,7 +3937,7 @@ dependencies = [ [[package]] name = "pallet-grandpa" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "frame-benchmarking", "frame-support", @@ -3955,7 +3958,7 @@ dependencies = [ [[package]] name = "pallet-identity" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "enumflags2", "frame-benchmarking", @@ -3971,7 +3974,7 @@ dependencies = [ [[package]] name = "pallet-im-online" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "frame-benchmarking", "frame-support", @@ -3991,7 +3994,7 @@ dependencies = [ [[package]] name = "pallet-indices" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "frame-benchmarking", "frame-support", @@ -4008,7 +4011,7 @@ dependencies = [ [[package]] name = "pallet-membership" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "frame-support", "frame-system", @@ -4022,7 +4025,7 @@ dependencies = [ [[package]] name = "pallet-multisig" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "frame-benchmarking", "frame-support", @@ -4038,7 +4041,7 @@ dependencies = [ [[package]] name = "pallet-nicks" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" 
dependencies = [ "frame-support", "frame-system", @@ -4052,7 +4055,7 @@ dependencies = [ [[package]] name = "pallet-offences" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "frame-support", "frame-system", @@ -4067,7 +4070,7 @@ dependencies = [ [[package]] name = "pallet-offences-benchmarking" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "frame-benchmarking", "frame-support", @@ -4088,7 +4091,7 @@ dependencies = [ [[package]] name = "pallet-proxy" version = "2.0.1" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "frame-benchmarking", "frame-support", @@ -4104,7 +4107,7 @@ dependencies = [ [[package]] name = "pallet-randomness-collective-flip" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "frame-support", "frame-system", @@ -4117,7 +4120,7 @@ dependencies = [ [[package]] name = "pallet-recovery" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "enumflags2", "frame-support", @@ -4132,7 +4135,7 @@ dependencies = [ [[package]] name = 
"pallet-scheduler" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "frame-benchmarking", "frame-support", @@ -4147,11 +4150,11 @@ dependencies = [ [[package]] name = "pallet-session" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "frame-support", "frame-system", - "impl-trait-for-tuples", + "impl-trait-for-tuples 0.1.3", "pallet-timestamp", "parity-scale-codec", "serde", @@ -4167,7 +4170,7 @@ dependencies = [ [[package]] name = "pallet-session-benchmarking" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "frame-benchmarking", "frame-support", @@ -4183,7 +4186,7 @@ dependencies = [ [[package]] name = "pallet-society" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "frame-support", "frame-system", @@ -4197,7 +4200,7 @@ dependencies = [ [[package]] name = "pallet-staking" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "frame-benchmarking", "frame-support", @@ -4219,18 +4222,18 @@ dependencies = [ [[package]] name = 
"pallet-staking-reward-curve" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "proc-macro-crate", - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", ] [[package]] name = "pallet-sudo" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "frame-support", "frame-system", @@ -4244,12 +4247,12 @@ dependencies = [ [[package]] name = "pallet-timestamp" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "impl-trait-for-tuples", + "impl-trait-for-tuples 0.1.3", "parity-scale-codec", "serde", "sp-inherents", @@ -4262,14 +4265,14 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "frame-support", "frame-system", "pallet-transaction-payment-rpc-runtime-api", "parity-scale-codec", "serde", - "smallvec 1.4.2", + "smallvec 1.5.1", "sp-core", "sp-io", "sp-runtime", @@ -4279,7 +4282,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "jsonrpc-core", "jsonrpc-core-client", @@ -4297,7 +4300,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "frame-support", "parity-scale-codec", @@ -4310,7 +4313,7 @@ dependencies = [ [[package]] name = "pallet-treasury" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "frame-benchmarking", "frame-support", @@ -4325,7 +4328,7 @@ dependencies = [ [[package]] name = "pallet-utility" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "frame-benchmarking", "frame-support", @@ -4341,7 +4344,7 @@ dependencies = [ [[package]] name = "pallet-vesting" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "enumflags2", "frame-benchmarking", @@ -4362,26 +4365,26 @@ dependencies = [ "blake2-rfc", "crc32fast", "libc", - "log 0.4.11", + "log", "memmap", "parking_lot 0.10.2", ] [[package]] name = "parity-multiaddr" -version = "0.9.2" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2165a93382a93de55868dcbfa11e4a8f99676a9164eee6a2b4a9479ad319c257" +checksum = "2f51a30667591b14f96068b2d12f1306d07a41ebd98239d194356d4d9707ac16" dependencies = [ "arrayref", "bs58", - "byteorder 1.3.4", + "byteorder", "data-encoding", "multihash", "percent-encoding 2.1.0", "serde", "static_assertions", - "unsigned-varint 0.4.0", + "unsigned-varint", "url 2.1.1", ] @@ -4391,7 +4394,7 @@ version = "1.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c740e5fbcb6847058b40ac7e5574766c6388f585e184d769910fe0d3a2ca861" dependencies = [ - "arrayvec 0.5.1", + "arrayvec 0.5.2", "bitvec", "byte-slice-cast", "parity-scale-codec-derive", @@ -4405,9 +4408,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "198db82bb1c18fc00176004462dd809b2a6d851669550aa17af6dacd21ae0c14" dependencies = [ "proc-macro-crate", - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", ] [[package]] @@ -4425,7 +4428,7 @@ dependencies = [ "bytes 0.4.12", "futures 0.1.29", "libc", - "log 0.4.11", + "log", "mio-named-pipes", "miow 0.3.5", "rand 0.7.3", @@ -4441,14 +4444,14 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "297ff91fa36aec49ce183484b102f6b75b46776822bd81525bfc4cc9b0dd0f5c" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "hashbrown 0.8.0", - "impl-trait-for-tuples", + "impl-trait-for-tuples 0.1.3", "jemallocator", "parity-util-mem-derive", "parking_lot 0.10.2", "primitive-types", - "smallvec 1.4.2", + "smallvec 1.5.1", "winapi 0.3.9", ] @@ -4458,11 +4461,20 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2" dependencies = [ - "proc-macro2 1.0.18", - "syn 1.0.33", + "proc-macro2 1.0.24", + "syn 1.0.48", "synstructure", ] +[[package]] +name = "parity-wasm" +version = "0.32.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "16ad52817c4d343339b3bc2e26861bd21478eda0b7509acf83505727000512ac" +dependencies = [ + "byteorder", +] + [[package]] name = "parity-wasm" version = "0.41.0" @@ -4475,10 +4487,10 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e02a625dd75084c2a7024f07c575b61b782f729d18702dabb3cdbf31911dc61" dependencies = [ - "byteorder 1.3.4", + "byteorder", "bytes 0.4.12", "httparse", - "log 0.4.11", + "log", "mio", "mio-extras", "rand 0.7.3", @@ -4493,16 +4505,6 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" -[[package]] -name = "parking_lot" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab41b4aed082705d1056416ae4468b6ea99d52599ecf3169b00088d43113e337" -dependencies = [ - "lock_api 0.1.5", - "parking_lot_core 0.4.0", -] - [[package]] name = "parking_lot" version = "0.9.0" @@ -4526,35 +4528,22 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4893845fa2ca272e647da5d0e46660a314ead9c2fdd9a883aabc32e481a8733" +checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb" dependencies = [ "instant", "lock_api 0.4.1", "parking_lot_core 0.8.0", ] -[[package]] -name = "parking_lot_core" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94c8c7923936b28d546dfd14d4472eaf34c99b14e1c973a32b3e6d4eb04298c9" -dependencies = [ - "libc", - "rand 0.6.5", - "rustc_version", - "smallvec 0.6.13", - "winapi 0.3.9", -] - [[package]] name = "parking_lot_core" version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409cbbbdcfc8d86c6f3bc62b" dependencies = [ - 
"cfg-if", + "cfg-if 0.1.10", "cloudabi 0.0.3", "libc", "redox_syscall", @@ -4569,11 +4558,11 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d58c7c768d4ba344e3e8d72518ac13e259d7c7ade24167003b8488e10b6740a3" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "cloudabi 0.0.3", "libc", "redox_syscall", - "smallvec 1.4.2", + "smallvec 1.5.1", "winapi 0.3.9", ] @@ -4583,12 +4572,12 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c361aa727dd08437f2f1447be8b59a33b0edd15e0fcee698f935613d9efbca9b" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "cloudabi 0.1.0", "instant", "libc", "redox_syscall", - "smallvec 1.4.2", + "smallvec 1.5.1", "winapi 0.3.9", ] @@ -4617,9 +4606,17 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "006c038a43a45995a9670da19e67600114740e8511d4333bf97a56e66a7542d9" dependencies = [ - "byteorder 1.3.4", + "byteorder", "crypto-mac 0.7.0", - "rayon", +] + +[[package]] +name = "pbkdf2" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "216eaa586a190f0a738f2f918511eecfa90f13295abec0e457cdebcceda80cbd" +dependencies = [ + "crypto-mac 0.8.0", ] [[package]] @@ -4673,9 +4670,9 @@ checksum = "99b8db626e31e5b81787b9783425769681b347011cc59471e33ea46d2ea0cf55" dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", ] [[package]] @@ -4705,7 +4702,16 @@ version = "0.4.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca4433fff2ae79342e497d9f8ee990d174071408f28f726d6d83af93e58e48aa" dependencies = [ - "pin-project-internal", + "pin-project-internal 0.4.23", +] + +[[package]] +name = "pin-project" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ccc2237c2c489783abd8c4c80e5450fc0e98644555b1364da68cc29aa151ca7" +dependencies = 
[ + "pin-project-internal 1.0.2", ] [[package]] @@ -4714,9 +4720,20 @@ version = "0.4.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c0e815c3ee9a031fdf5af21c10aa17c573c9c6a566328d99e3936c34e36461f" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", + "quote 1.0.7", + "syn 1.0.48", +] + +[[package]] +name = "pin-project-internal" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8e8d2bf0b23038a4424865103a4df472855692821aab4e4f5c3312d461d9e5f" +dependencies = [ + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", ] [[package]] @@ -4725,6 +4742,12 @@ version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282adbf10f2698a7a77f8e983a74b2d18176c19a7fd32a45446139ae7b02b715" +[[package]] +name = "pin-project-lite" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b063f57ec186e6140e2b8b6921e5f1bd89c7356dda5b33acc5401203ca6131c" + [[package]] name = "pin-utils" version = "0.1.0" @@ -4745,16 +4768,17 @@ checksum = "feb3b2b1033b8a60b4da6ee470325f887758c95d5320f52f9ce0df055a55940e" [[package]] name = "polkadot" -version = "0.8.26" +version = "0.8.27" dependencies = [ "assert_cmd", "color-eyre", - "futures 0.3.5", - "nix 0.17.0", + "futures 0.3.8", + "nix 0.19.1", "parity-util-mem", "polkadot-cli", "polkadot-service", "tempfile", + "thiserror", ] [[package]] @@ -4763,9 +4787,9 @@ version = "0.1.0" dependencies = [ "assert_matches", "bitvec", - "env_logger 0.7.1", - "futures 0.3.5", - "log 0.4.11", + "env_logger 0.8.2", + "futures 0.3.8", + "log", "maplit", "parity-scale-codec", "polkadot-node-network-protocol", @@ -4773,11 +4797,11 @@ dependencies = [ "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", "polkadot-primitives", - "sc-keystore", "sp-application-crypto", "sp-core", "sp-keystore", - "tempfile", + "tracing", + "tracing-futures", ] [[package]] @@ 
-4785,10 +4809,10 @@ name = "polkadot-availability-distribution" version = "0.1.0" dependencies = [ "assert_matches", - "env_logger 0.7.1", - "futures 0.3.5", + "env_logger 0.8.2", + "futures 0.3.8", "futures-timer 3.0.2", - "log 0.4.11", + "log", "parity-scale-codec", "polkadot-erasure-coding", "polkadot-node-network-protocol", @@ -4797,20 +4821,23 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-primitives", "sc-keystore", - "smallvec 1.4.2", + "smallvec 1.5.1", "sp-application-crypto", "sp-core", "sp-keyring", "sp-keystore", "thiserror", + "tracing", + "tracing-futures", ] [[package]] name = "polkadot-cli" -version = "0.8.26" +version = "0.8.27" dependencies = [ "frame-benchmarking-cli", - "log 0.4.11", + "log", + "polkadot-parachain", "polkadot-service", "sc-cli", "sc-service", @@ -4820,6 +4847,8 @@ dependencies = [ "structopt", "substrate-browser-utils", "substrate-build-script-utils", + "thiserror", + "tracing-futures", "wasm-bindgen", "wasm-bindgen-futures", ] @@ -4829,19 +4858,21 @@ name = "polkadot-collator-protocol" version = "0.1.0" dependencies = [ "assert_matches", - "env_logger 0.7.1", - "futures 0.3.5", + "env_logger 0.8.2", + "futures 0.3.8", "futures-timer 3.0.2", - "log 0.4.11", + "log", "polkadot-node-network-protocol", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", "polkadot-primitives", - "smallvec 1.4.2", + "smallvec 1.5.1", "sp-core", "sp-keyring", "thiserror", + "tracing", + "tracing-futures", ] [[package]] @@ -4856,7 +4887,7 @@ dependencies = [ [[package]] name = "polkadot-erasure-coding" -version = "0.8.26" +version = "0.8.27" dependencies = [ "parity-scale-codec", "polkadot-primitives", @@ -4872,10 +4903,9 @@ version = "0.1.0" dependencies = [ "assert_matches", "async-trait", - "futures 0.3.5", - "log 0.4.11", + "futures 0.3.8", "parity-scale-codec", - "parking_lot 0.10.2", + "parking_lot 0.11.1", "polkadot-node-network-protocol", "polkadot-node-subsystem", 
"polkadot-node-subsystem-test-helpers", @@ -4884,15 +4914,15 @@ dependencies = [ "sc-network", "sp-core", "sp-keyring", - "sp-runtime", + "tracing", + "tracing-futures", ] [[package]] name = "polkadot-node-collation-generation" version = "0.1.0" dependencies = [ - "futures 0.3.5", - "log 0.4.11", + "futures 0.3.8", "polkadot-erasure-coding", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -4901,6 +4931,8 @@ dependencies = [ "polkadot-primitives", "sp-core", "thiserror", + "tracing", + "tracing-futures", ] [[package]] @@ -4908,13 +4940,13 @@ name = "polkadot-node-core-av-store" version = "0.1.0" dependencies = [ "assert_matches", - "env_logger 0.7.1", - "futures 0.3.5", + "env_logger 0.8.2", + "futures 0.3.8", "futures-timer 3.0.2", "kvdb", "kvdb-memorydb", "kvdb-rocksdb", - "log 0.4.11", + "log", "parity-scale-codec", "polkadot-erasure-coding", "polkadot-node-subsystem", @@ -4923,9 +4955,11 @@ dependencies = [ "polkadot-overseer", "polkadot-primitives", "sc-service", - "smallvec 1.4.2", + "smallvec 1.5.1", "sp-core", "thiserror", + "tracing", + "tracing-futures", ] [[package]] @@ -4934,8 +4968,7 @@ version = "0.1.0" dependencies = [ "assert_matches", "bitvec", - "futures 0.3.5", - "log 0.4.11", + "futures 0.3.8", "polkadot-erasure-coding", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -4949,20 +4982,22 @@ dependencies = [ "sp-keyring", "sp-keystore", "thiserror", + "tracing", + "tracing-futures", ] [[package]] name = "polkadot-node-core-bitfield-signing" version = "0.1.0" dependencies = [ - "bitvec", - "futures 0.3.5", - "log 0.4.11", + "futures 0.3.8", "polkadot-node-subsystem", "polkadot-node-subsystem-util", "polkadot-primitives", "sp-keystore", "thiserror", + "tracing", + "tracing-futures", "wasm-timer", ] @@ -4970,14 +5005,15 @@ dependencies = [ name = "polkadot-node-core-candidate-selection" version = "0.1.0" dependencies = [ - "futures 0.3.5", - "log 0.4.11", - "polkadot-node-primitives", + "futures 0.3.8", "polkadot-node-subsystem", 
"polkadot-node-subsystem-util", "polkadot-primitives", "sp-core", + "sp-keystore", "thiserror", + "tracing", + "tracing-futures", ] [[package]] @@ -4985,8 +5021,7 @@ name = "polkadot-node-core-candidate-validation" version = "0.1.0" dependencies = [ "assert_matches", - "futures 0.3.5", - "log 0.4.11", + "futures 0.3.8", "parity-scale-codec", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -4996,13 +5031,15 @@ dependencies = [ "polkadot-primitives", "sp-core", "sp-keyring", + "tracing", + "tracing-futures", ] [[package]] name = "polkadot-node-core-chain-api" version = "0.1.0" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "maplit", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", @@ -5010,14 +5047,16 @@ dependencies = [ "polkadot-primitives", "sp-blockchain", "sp-core", + "tracing", + "tracing-futures", ] [[package]] name = "polkadot-node-core-proposer" version = "0.1.0" dependencies = [ - "futures 0.3.5", - "log 0.4.11", + "futures 0.3.8", + "futures-timer 3.0.2", "polkadot-node-subsystem", "polkadot-overseer", "polkadot-primitives", @@ -5032,7 +5071,7 @@ dependencies = [ "sp-runtime", "sp-transaction-pool", "substrate-prometheus-endpoint", - "wasm-timer", + "tracing", ] [[package]] @@ -5040,30 +5079,31 @@ name = "polkadot-node-core-provisioner" version = "0.1.0" dependencies = [ "bitvec", - "futures 0.3.5", + "futures 0.3.8", "futures-timer 3.0.2", - "log 0.4.11", "polkadot-node-subsystem", "polkadot-node-subsystem-util", "polkadot-primitives", - "sc-keystore", "sp-application-crypto", "sp-keystore", - "tempfile", "thiserror", + "tracing", + "tracing-futures", ] [[package]] name = "polkadot-node-core-runtime-api" version = "0.1.0" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", "polkadot-primitives", "sp-api", "sp-core", + "tracing", + "tracing-futures", ] [[package]] @@ -5080,7 +5120,7 @@ dependencies = [ name = 
"polkadot-node-primitives" version = "0.1.0" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "parity-scale-codec", "polkadot-primitives", "polkadot-statement-table", @@ -5095,22 +5135,23 @@ dependencies = [ "assert_matches", "async-trait", "derive_more", - "futures 0.3.5", + "futures 0.3.8", "futures-timer 3.0.2", - "log 0.4.11", "parity-scale-codec", - "parking_lot 0.10.2", - "pin-project", + "parking_lot 0.11.1", + "pin-project 1.0.2", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem-test-helpers", "polkadot-primitives", "polkadot-statement-table", "sc-network", - "smallvec 1.4.2", + "smallvec 1.5.1", "sp-core", "substrate-prometheus-endpoint", "thiserror", + "tracing", + "tracing-futures", ] [[package]] @@ -5118,12 +5159,11 @@ name = "polkadot-node-subsystem-test-helpers" version = "0.1.0" dependencies = [ "async-trait", - "futures 0.3.5", + "futures 0.3.8", "futures-timer 3.0.2", - "log 0.4.11", "parity-scale-codec", - "parking_lot 0.10.2", - "pin-project", + "parking_lot 0.11.1", + "pin-project 1.0.2", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-util", @@ -5131,8 +5171,10 @@ dependencies = [ "polkadot-primitives", "polkadot-statement-table", "sc-network", - "smallvec 1.4.2", + "smallvec 1.5.1", "sp-core", + "tracing", + "tracing-futures", ] [[package]] @@ -5141,13 +5183,13 @@ version = "0.1.0" dependencies = [ "assert_matches", "async-trait", - "env_logger 0.7.1", - "futures 0.3.5", + "env_logger 0.8.2", + "futures 0.3.8", "futures-timer 3.0.2", - "log 0.4.11", + "log", "parity-scale-codec", - "parking_lot 0.10.2", - "pin-project", + "parking_lot 0.11.1", + "pin-project 1.0.2", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", @@ -5159,6 +5201,8 @@ dependencies = [ "streamunordered", "substrate-prometheus-endpoint", "thiserror", + "tracing", + "tracing-futures", ] [[package]] @@ -5167,10 +5211,9 @@ version = "0.1.0" dependencies = [ 
"async-trait", "femme", - "futures 0.3.5", + "futures 0.3.8", "futures-timer 3.0.2", "kv-log-macro", - "log 0.4.11", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -5179,17 +5222,19 @@ dependencies = [ "sc-client-api", "sp-core", "streamunordered", + "tracing", + "tracing-futures", ] [[package]] name = "polkadot-parachain" -version = "0.8.26" +version = "0.8.27" dependencies = [ "derive_more", - "futures 0.3.5", - "log 0.4.11", + "futures 0.3.8", + "log", "parity-scale-codec", - "parking_lot 0.10.2", + "parking_lot 0.11.1", "polkadot-core-primitives", "sc-executor", "serde", @@ -5200,6 +5245,7 @@ dependencies = [ "sp-runtime", "sp-std", "sp-wasm-interface", + "thiserror", ] [[package]] @@ -5207,19 +5253,25 @@ name = "polkadot-pov-distribution" version = "0.1.0" dependencies = [ "assert_matches", - "futures 0.3.5", - "log 0.4.11", + "env_logger 0.8.2", + "futures 0.3.8", + "log", "polkadot-node-network-protocol", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", "polkadot-node-subsystem-util", "polkadot-primitives", + "smallvec 1.5.1", "sp-core", + "sp-keyring", + "thiserror", + "tracing", + "tracing-futures", ] [[package]] name = "polkadot-primitives" -version = "0.8.26" +version = "0.8.27" dependencies = [ "bitvec", "frame-system", @@ -5245,7 +5297,7 @@ dependencies = [ [[package]] name = "polkadot-rpc" -version = "0.8.26" +version = "0.8.27" dependencies = [ "jsonrpc-core", "pallet-transaction-payment-rpc", @@ -5274,7 +5326,7 @@ dependencies = [ [[package]] name = "polkadot-runtime" -version = "0.8.26" +version = "0.8.27" dependencies = [ "bitvec", "frame-benchmarking", @@ -5283,9 +5335,9 @@ dependencies = [ "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", - "hex-literal 0.2.1", + "hex-literal", "libsecp256k1", - "log 0.3.9", + "log", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", @@ -5322,7 +5374,7 @@ dependencies = [ "serde", "serde_derive", 
"serde_json", - "smallvec 1.4.2", + "smallvec 1.5.1", "sp-api", "sp-authority-discovery", "sp-block-builder", @@ -5340,22 +5392,22 @@ dependencies = [ "sp-trie", "sp-version", "static_assertions", - "substrate-wasm-builder-runner", - "tiny-keccak 1.5.0", + "substrate-wasm-builder", + "tiny-keccak", "trie-db", ] [[package]] name = "polkadot-runtime-common" -version = "0.8.26" +version = "0.8.27" dependencies = [ "bitvec", "frame-benchmarking", "frame-support", "frame-system", - "hex-literal 0.2.1", + "hex-literal", "libsecp256k1", - "log 0.3.9", + "log", "pallet-authorship", "pallet-babe", "pallet-balances", @@ -5395,13 +5447,14 @@ name = "polkadot-runtime-parachains" version = "0.8.0" dependencies = [ "bitvec", + "derive_more", "frame-benchmarking", "frame-support", "frame-system", - "futures 0.3.5", - "hex-literal 0.2.1", + "futures 0.3.8", + "hex-literal", "libsecp256k1", - "log 0.3.9", + "log", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", @@ -5421,7 +5474,6 @@ dependencies = [ "rustc-hex", "sc-keystore", "serde", - "serde_derive", "serde_json", "sp-api", "sp-application-crypto", @@ -5436,26 +5488,23 @@ dependencies = [ "sp-std", "sp-trie", "sp-version", + "xcm", ] [[package]] name = "polkadot-service" version = "0.8.3" dependencies = [ - "env_logger 0.8.1", + "env_logger 0.8.2", "frame-benchmarking", "frame-system-rpc-runtime-api", - "futures 0.3.5", - "hex-literal 0.2.1", + "futures 0.3.8", + "hex-literal", "kusama-runtime", - "lazy_static", - "log 0.4.11", "pallet-babe", "pallet-im-online", "pallet-staking", "pallet-transaction-payment-rpc-runtime-api", - "parity-scale-codec", - "parking_lot 0.9.0", "polkadot-availability-bitfield-distribution", "polkadot-availability-distribution", "polkadot-collator-protocol", @@ -5478,9 +5527,10 @@ dependencies = [ "polkadot-primitives", "polkadot-rpc", "polkadot-runtime", + "polkadot-runtime-parachains", "polkadot-statement-distribution", "polkadot-test-client", - "rococo-v1-runtime", + 
"rococo-runtime", "sc-authority-discovery", "sc-block-builder", "sc-chain-spec", @@ -5488,6 +5538,7 @@ dependencies = [ "sc-client-db", "sc-consensus", "sc-consensus-babe", + "sc-consensus-slots", "sc-executor", "sc-finality-grandpa", "sc-network", @@ -5495,7 +5546,6 @@ dependencies = [ "sc-telemetry", "sc-transaction-pool", "serde", - "slog", "sp-api", "sp-authority-discovery", "sp-block-builder", @@ -5514,6 +5564,8 @@ dependencies = [ "sp-transaction-pool", "sp-trie", "substrate-prometheus-endpoint", + "tracing", + "tracing-futures", "westend-runtime", ] @@ -5521,11 +5573,10 @@ dependencies = [ name = "polkadot-statement-distribution" version = "0.1.0" dependencies = [ - "arrayvec 0.5.1", + "arrayvec 0.5.2", "assert_matches", - "futures 0.3.5", + "futures 0.3.8", "indexmap", - "log 0.4.11", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -5538,11 +5589,13 @@ dependencies = [ "sp-keyring", "sp-keystore", "sp-staking", + "tracing", + "tracing-futures", ] [[package]] name = "polkadot-statement-table" -version = "0.8.26" +version = "0.8.27" dependencies = [ "parity-scale-codec", "polkadot-primitives", @@ -5551,9 +5604,10 @@ dependencies = [ [[package]] name = "polkadot-test-client" -version = "0.8.26" +version = "0.8.27" dependencies = [ "parity-scale-codec", + "polkadot-node-subsystem", "polkadot-primitives", "polkadot-test-runtime", "polkadot-test-service", @@ -5574,16 +5628,16 @@ dependencies = [ [[package]] name = "polkadot-test-runtime" -version = "0.8.26" +version = "0.8.27" dependencies = [ "bitvec", "frame-executive", "frame-support", "frame-system", "frame-system-rpc-runtime-api", - "hex-literal 0.2.1", + "hex-literal", "libsecp256k1", - "log 0.3.9", + "log", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", @@ -5610,7 +5664,7 @@ dependencies = [ "serde", "serde_derive", "serde_json", - "smallvec 1.4.2", + "smallvec 1.5.1", "sp-api", "sp-authority-discovery", "sp-block-builder", @@ -5627,24 +5681,26 
@@ dependencies = [ "sp-transaction-pool", "sp-trie", "sp-version", - "substrate-wasm-builder-runner", - "tiny-keccak 1.5.0", + "substrate-wasm-builder", + "tiny-keccak", ] [[package]] name = "polkadot-test-service" -version = "0.8.26" +version = "0.8.27" dependencies = [ "frame-benchmarking", "frame-system", "futures 0.1.29", - "futures 0.3.5", + "futures 0.3.8", "hex", - "log 0.4.11", "pallet-balances", "pallet-staking", "pallet-transaction-payment", + "polkadot-node-primitives", + "polkadot-node-subsystem", "polkadot-overseer", + "polkadot-parachain", "polkadot-primitives", "polkadot-rpc", "polkadot-runtime-common", @@ -5660,12 +5716,12 @@ dependencies = [ "sc-consensus-babe", "sc-executor", "sc-finality-grandpa", - "sc-informant", "sc-network", "sc-service", "sc-transaction-pool", "serde_json", "sp-arithmetic", + "sp-authority-discovery", "sp-blockchain", "sp-consensus", "sp-consensus-babe", @@ -5679,14 +5735,16 @@ dependencies = [ "substrate-test-utils", "tempfile", "tokio 0.2.21", + "tracing", + "tracing-futures", ] [[package]] name = "polkadot-validation" -version = "0.8.26" +version = "0.8.27" dependencies = [ - "futures 0.3.5", - "log 0.4.11", + "futures 0.3.8", + "log", "parity-scale-codec", "polkadot-parachain", "polkadot-primitives", @@ -5711,14 +5769,14 @@ dependencies = [ [[package]] name = "polling" -version = "1.1.0" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0720e0b9ea9d52451cf29d3413ba8a9303f8815d9d9653ef70e03ff73e65566" +checksum = "a2a7bc6b2a29e632e45451c941832803a18cce6781db04de8a04696cdca8bde4" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "libc", - "log 0.4.11", - "wepoll-sys-stjepang", + "log", + "wepoll-sys", "winapi 0.3.9", ] @@ -5737,7 +5795,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9a50142b55ab3ed0e9f68dfb3709f1d90d29da24e91033f28b96330643107dc" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "universal-hash", ] @@ -5775,12 
+5833,14 @@ dependencies = [ [[package]] name = "pretty_assertions" -version = "0.5.1" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a029430f0d744bc3d15dd474d591bed2402b645d024583082b9f63bb936dac6" +checksum = "3f81e1644e1b54f5a68959a29aa86cde704219254669da328ecfdf6a1f09d427" dependencies = [ "ansi_term 0.11.0", + "ctor", "difference", + "output_vt100", ] [[package]] @@ -5797,44 +5857,42 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e10d4b51f154c8a7fb96fd6dad097cb74b863943ec010ac94b9fd1be8861fe1e" +checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" dependencies = [ "toml", ] [[package]] name = "proc-macro-error" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc175e9777c3116627248584e8f8b3e2987405cabe1c0adf7d1dd28f09dc7880" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", "version_check", ] [[package]] name = "proc-macro-error-attr" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cc9795ca17eb581285ec44936da7fc2335a3f34f2ddd13118b6f4d515435c50" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", - "syn-mid", "version_check", ] [[package]] name = "proc-macro-hack" -version = "0.5.16" +version = "0.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e0456befd48169b9f13ef0f0ad46d492cf9d2dbb918bcf38e01eed4ce3ec5e4" +checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = 
"proc-macro-nested" @@ -5853,9 +5911,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.18" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "beae6331a816b1f65d04c45b078fd8e6c93e8071771f41b8163255bbd8d7c8fa" +checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71" dependencies = [ "unicode-xid 0.2.1", ] @@ -5866,10 +5924,10 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30d70cf4412832bcac9cffe27906f4a66e450d323525e977168c70d1b36120ae" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "fnv", "lazy_static", - "parking_lot 0.11.0", + "parking_lot 0.11.1", "regex", "thiserror", ] @@ -5893,7 +5951,7 @@ dependencies = [ "bytes 0.5.6", "heck", "itertools 0.8.2", - "log 0.4.11", + "log", "multimap", "petgraph", "prost", @@ -5910,9 +5968,9 @@ checksum = "537aa19b95acde10a12fec4301466386f757403de4cd4e5b4fa78fb5ecb18f72" dependencies = [ "anyhow", "itertools 0.8.2", - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", ] [[package]] @@ -5931,9 +5989,9 @@ version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f53bc2558e8376358ebdc28301546471d67336584f6438ed4b7c7457a055fd7" dependencies = [ - "byteorder 1.3.4", - "log 0.4.11", - "parity-wasm", + "byteorder", + "log", + "parity-wasm 0.41.0", ] [[package]] @@ -5956,7 +6014,7 @@ checksum = "77de3c815e5a160b1539c6592796801df2043ae35e123b46d73380cfa57af858" dependencies = [ "futures-core", "futures-sink", - "pin-project-lite", + "pin-project-lite 0.1.7", ] [[package]] @@ -5974,7 +6032,7 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", ] [[package]] @@ -6006,19 +6064,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "rand" 
-version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c618c47cd3ebd209790115ab837de41425723956ad3ce2e6a7f09890947cacb9" -dependencies = [ - "cloudabi 0.0.3", - "fuchsia-cprng", - "libc", - "rand_core 0.3.1", - "winapi 0.3.9", -] - [[package]] name = "rand" version = "0.6.5" @@ -6096,6 +6141,15 @@ dependencies = [ "getrandom", ] +[[package]] +name = "rand_distr" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96977acbdd3a6576fb1d27391900035bf3863d4a16422973a409b488cf29ffb2" +dependencies = [ + "rand 0.7.3", +] + [[package]] name = "rand_hc" version = "0.1.0" @@ -6251,7 +6305,7 @@ version = "4.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a415a013dd7c5d4221382329a5a3482566da675737494935cbbbcdec04662f9d" dependencies = [ - "smallvec 1.4.2", + "smallvec 1.5.1", ] [[package]] @@ -6269,9 +6323,9 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d21b475ab879ef0e315ad99067fa25778c3b0377f57f1b00207448dac1a3144" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", ] [[package]] @@ -6280,16 +6334,16 @@ version = "0.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9ba8aaf5fe7cf307c6dbdaeed85478961d29e25e3bee5169e11b92fa9f027a8" dependencies = [ - "log 0.4.11", + "log", "rustc-hash", - "smallvec 1.4.2", + "smallvec 1.5.1", ] [[package]] name = "regex" -version = "1.3.9" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c3780fcf44b193bc4d09f36d2a3c87b251da4a046c87795a0d35f4f927ad8e6" +checksum = "38cf2c13ed4745de91a5eb834e11c00bcc3709e773173b2ce4c56c9fbde04b9c" dependencies = [ "aho-corasick", "memchr", @@ -6303,15 +6357,15 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4" dependencies = [ - "byteorder 1.3.4", + "byteorder", "regex-syntax", ] [[package]] name = "regex-syntax" -version = "0.6.18" +version = "0.6.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8" +checksum = "3b181ba2dcf07aaccad5448e8ead58db5b742cf85dfe035e2227f137a539a189" [[package]] name = "region" @@ -6348,7 +6402,7 @@ checksum = "952cd6b98c85bbc30efa1ba5783b8abf12fec8b3287ffa52605b9432313e34e4" dependencies = [ "cc", "libc", - "once_cell 1.4.1", + "once_cell", "spin", "untrusted", "web-sys", @@ -6366,8 +6420,8 @@ dependencies = [ ] [[package]] -name = "rococo-v1-runtime" -version = "0.8.26" +name = "rococo-runtime" +version = "0.8.27" dependencies = [ "frame-executive", "frame-support", @@ -6384,6 +6438,7 @@ dependencies = [ "pallet-session", "pallet-staking", "pallet-staking-reward-curve", + "pallet-sudo", "pallet-timestamp", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", @@ -6394,7 +6449,7 @@ dependencies = [ "polkadot-runtime-parachains", "serde", "serde_derive", - "smallvec 1.4.2", + "smallvec 1.5.1", "sp-api", "sp-authority-discovery", "sp-block-builder", @@ -6409,14 +6464,14 @@ dependencies = [ "sp-std", "sp-transaction-pool", "sp-version", - "substrate-wasm-builder-runner", + "substrate-wasm-builder", ] [[package]] name = "rpassword" -version = "4.0.5" +version = "5.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99371657d3c8e4d816fb6221db98fa408242b0b53bac08f8676a41f8554fe99f" +checksum = "d755237fc0f99d98641540e66abac8bc46a0652f19148ac9e21de2da06b326c9" dependencies = [ "libc", "winapi 0.3.9", @@ -6464,7 +6519,7 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" dependencies = [ - "semver", + "semver 0.9.0", ] 
[[package]] @@ -6474,7 +6529,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cac94b333ee2aac3284c5b8a1b7fb4dd11cba88c244e3fe33cdbd047af0eb693" dependencies = [ "base64 0.12.3", - "log 0.4.11", + "log", + "ring", + "sct", + "webpki", +] + +[[package]] +name = "rustls" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "064fd21ff87c6e87ed4506e68beb42459caa4a0e2eb144932e6776768556980b" +dependencies = [ + "base64 0.13.0", + "log", "ring", "sct", "webpki", @@ -6487,7 +6555,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "629d439a7672da82dd955498445e496ee2096fe2117b9f796558a43fdb9e59b8" dependencies = [ "openssl-probe", - "rustls", + "rustls 0.18.0", "schannel", "security-framework", ] @@ -6498,8 +6566,8 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4da5fcb054c46f5a5dff833b129285a93d3f0179531735e6c866e8cc307d2020" dependencies = [ - "futures 0.3.5", - "pin-project", + "futures 0.3.8", + "pin-project 0.4.23", "static_assertions", ] @@ -6520,43 +6588,39 @@ dependencies = [ [[package]] name = "salsa20" -version = "0.3.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2324b0e8c3bb9a586a571fdb3136f70e7e2c748de00a78043f86e0cff91f91fe" +checksum = "c7f47b10fa80f6969bbbd9c8e7cc998f082979d402a9e10579e2303a87955395" dependencies = [ - "byteorder 1.3.4", - "salsa20-core", - "stream-cipher 0.3.2", + "stream-cipher 0.7.1", ] [[package]] -name = "salsa20-core" -version = "0.2.3" +name = "same-file" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fe6cc1b9f5a5867853ade63099de70f042f7679e408d1ffe52821c9248e6e69" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" dependencies = [ - "stream-cipher 0.3.2", + "winapi-util", ] [[package]] name = "sc-authority-discovery" version = "0.8.0" -source = 
"git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "async-trait", - "bytes 0.5.6", "derive_more", "either", - "futures 0.3.5", + "futures 0.3.8", "futures-timer 3.0.2", "libp2p", - "log 0.4.11", + "log", "parity-scale-codec", "prost", "prost-build", "rand 0.7.3", "sc-client-api", - "sc-keystore", "sc-network", "serde_json", "sp-api", @@ -6571,11 +6635,11 @@ dependencies = [ [[package]] name = "sc-basic-authorship" version = "0.8.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "futures-timer 3.0.2", - "log 0.4.11", + "log", "parity-scale-codec", "sc-block-builder", "sc-client-api", @@ -6589,13 +6653,12 @@ dependencies = [ "sp-runtime", "sp-transaction-pool", "substrate-prometheus-endpoint", - "tokio-executor 0.2.0-alpha.6", ] [[package]] name = "sc-block-builder" version = "0.8.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "parity-scale-codec", "sc-client-api", @@ -6612,9 +6675,9 @@ dependencies = [ [[package]] name = "sc-chain-spec" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ - "impl-trait-for-tuples", + "impl-trait-for-tuples 0.1.3", "parity-scale-codec", "sc-chain-spec-derive", "sc-consensus-babe", @@ -6633,28 +6696,26 @@ dependencies = [ 
[[package]] name = "sc-chain-spec-derive" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "proc-macro-crate", - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", ] [[package]] name = "sc-cli" version = "0.8.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ - "ansi_term 0.12.1", "atty", - "bip39", "chrono", "fdlimit", - "futures 0.3.5", + "futures 0.3.8", "hex", "libp2p", - "log 0.4.11", + "log", "names", "parity-scale-codec", "rand 0.7.3", @@ -6679,6 +6740,7 @@ dependencies = [ "sp-version", "structopt", "thiserror", + "tiny-bip39", "tokio 0.2.21", "tracing", "tracing-log", @@ -6688,31 +6750,29 @@ dependencies = [ [[package]] name = "sc-cli-proc-macro" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "proc-macro-crate", - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", ] [[package]] name = "sc-client-api" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "derive_more", "fnv", - "futures 0.3.5", + "futures 0.3.8", "hash-db", - "hex-literal 0.3.1", "kvdb", "lazy_static", - "log 0.4.11", + "log", "parity-scale-codec", "parking_lot 0.10.2", "sc-executor", - 
"sc-telemetry", "sp-api", "sp-blockchain", "sp-consensus", @@ -6720,7 +6780,6 @@ dependencies = [ "sp-database", "sp-externalities", "sp-inherents", - "sp-keyring", "sp-keystore", "sp-runtime", "sp-state-machine", @@ -6736,7 +6795,7 @@ dependencies = [ [[package]] name = "sc-client-db" version = "0.8.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "blake2-rfc", "hash-db", @@ -6744,7 +6803,7 @@ dependencies = [ "kvdb-memorydb", "kvdb-rocksdb", "linked-hash-map", - "log 0.4.11", + "log", "parity-db", "parity-scale-codec", "parity-util-mem", @@ -6766,7 +6825,7 @@ dependencies = [ [[package]] name = "sc-consensus" version = "0.8.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "sc-client-api", "sp-blockchain", @@ -6777,13 +6836,13 @@ dependencies = [ [[package]] name = "sc-consensus-babe" version = "0.8.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "derive_more", "fork-tree", - "futures 0.3.5", + "futures 0.3.8", "futures-timer 3.0.2", - "log 0.4.11", + "log", "merlin", "num-bigint", "num-rational", @@ -6822,10 +6881,10 @@ dependencies = [ [[package]] name = "sc-consensus-babe-rpc" version = "0.8.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "derive_more", - "futures 0.3.5", + "futures 
0.3.8", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", @@ -6846,7 +6905,7 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" version = "0.8.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "fork-tree", "parity-scale-codec", @@ -6859,17 +6918,18 @@ dependencies = [ [[package]] name = "sc-consensus-slots" version = "0.8.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "futures-timer 3.0.2", - "log 0.4.11", + "log", "parity-scale-codec", "parking_lot 0.10.2", "sc-client-api", "sc-telemetry", "sp-api", "sp-application-crypto", + "sp-arithmetic", "sp-blockchain", "sp-consensus", "sp-consensus-slots", @@ -6877,14 +6937,16 @@ dependencies = [ "sp-inherents", "sp-runtime", "sp-state-machine", + "sp-trie", + "thiserror", ] [[package]] name = "sc-consensus-uncles" version = "0.8.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ - "log 0.4.11", + "log", "sc-client-api", "sp-authorship", "sp-consensus", @@ -6896,14 +6958,14 @@ dependencies = [ [[package]] name = "sc-executor" version = "0.8.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "derive_more", "lazy_static", "libsecp256k1", - "log 0.4.11", + "log", "parity-scale-codec", - "parity-wasm", + 
"parity-wasm 0.41.0", "parking_lot 0.10.2", "sc-executor-common", "sc-executor-wasmi", @@ -6925,26 +6987,25 @@ dependencies = [ [[package]] name = "sc-executor-common" version = "0.8.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "derive_more", - "log 0.4.11", "parity-scale-codec", - "parity-wasm", + "parity-wasm 0.41.0", "sp-allocator", "sp-core", - "sp-runtime-interface", "sp-serializer", "sp-wasm-interface", + "thiserror", "wasmi", ] [[package]] name = "sc-executor-wasmi" version = "0.8.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ - "log 0.4.11", + "log", "parity-scale-codec", "sc-executor-common", "sp-allocator", @@ -6957,11 +7018,11 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" version = "0.8.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ - "log 0.4.11", + "log", "parity-scale-codec", - "parity-wasm", + "parity-wasm 0.41.0", "pwasm-utils", "sc-executor-common", "scoped-tls", @@ -6975,17 +7036,17 @@ dependencies = [ [[package]] name = "sc-finality-grandpa" version = "0.8.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "derive_more", "finality-grandpa", "fork-tree", - "futures 0.3.5", + "futures 0.3.8", "futures-timer 3.0.2", - "log 0.4.11", + "log", 
"parity-scale-codec", "parking_lot 0.10.2", - "pin-project", + "pin-project 0.4.23", "rand 0.7.3", "sc-block-builder", "sc-client-api", @@ -7012,16 +7073,16 @@ dependencies = [ [[package]] name = "sc-finality-grandpa-rpc" version = "0.8.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "derive_more", "finality-grandpa", - "futures 0.3.5", + "futures 0.3.8", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", "jsonrpc-pubsub", - "log 0.4.11", + "log", "parity-scale-codec", "sc-client-api", "sc-finality-grandpa", @@ -7036,11 +7097,11 @@ dependencies = [ [[package]] name = "sc-informant" version = "0.8.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "ansi_term 0.12.1", - "futures 0.3.5", - "log 0.4.11", + "futures 0.3.8", + "log", "parity-util-mem", "sc-client-api", "sc-network", @@ -7054,11 +7115,11 @@ dependencies = [ [[package]] name = "sc-keystore" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "async-trait", "derive_more", - "futures 0.3.5", + "futures 0.3.8", "futures-util", "hex", "merlin", @@ -7074,7 +7135,7 @@ dependencies = [ [[package]] name = "sc-light" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "hash-db", "lazy_static", @@ -7093,7 +7154,7 
@@ dependencies = [ [[package]] name = "sc-network" version = "0.8.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "async-std", "async-trait", @@ -7105,7 +7166,7 @@ dependencies = [ "erased-serde", "fnv", "fork-tree", - "futures 0.3.5", + "futures 0.3.8", "futures-timer 3.0.2", "futures_codec", "hex", @@ -7113,12 +7174,12 @@ dependencies = [ "libp2p", "linked-hash-map", "linked_hash_set", - "log 0.4.11", - "lru 0.4.3", + "log", + "lru", "nohash-hasher", "parity-scale-codec", - "parking_lot 0.10.2", - "pin-project", + "parking_lot 0.11.1", + "pin-project 0.4.23", "prost", "prost-build", "rand 0.7.3", @@ -7129,7 +7190,7 @@ dependencies = [ "serde_json", "slog", "slog_derive", - "smallvec 0.6.13", + "smallvec 1.5.1", "sp-arithmetic", "sp-blockchain", "sp-consensus", @@ -7138,7 +7199,7 @@ dependencies = [ "sp-utils", "substrate-prometheus-endpoint", "thiserror", - "unsigned-varint 0.4.0", + "unsigned-varint", "void", "wasm-timer", "zeroize", @@ -7147,13 +7208,13 @@ dependencies = [ [[package]] name = "sc-network-gossip" version = "0.8.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "futures-timer 3.0.2", "libp2p", - "log 0.4.11", - "lru 0.4.3", + "log", + "lru", "sc-network", "sp-runtime", "wasm-timer", @@ -7162,15 +7223,15 @@ dependencies = [ [[package]] name = "sc-offchain" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "bytes 
0.5.6", "fnv", - "futures 0.3.5", + "futures 0.3.8", "futures-timer 3.0.2", "hyper 0.13.6", "hyper-rustls", - "log 0.4.11", + "log", "num_cpus", "parity-scale-codec", "parking_lot 0.10.2", @@ -7189,11 +7250,11 @@ dependencies = [ [[package]] name = "sc-peerset" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "libp2p", - "log 0.4.11", + "log", "serde_json", "sp-utils", "wasm-timer", @@ -7202,22 +7263,22 @@ dependencies = [ [[package]] name = "sc-proposer-metrics" version = "0.8.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ - "log 0.4.11", + "log", "substrate-prometheus-endpoint", ] [[package]] name = "sc-rpc" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "hash-db", "jsonrpc-core", "jsonrpc-pubsub", - "log 0.4.11", + "log", "parity-scale-codec", "parking_lot 0.10.2", "sc-block-builder", @@ -7225,6 +7286,7 @@ dependencies = [ "sc-executor", "sc-keystore", "sc-rpc-api", + "sc-tracing", "serde_json", "sp-api", "sp-blockchain", @@ -7244,15 +7306,15 @@ dependencies = [ [[package]] name = "sc-rpc-api" version = "0.8.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "derive_more", - "futures 
0.3.5", + "futures 0.3.8", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", "jsonrpc-pubsub", - "log 0.4.11", + "log", "parity-scale-codec", "parking_lot 0.10.2", "serde", @@ -7268,7 +7330,7 @@ dependencies = [ [[package]] name = "sc-rpc-server" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "futures 0.1.29", "jsonrpc-core", @@ -7276,7 +7338,7 @@ dependencies = [ "jsonrpc-ipc-server", "jsonrpc-pubsub", "jsonrpc-ws-server", - "log 0.4.11", + "log", "serde", "serde_json", "sp-runtime", @@ -7286,23 +7348,22 @@ dependencies = [ [[package]] name = "sc-service" version = "0.8.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ - "derive_more", - "directories", + "directories 3.0.1", "exit-future", "futures 0.1.29", - "futures 0.3.5", + "futures 0.3.8", "futures-timer 3.0.2", "hash-db", "jsonrpc-core", "jsonrpc-pubsub", "lazy_static", - "log 0.4.11", + "log", "parity-scale-codec", "parity-util-mem", "parking_lot 0.10.2", - "pin-project", + "pin-project 0.4.23", "rand 0.7.3", "sc-block-builder", "sc-chain-spec", @@ -7342,6 +7403,7 @@ dependencies = [ "sp-version", "substrate-prometheus-endpoint", "tempfile", + "thiserror", "tracing", "tracing-futures", "wasm-timer", @@ -7350,21 +7412,22 @@ dependencies = [ [[package]] name = "sc-state-db" version = "0.8.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ - "log 0.4.11", + "log", "parity-scale-codec", "parity-util-mem", 
"parity-util-mem-derive", "parking_lot 0.10.2", "sc-client-api", "sp-core", + "thiserror", ] [[package]] name = "sc-sync-state-rpc" version = "0.8.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "jsonrpc-core", "jsonrpc-core-client", @@ -7378,19 +7441,20 @@ dependencies = [ "serde_json", "sp-blockchain", "sp-runtime", + "thiserror", ] [[package]] name = "sc-telemetry" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "futures-timer 3.0.2", "libp2p", - "log 0.4.11", + "log", "parking_lot 0.10.2", - "pin-project", + "pin-project 0.4.23", "rand 0.7.3", "serde", "slog", @@ -7404,11 +7468,15 @@ dependencies = [ [[package]] name = "sc-tracing" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ + "ansi_term 0.12.1", "erased-serde", - "log 0.4.11", + "lazy_static", + "log", + "once_cell", "parking_lot 0.10.2", + "regex", "rustc-hash", "sc-telemetry", "serde", @@ -7417,18 +7485,19 @@ dependencies = [ "sp-tracing", "tracing", "tracing-core", + "tracing-log", "tracing-subscriber", ] [[package]] name = "sc-transaction-graph" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "derive_more", - "futures 0.3.5", + "futures 0.3.8", 
"linked-hash-map", - "log 0.4.11", + "log", "parity-util-mem", "parking_lot 0.10.2", "retain_mut", @@ -7438,19 +7507,19 @@ dependencies = [ "sp-runtime", "sp-transaction-pool", "sp-utils", + "thiserror", "wasm-timer", ] [[package]] name = "sc-transaction-pool" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ - "derive_more", - "futures 0.3.5", + "futures 0.3.8", "futures-diagnose", "intervalier", - "log 0.4.11", + "log", "parity-scale-codec", "parity-util-mem", "parking_lot 0.10.2", @@ -7464,6 +7533,7 @@ dependencies = [ "sp-transaction-pool", "sp-utils", "substrate-prometheus-endpoint", + "thiserror", "wasm-timer", ] @@ -7484,12 +7554,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "021b403afe70d81eea68f6ea12f6b3c9588e5d536a94c3bf80f15e7faa267862" dependencies = [ "arrayref", - "arrayvec 0.5.1", - "curve25519-dalek", + "arrayvec 0.5.2", + "curve25519-dalek 2.1.0", "getrandom", "merlin", "rand 0.7.3", "rand_core 0.5.1", + "serde", "sha2 0.8.2", "subtle 2.2.3", "zeroize", @@ -7501,12 +7572,6 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" -[[package]] -name = "scopeguard" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94258f53601af11e6a49f722422f6e3425c52b06245a5cf9bc09908b174f5e27" - [[package]] name = "scopeguard" version = "1.1.0" @@ -7528,9 +7593,9 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e367622f934864ffa1c704ba2b82280aab856e3d8213c84c5720257eb34b15b9" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", ] [[package]] @@ -7545,9 +7610,9 @@ 
dependencies = [ [[package]] name = "secrecy" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9182278ed645df3477a9c27bfee0621c621aa16f6972635f7f795dae3d81070f" +checksum = "0673d6a6449f5e7d12a1caf424fd9363e2af3a4953023ed455e3c4beef4597c0" dependencies = [ "zeroize", ] @@ -7575,13 +7640,32 @@ dependencies = [ "libc", ] +[[package]] +name = "semver" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a3186ec9e65071a2095434b1f5bb24838d4e8e130f584c790f6033c79943537" +dependencies = [ + "semver-parser 0.7.0", +] + [[package]] name = "semver" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" dependencies = [ - "semver-parser", + "semver-parser 0.7.0", +] + +[[package]] +name = "semver" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" +dependencies = [ + "semver-parser 0.10.0", + "serde", ] [[package]] @@ -7591,10 +7675,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] -name = "send_wrapper" -version = "0.2.0" +name = "semver-parser" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0eddf2e8f50ced781f288c19f18621fa72a3779e3cb58dbf23b07469b0abeb4" +checksum = "0e012c6c5380fb91897ba7b9261a0f565e624e869d42fe1a1d03fa0d68a083d5" +dependencies = [ + "pest", + "pest_derive", +] [[package]] name = "send_wrapper" @@ -7616,29 +7704,29 @@ checksum = "f97841a747eef040fcd2e7b3b9a220a7205926e60488e673d9e4926d27772ce5" [[package]] name = "serde" -version = "1.0.117" +version = "1.0.118" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b88fa983de7720629c9387e9f517353ed404164b1e482c970a90c1a4aaf7dc1a" +checksum = "06c64263859d87aa2eb554587e2d23183398d617427327cf2b3d0ed8c69e4800" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.117" +version = "1.0.118" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbd1ae72adb44aab48f325a02444a5fc079349a8d804c1fc922aed3f7454c74e" +checksum = "c84d3526699cd55261af4b941e4e725444df67aa4f9e6a3564f18030d12672df" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", ] [[package]] name = "serde_json" -version = "1.0.59" +version = "1.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcac07dbffa1c65e7f816ab9eba78eb142c6d44410f4eeba1e26e4f5dfa56b95" +checksum = "1500e84d27fe482ed1dc791a56eddc2f230046a040fa908c08bda1d9fb615779" dependencies = [ "itoa", "ryu", @@ -7676,7 +7764,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2933378ddfeda7ea26f48c555bdad8bb446bf8a3d17832dc83e380d444cfb8c1" dependencies = [ "block-buffer 0.9.0", - "cfg-if", + "cfg-if 0.1.10", "cpuid-bool", "digest 0.9.0", "opaque-debug 0.3.0", @@ -7684,24 +7772,24 @@ dependencies = [ [[package]] name = "sha3" -version = "0.8.2" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd26bc0e7a2e3a7c959bc494caf58b72ee0c71d67704e9520f736ca7e4853ecf" +checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" dependencies = [ - "block-buffer 0.7.3", - "byte-tools", - "digest 0.8.1", + "block-buffer 0.9.0", + "digest 0.9.0", "keccak", - "opaque-debug 0.2.3", + "opaque-debug 0.3.0", ] [[package]] name = "sharded-slab" -version = "0.0.9" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06d5a3f5166fb5b42a5439f2eee8b9de149e235961e3eb21c5808fc3ea17ff3e" +checksum = 
"7b4921be914e16899a80adefb821f8ddb7974e3f1250223575a44ed994882127" dependencies = [ "lazy_static", + "loom", ] [[package]] @@ -7710,10 +7798,10 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf3ab0cdff84d6c66fc9e268010ea6508e58ee942575afb66f2cf194bb218bb4" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "enum_primitive", "libc", - "log 0.4.11", + "log", "memrange", "nix 0.10.0", "quick-error 1.2.3", @@ -7756,6 +7844,18 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "65211b7b6fc3f14ff9fc7a2011a434e3e6880585bd2e9e9396315ae24cbf7852" +[[package]] +name = "simba" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb931b1367faadea6b1ab1c306a860ec17aaa5fa39f367d0c744e69d971a1fb2" +dependencies = [ + "approx", + "num-complex", + "num-traits 0.2.12", + "paste", +] + [[package]] name = "slab" version = "0.4.2" @@ -7801,9 +7901,9 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a945ec7f7ce853e89ffa36be1e27dce9a43e82ff9093bf3461c30d5da74ed11b" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", ] [[package]] @@ -7817,9 +7917,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.4.2" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbee7696b84bbf3d89a1c2eccff0850e3047ed46bfcd2e92c29a2d074d57e252" +checksum = "ae524f056d7d770e174287294f562e95044c68e88dec909a00d2094805db9d75" [[package]] name = "snow" @@ -7836,16 +7936,16 @@ dependencies = [ "rustc_version", "sha2 0.9.1", "subtle 2.2.3", - "x25519-dalek", + "x25519-dalek 0.6.0", ] [[package]] name = "socket2" -version = "0.3.12" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03088793f677dce356f3ccc2edb1b314ad191ab702a5de3faf49304f7e104918" +checksum = 
"2c29947abdee2a218277abeca306f25789c938e500ea5a9d4b12a5a504466902" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "libc", "redox_syscall", "winapi 0.3.9", @@ -7860,9 +7960,9 @@ dependencies = [ "base64 0.12.3", "bytes 0.5.6", "flate2", - "futures 0.3.5", + "futures 0.3.8", "httparse", - "log 0.4.11", + "log", "rand 0.7.3", "sha-1", ] @@ -7870,19 +7970,19 @@ dependencies = [ [[package]] name = "sp-allocator" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ - "derive_more", - "log 0.4.11", + "log", "sp-core", "sp-std", "sp-wasm-interface", + "thiserror", ] [[package]] name = "sp-api" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "hash-db", "parity-scale-codec", @@ -7892,24 +7992,25 @@ dependencies = [ "sp-state-machine", "sp-std", "sp-version", + "thiserror", ] [[package]] name = "sp-api-proc-macro" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "blake2-rfc", "proc-macro-crate", - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", ] [[package]] name = "sp-application-crypto" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "parity-scale-codec", "serde", @@ -7921,7 +8022,7 @@ dependencies = 
[ [[package]] name = "sp-arithmetic" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "integer-sqrt", "num-traits 0.2.12", @@ -7934,7 +8035,7 @@ dependencies = [ [[package]] name = "sp-authority-discovery" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "parity-scale-codec", "sp-api", @@ -7946,7 +8047,7 @@ dependencies = [ [[package]] name = "sp-authorship" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "parity-scale-codec", "sp-inherents", @@ -7957,7 +8058,7 @@ dependencies = [ [[package]] name = "sp-block-builder" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "parity-scale-codec", "sp-api", @@ -7969,24 +8070,25 @@ dependencies = [ [[package]] name = "sp-blockchain" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ - "derive_more", - "log 0.4.11", - "lru 0.4.3", + "futures 0.3.8", + "log", + "lru", "parity-scale-codec", "parking_lot 0.10.2", - "sp-block-builder", + "sp-api", "sp-consensus", "sp-database", "sp-runtime", 
"sp-state-machine", + "thiserror", ] [[package]] name = "sp-chain-spec" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "serde", "serde_json", @@ -7995,13 +8097,12 @@ dependencies = [ [[package]] name = "sp-consensus" version = "0.8.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ - "derive_more", - "futures 0.3.5", + "futures 0.3.8", "futures-timer 3.0.2", "libp2p", - "log 0.4.11", + "log", "parity-scale-codec", "parking_lot 0.10.2", "serde", @@ -8015,13 +8116,14 @@ dependencies = [ "sp-utils", "sp-version", "substrate-prometheus-endpoint", + "thiserror", "wasm-timer", ] [[package]] name = "sp-consensus-babe" version = "0.8.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "merlin", "parity-scale-codec", @@ -8041,7 +8143,7 @@ dependencies = [ [[package]] name = "sp-consensus-slots" version = "0.8.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "parity-scale-codec", "sp-runtime", @@ -8050,7 +8152,7 @@ dependencies = [ [[package]] name = "sp-consensus-vrf" version = "0.8.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "parity-scale-codec", "schnorrkel", @@ -8062,21 +8164,21 @@ dependencies = [ [[package]] name = "sp-core" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "base58", "blake2-rfc", - "byteorder 1.3.4", + "byteorder", "dyn-clonable", "ed25519-dalek", - "futures 0.3.5", + "futures 0.3.8", "hash-db", "hash256-std-hasher", "hex", "impl-serde", "lazy_static", "libsecp256k1", - "log 0.4.11", + "log", "merlin", "num-traits 0.2.12", "parity-scale-codec", @@ -8095,8 +8197,9 @@ dependencies = [ "sp-std", "sp-storage", "substrate-bip39", + "thiserror", "tiny-bip39", - "tiny-keccak 2.0.2", + "tiny-keccak", "twox-hash", "wasmi", "zeroize", @@ -8105,7 +8208,7 @@ dependencies = [ [[package]] name = "sp-database" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "kvdb", "parking_lot 0.10.2", @@ -8114,17 +8217,17 @@ dependencies = [ [[package]] name = "sp-debug-derive" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", ] [[package]] name = "sp-externalities" version = "0.8.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "environmental", "parity-scale-codec", @@ -8135,10 +8238,10 @@ dependencies = [ [[package]] name = "sp-finality-grandpa" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "finality-grandpa", - "log 0.4.11", + "log", "parity-scale-codec", "serde", "sp-api", @@ -8152,24 +8255,24 @@ dependencies = [ [[package]] name = "sp-inherents" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ - "derive_more", "parity-scale-codec", "parking_lot 0.10.2", "sp-core", "sp-std", + "thiserror", ] [[package]] name = "sp-io" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "hash-db", "libsecp256k1", - "log 0.4.11", + "log", "parity-scale-codec", "parking_lot 0.10.2", "sp-core", @@ -8188,7 +8291,7 @@ dependencies = [ [[package]] name = "sp-keyring" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "lazy_static", "sp-core", @@ -8199,15 +8302,16 @@ dependencies = [ [[package]] name = "sp-keystore" version = "0.8.0" -source = 
"git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "async-trait", "derive_more", - "futures 0.3.5", + "futures 0.3.8", "merlin", "parity-scale-codec", "parking_lot 0.10.2", "schnorrkel", + "serde", "sp-core", "sp-externalities", ] @@ -8215,7 +8319,7 @@ dependencies = [ [[package]] name = "sp-npos-elections" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "parity-scale-codec", "serde", @@ -8227,18 +8331,18 @@ dependencies = [ [[package]] name = "sp-npos-elections-compact" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "proc-macro-crate", - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", ] [[package]] name = "sp-offchain" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "sp-api", "sp-core", @@ -8248,16 +8352,15 @@ dependencies = [ [[package]] name = "sp-panic-handler" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "backtrace", - "log 0.4.11", ] [[package]] name = "sp-rpc" version = "2.0.0" -source = 
"git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "serde", "sp-core", @@ -8266,12 +8369,12 @@ dependencies = [ [[package]] name = "sp-runtime" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "either", "hash256-std-hasher", - "impl-trait-for-tuples", - "log 0.4.11", + "impl-trait-for-tuples 0.1.3", + "log", "parity-scale-codec", "parity-util-mem", "paste", @@ -8280,7 +8383,6 @@ dependencies = [ "sp-application-crypto", "sp-arithmetic", "sp-core", - "sp-inherents", "sp-io", "sp-std", ] @@ -8288,8 +8390,9 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ + "impl-trait-for-tuples 0.1.3", "parity-scale-codec", "primitive-types", "sp-externalities", @@ -8304,19 +8407,19 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "Inflector", "proc-macro-crate", - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", ] [[package]] name = "sp-serializer" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "serde", "serde_json", @@ -8325,7 +8428,7 @@ dependencies = [ [[package]] name = "sp-session" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "parity-scale-codec", "sp-api", @@ -8338,7 +8441,7 @@ dependencies = [ [[package]] name = "sp-staking" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "parity-scale-codec", "sp-runtime", @@ -8348,20 +8451,21 @@ dependencies = [ [[package]] name = "sp-state-machine" version = "0.8.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "hash-db", - "log 0.4.11", + "log", "num-traits 0.2.12", "parity-scale-codec", "parking_lot 0.10.2", "rand 0.7.3", - "smallvec 1.4.2", + "smallvec 1.5.1", "sp-core", "sp-externalities", "sp-panic-handler", "sp-std", "sp-trie", + "thiserror", "trie-db", "trie-root", ] @@ -8369,12 +8473,12 @@ dependencies = [ [[package]] name = "sp-std" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" [[package]] name = "sp-storage" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "impl-serde", "parity-scale-codec", @@ -8387,9 +8491,9 @@ dependencies = [ [[package]] name = "sp-tasks" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ - "log 0.4.11", + "log", "sp-core", "sp-externalities", "sp-io", @@ -8400,9 +8504,9 @@ dependencies = [ [[package]] name = "sp-timestamp" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ - "impl-trait-for-tuples", + "impl-trait-for-tuples 0.1.3", "parity-scale-codec", "sp-api", "sp-inherents", @@ -8414,9 +8518,9 @@ dependencies = [ [[package]] name = "sp-tracing" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ - "log 0.4.11", + "log", "parity-scale-codec", "sp-std", "tracing", @@ -8427,22 +8531,23 @@ dependencies = [ [[package]] name = "sp-transaction-pool" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "derive_more", - "futures 0.3.5", - "log 0.4.11", + "futures 0.3.8", + "log", "parity-scale-codec", "serde", "sp-api", "sp-blockchain", "sp-runtime", + "thiserror", ] [[package]] name = "sp-trie" version = "2.0.0" -source = 
"git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "hash-db", "memory-db", @@ -8456,9 +8561,9 @@ dependencies = [ [[package]] name = "sp-utils" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "futures-core", "futures-timer 3.0.2", "lazy_static", @@ -8468,7 +8573,7 @@ dependencies = [ [[package]] name = "sp-version" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "impl-serde", "parity-scale-codec", @@ -8480,9 +8585,9 @@ dependencies = [ [[package]] name = "sp-wasm-interface" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ - "impl-trait-for-tuples", + "impl-trait-for-tuples 0.1.3", "parity-scale-codec", "sp-std", "wasmi", @@ -8508,29 +8613,30 @@ checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" [[package]] name = "statrs" -version = "0.10.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10102ac8d55e35db2b3fafc26f81ba8647da2e15879ab686a67e6d19af2685e8" +checksum = "cce16f6de653e88beca7bd13780d08e09d4489dbca1f9210e041bc4852481382" dependencies = [ - "rand 0.5.6", + "rand 0.7.3", ] [[package]] name = "stream-cipher" -version = "0.3.2" +version = 
"0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8131256a5896cabcf5eb04f4d6dacbe1aefda854b0d9896e09cb58829ec5638c" +checksum = "09f8ed9974042b8c3672ff3030a69fcc03b74c47c3d1ecb7755e8a3626011e88" dependencies = [ - "generic-array 0.12.3", + "generic-array 0.14.4", ] [[package]] name = "stream-cipher" -version = "0.4.1" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09f8ed9974042b8c3672ff3030a69fcc03b74c47c3d1ecb7755e8a3626011e88" +checksum = "c80e15f898d8d8f25db24c253ea615cc14acf418ff307822995814e7d42cfa89" dependencies = [ - "generic-array 0.14.2", + "block-cipher 0.8.0", + "generic-array 0.14.4", ] [[package]] @@ -8562,9 +8668,9 @@ checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" [[package]] name = "structopt" -version = "0.3.15" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de2f5e239ee807089b62adce73e48c625e0ed80df02c7ab3f068f5db5281065c" +checksum = "5277acd7ee46e63e5168a80734c9f6ee81b1367a7d8772a2d765df2a3705d28c" dependencies = [ "clap", "lazy_static", @@ -8573,15 +8679,15 @@ dependencies = [ [[package]] name = "structopt-derive" -version = "0.4.8" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "510413f9de616762a4fbeab62509bf15c729603b72d7cd71280fbca431b1c118" +checksum = "5ba9cdfda491b814720b6b06e0cac513d922fc407582032e8706e9f137976f90" dependencies = [ "heck", "proc-macro-error", - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", ] [[package]] @@ -8600,9 +8706,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0054a7df764039a6cd8592b9de84be4bec368ff081d203a7d5371cbfa8e65c81" dependencies = [ "heck", - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", ] [[package]] @@ -8611,8 +8717,8 @@ version = "0.4.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "bed6646a0159b9935b5d045611560eeef842b78d7adc3ba36f5ca325a13a0236" dependencies = [ - "hmac", - "pbkdf2", + "hmac 0.7.1", + "pbkdf2 0.3.0", "schnorrkel", "sha2 0.8.2", "zeroize", @@ -8621,18 +8727,18 @@ dependencies = [ [[package]] name = "substrate-browser-utils" version = "0.8.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "chrono", "console_error_panic_hook", "console_log", "futures 0.1.29", - "futures 0.3.5", + "futures 0.3.8", "futures-timer 3.0.2", "js-sys", "kvdb-web", "libp2p-wasm-ext", - "log 0.4.11", + "log", "rand 0.6.5", "rand 0.7.3", "sc-chain-spec", @@ -8647,7 +8753,7 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "platforms", ] @@ -8655,14 +8761,14 @@ dependencies = [ [[package]] name = "substrate-frame-rpc-system" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "frame-system-rpc-runtime-api", - "futures 0.3.5", + "futures 0.3.8", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", - "log 0.4.11", + "log", "parity-scale-codec", "sc-client-api", "sc-rpc-api", @@ -8678,13 +8784,13 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" version = "0.8.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "async-std", "derive_more", "futures-util", "hyper 0.13.6", - "log 0.4.11", + "log", "prometheus", "tokio 0.2.21", ] @@ -8692,10 +8798,10 @@ dependencies = [ [[package]] name = "substrate-test-client" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "futures 0.1.29", - "futures 0.3.5", + "futures 0.3.8", "hash-db", "hex", "parity-scale-codec", @@ -8719,9 +8825,9 @@ dependencies = [ [[package]] name = "substrate-test-utils" version = "2.0.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "substrate-test-utils-derive", "tokio 0.2.21", ] @@ -8729,18 +8835,28 @@ dependencies = [ [[package]] name = "substrate-test-utils-derive" version = "0.8.0" -source = "git+https://github.com/paritytech/substrate#668ecade60abe5a644868aefd3d231c9839fe652" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-release-v0.8.27#8c3b3fb1b0c858cc603444eafab4032caf6795ce" dependencies = [ "proc-macro-crate", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", ] [[package]] -name = "substrate-wasm-builder-runner" -version = "2.0.0" +name = "substrate-wasm-builder" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54cab12167e32b38a62c5ea5825aa0874cde315f907a46aad2b05aa8ef3d862f" +checksum = "79091baab813855ddf65b191de9fe53e656b6b67c1e9bd23fdcbff8788164684" +dependencies = [ + "ansi_term 0.12.1", + "atty", + "build-helper", + "cargo_metadata", + "tempfile", + 
"toml", + "walkdir", + "wasm-gc-api", +] [[package]] name = "subtle" @@ -8767,35 +8883,24 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.33" +version = "1.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8d5d96e8cbb005d6959f119f773bfaebb5684296108fb32600c00cde305b2cd" +checksum = "cc371affeffc477f42a221a1e4297aedcea33d47d19b61455588bd9d8f6b19ac" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", "unicode-xid 0.2.1", ] -[[package]] -name = "syn-mid" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7be3539f6c128a931cf19dcee741c1af532c7fd387baa739c03dd2e96479338a" -dependencies = [ - "proc-macro2 1.0.18", - "quote 1.0.7", - "syn 1.0.33", -] - [[package]] name = "synstructure" version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", "unicode-xid 0.2.1", ] @@ -8817,7 +8922,7 @@ version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "libc", "rand 0.7.3", "redox_syscall", @@ -8836,22 +8941,48 @@ dependencies = [ [[package]] name = "test-parachain-adder" -version = "0.8.26" +version = "0.8.27" dependencies = [ "dlmalloc", "parity-scale-codec", "polkadot-parachain", "sp-io", "sp-std", - "substrate-wasm-builder-runner", - "tiny-keccak 1.5.0", + "substrate-wasm-builder", + "tiny-keccak", +] + +[[package]] +name = "test-parachain-adder-collator" +version = "0.7.26" +dependencies = [ + "futures 0.3.8", + "futures-timer 3.0.2", + "log", + "parity-scale-codec", + "polkadot-cli", + "polkadot-node-primitives", + "polkadot-node-subsystem", + "polkadot-parachain", + "polkadot-primitives", + 
"polkadot-service", + "polkadot-test-service", + "sc-authority-discovery", + "sc-cli", + "sc-service", + "sp-core", + "sp-keyring", + "structopt", + "substrate-test-utils", + "test-parachain-adder", + "tokio 0.2.21", ] [[package]] name = "test-parachain-halt" -version = "0.8.26" +version = "0.8.27" dependencies = [ - "substrate-wasm-builder-runner", + "substrate-wasm-builder", ] [[package]] @@ -8863,7 +8994,7 @@ dependencies = [ "sp-core", "test-parachain-adder", "test-parachain-halt", - "tiny-keccak 1.5.0", + "tiny-keccak", ] [[package]] @@ -8888,22 +9019,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.21" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "318234ffa22e0920fe9a40d7b8369b5f649d490980cf7aadcf1eb91594869b42" +checksum = "0e9ae34b84616eedaaf1e9dd6026dbe00dcafa92aa0c8077cb69df1fcfe5e53e" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.21" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cae2447b6282786c3493999f40a9be2a6ad20cb8bd268b0a0dbf5a065535c0ab" +checksum = "9ba20f23e85b10754cd195504aebf6a27e2e6cbe28c17778a0c930724628dd56" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", ] [[package]] @@ -8936,27 +9067,20 @@ dependencies = [ [[package]] name = "tiny-bip39" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0165e045cc2ae1660270ca65e1676dbaab60feb0f91b10f7d0665e9b47e31f2" +checksum = "d9e44c4759bae7f1032e286a7ef990bd9ed23fe831b7eeba0beb97484c2e59b8" dependencies = [ - "failure", - "hmac", - "once_cell 1.4.1", - "pbkdf2", + "anyhow", + "hmac 0.8.1", + "once_cell", + "pbkdf2 0.4.0", "rand 0.7.3", "rustc-hash", - "sha2 0.8.2", + "sha2 0.9.1", + "thiserror", "unicode-normalization", -] - -[[package]] -name = "tiny-keccak" -version = "1.5.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d8a021c69bb74a44ccedb824a046447e2c84a01df9e5c20779750acb38e11b2" -dependencies = [ - "crunchy", + "zeroize", ] [[package]] @@ -8986,11 +9110,11 @@ dependencies = [ "num_cpus", "tokio-codec", "tokio-current-thread", - "tokio-executor 0.1.10", + "tokio-executor", "tokio-fs", "tokio-io", "tokio-reactor", - "tokio-sync 0.1.8", + "tokio-sync", "tokio-tcp", "tokio-threadpool", "tokio-timer", @@ -9014,7 +9138,7 @@ dependencies = [ "mio", "mio-uds", "num_cpus", - "pin-project-lite", + "pin-project-lite 0.1.7", "signal-hook-registry", "slab", "tokio-macros", @@ -9050,7 +9174,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1de0e32a83f131e002238d7ccde18211c0a5397f60cbfffcb112868c2e0e20e" dependencies = [ "futures 0.1.29", - "tokio-executor 0.1.10", + "tokio-executor", ] [[package]] @@ -9063,17 +9187,6 @@ dependencies = [ "futures 0.1.29", ] -[[package]] -name = "tokio-executor" -version = "0.2.0-alpha.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ee9ceecf69145923834ea73f32ba40c790fd877b74a7817dd0b089f1eb9c7c8" -dependencies = [ - "futures-util-preview", - "lazy_static", - "tokio-sync 0.2.0-alpha.6", -] - [[package]] name = "tokio-fs" version = "0.1.7" @@ -9093,7 +9206,7 @@ checksum = "57fc868aae093479e3131e3d165c93b1c7474109d13c90ec0dda2a1bbfff0674" dependencies = [ "bytes 0.4.12", "futures 0.1.29", - "log 0.4.11", + "log", ] [[package]] @@ -9102,9 +9215,9 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", ] [[package]] @@ -9129,14 +9242,14 @@ dependencies = [ "crossbeam-utils", "futures 0.1.29", "lazy_static", - "log 0.4.11", + "log", "mio", "num_cpus", "parking_lot 0.9.0", "slab", - "tokio-executor 0.1.10", + 
"tokio-executor", "tokio-io", - "tokio-sync 0.1.8", + "tokio-sync", ] [[package]] @@ -9146,7 +9259,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "228139ddd4fea3fa345a29233009635235833e52807af7ea6448ead03890d6a9" dependencies = [ "futures-core", - "rustls", + "rustls 0.18.0", "tokio 0.2.21", "webpki", ] @@ -9170,17 +9283,6 @@ dependencies = [ "futures 0.1.29", ] -[[package]] -name = "tokio-sync" -version = "0.2.0-alpha.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f1aaeb685540f7407ea0e27f1c9757d258c7c6bf4e3eb19da6fc59b747239d2" -dependencies = [ - "fnv", - "futures-core-preview", - "futures-util-preview", -] - [[package]] name = "tokio-tcp" version = "0.1.4" @@ -9206,10 +9308,10 @@ dependencies = [ "crossbeam-utils", "futures 0.1.29", "lazy_static", - "log 0.4.11", + "log", "num_cpus", "slab", - "tokio-executor 0.1.10", + "tokio-executor", ] [[package]] @@ -9221,7 +9323,7 @@ dependencies = [ "crossbeam-utils", "futures 0.1.29", "slab", - "tokio-executor 0.1.10", + "tokio-executor", ] [[package]] @@ -9232,7 +9334,7 @@ checksum = "e2a0b10e610b39c38b031a2fcab08e4b82f16ece36504988dcbd81dbba650d82" dependencies = [ "bytes 0.4.12", "futures 0.1.29", - "log 0.4.11", + "log", "mio", "tokio-codec", "tokio-io", @@ -9249,7 +9351,7 @@ dependencies = [ "futures 0.1.29", "iovec", "libc", - "log 0.4.11", + "log", "mio", "mio-uds", "tokio-codec", @@ -9266,8 +9368,8 @@ dependencies = [ "bytes 0.5.6", "futures-core", "futures-sink", - "log 0.4.11", - "pin-project-lite", + "log", + "pin-project-lite 0.1.7", "tokio 0.2.21", ] @@ -9288,12 +9390,12 @@ checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" [[package]] name = "tracing" -version = "0.1.21" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0987850db3733619253fe60e17cb59b82d37c7e6c0236bb81e4d6b87c879f27" +checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" 
dependencies = [ - "cfg-if", - "pin-project-lite", + "cfg-if 1.0.0", + "pin-project-lite 0.2.0", "tracing-attributes", "tracing-core", ] @@ -9304,9 +9406,9 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "80e0ccfc3378da0cce270c946b676a376943f5cd16aeba64568e7939806f4ada" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", ] [[package]] @@ -9334,7 +9436,7 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab7bb6f14721aa00656086e9335d363c5c8747bae02ebe32ea2c7dece5689b4c" dependencies = [ - "pin-project", + "pin-project 0.4.23", "tracing", ] @@ -9345,7 +9447,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e0f8c7178e13481ff6765bd169b33e8d554c5d2bbede5e32c356194be02b9b9" dependencies = [ "lazy_static", - "log 0.4.11", + "log", "tracing-core", ] @@ -9361,9 +9463,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.2.13" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ef0a5e15477aa303afbfac3a44cba9b6430fdaad52423b1e6c0dbbe28c3eedd" +checksum = "a1fa8f0c8f4c594e4fc9debc1990deab13238077271ba84dd853d54902ee3401" dependencies = [ "ansi_term 0.12.1", "chrono", @@ -9373,7 +9475,7 @@ dependencies = [ "serde", "serde_json", "sharded-slab", - "smallvec 1.4.2", + "smallvec 1.5.1", "thread_local", "tracing", "tracing-core", @@ -9389,15 +9491,15 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-db" -version = "0.22.0" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39f1a9a9252d38c5337cf0c5392988821a5cf1b2103245016968f2ab41de9e38" +checksum = "9e55f7ace33d6237e14137e386f4e1672e2a5c6bbc97fef9f438581a143971f0" dependencies = [ "hash-db", "hashbrown 0.8.0", - "log 0.4.11", + "log", "rustc-hex", - "smallvec 1.4.2", + 
"smallvec 1.5.1", ] [[package]] @@ -9442,7 +9544,7 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "173cd16430c206dc1a430af8a89a0e9c076cf15cb42b4aedb10e8cc8fee73681" dependencies = [ - "byteorder 1.3.4", + "byteorder", "crunchy", "rustc-hex", "static_assertions", @@ -9505,36 +9607,20 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402" dependencies = [ - "generic-array 0.14.2", + "generic-array 0.14.4", "subtle 2.2.3", ] -[[package]] -name = "unsigned-varint" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f67332660eb59a6f1eb24ff1220c9e8d01738a8503c6002e30bcfe4bd9f2b4a9" - -[[package]] -name = "unsigned-varint" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "669d776983b692a906c881fcd0cfb34271a48e197e4d6cb8df32b05bfc3d3fa5" -dependencies = [ - "bytes 0.5.6", - "futures-io", - "futures-util", - "futures_codec", -] - [[package]] name = "unsigned-varint" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7fdeedbf205afadfe39ae559b75c3240f24e257d0ca27e85f85cb82aa19ac35" dependencies = [ + "bytes 0.5.6", "futures-io", "futures-util", + "futures_codec", ] [[package]] @@ -9606,9 +9692,20 @@ dependencies = [ [[package]] name = "waker-fn" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9571542c2ce85ce642e6b58b3364da2fb53526360dfb7c211add4f5c23105ff7" +checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" + +[[package]] +name = "walkdir" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "777182bc735b6424e1a57516d35ed72cb8019d85c8c9bf536dccb3445c1a2f7d" +dependencies = [ + "same-file", + "winapi 0.3.9", + "winapi-util", +] [[package]] name = 
"want" @@ -9617,7 +9714,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6395efa4784b027708f7451087e647ec73cc74f5d9bc2e418404248d679a230" dependencies = [ "futures 0.1.29", - "log 0.4.11", + "log", "try-lock", ] @@ -9627,7 +9724,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" dependencies = [ - "log 0.4.11", + "log", "try-lock", ] @@ -9639,11 +9736,11 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasm-bindgen" -version = "0.2.64" +version = "0.2.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a634620115e4a229108b71bde263bb4220c483b3f07f5ba514ee8d15064c4c2" +checksum = "3cd364751395ca0f68cafb17666eee36b63077fb5ecd972bbcd74c90c4bf736e" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "serde", "serde_json", "wasm-bindgen-macro", @@ -9651,26 +9748,26 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.64" +version = "0.2.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e53963b583d18a5aa3aaae4b4c1cb535218246131ba22a71f05b518098571df" +checksum = "1114f89ab1f4106e5b55e688b828c0ab0ea593a1ea7c094b141b14cbaaec2d62" dependencies = [ "bumpalo", "lazy_static", - "log 0.4.11", - "proc-macro2 1.0.18", + "log", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.14" +version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dba48d66049d2a6cc8488702e7259ab7afc9043ad0dc5448444f46f2a453b362" +checksum = "1fe9756085a84584ee9457a002b7cdfe0bfff169f45d2591d8be1345a6780e35" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "js-sys", "wasm-bindgen", "web-sys", @@ -9678,9 +9775,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.64" 
+version = "0.2.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fcfd5ef6eec85623b4c6e844293d4516470d8f19cd72d0d12246017eb9060b8" +checksum = "7a6ac8995ead1f084a8dea1e65f194d0973800c7f571f6edd70adf06ecf77084" dependencies = [ "quote 1.0.7", "wasm-bindgen-macro-support", @@ -9688,34 +9785,44 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.64" +version = "0.2.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9adff9ee0e94b926ca81b57f57f86d5545cdcb1d259e21ec9bdd95b901754c75" +checksum = "b5a48c72f299d80557c7c62e37e7225369ecc0c963964059509fbafe917c7549" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.64" +version = "0.2.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f7b90ea6c632dd06fd765d44542e234d5e63d9bb917ecd64d79778a13bd79ae" +checksum = "7e7811dd7f9398f14cc76efd356f98f03aa30419dea46aa810d71e819fc97158" + +[[package]] +name = "wasm-gc-api" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0c32691b6c7e6c14e7f8fd55361a9088b507aa49620fcd06c09b3a1082186b9" +dependencies = [ + "log", + "parity-wasm 0.32.0", + "rustc-demangle", +] [[package]] name = "wasm-timer" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "324c5e65a08699c9c4334ba136597ab22b85dccd4b65dd1e36ccf8f723a95b54" +checksum = "be0ecb0db480561e9a7642b5d3e4187c128914e58aa84330b9493e3eb68c5e7f" dependencies = [ - "futures 0.3.5", + "futures 0.3.8", "js-sys", - "parking_lot 0.9.0", + "parking_lot 0.11.1", "pin-utils", - "send_wrapper 0.2.0", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", @@ -9731,7 +9838,7 @@ dependencies = [ "memory_units", "num-rational", "num-traits 0.2.12", - "parity-wasm", 
+ "parity-wasm 0.41.0", "wasmi-validation", ] @@ -9741,7 +9848,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea78c597064ba73596099281e2f4cfc019075122a65cdda3205af94f0b264d93" dependencies = [ - "parity-wasm", + "parity-wasm 0.41.0", ] [[package]] @@ -9764,13 +9871,13 @@ checksum = "1cd3c4f449382779ef6e0a7c3ec6752ae614e20a42e4100000c3efdc973100e2" dependencies = [ "anyhow", "backtrace", - "cfg-if", + "cfg-if 0.1.10", "lazy_static", "libc", - "log 0.4.11", + "log", "region", "rustc-demangle", - "smallvec 1.4.2", + "smallvec 1.5.1", "target-lexicon", "wasmparser 0.59.0", "wasmtime-environ", @@ -9806,17 +9913,17 @@ dependencies = [ "anyhow", "base64 0.12.3", "bincode", - "cfg-if", + "cfg-if 0.1.10", "cranelift-codegen", "cranelift-entity", "cranelift-frontend", "cranelift-wasm", - "directories", + "directories 2.0.2", "errno", "file-per-thread-logger", "indexmap", "libc", - "log 0.4.11", + "log", "more-asserts", "rayon", "serde", @@ -9835,14 +9942,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e914c013c7a9f15f4e429d5431f2830fb8adb56e40567661b69c5ec1d645be23" dependencies = [ "anyhow", - "cfg-if", + "cfg-if 0.1.10", "cranelift-codegen", "cranelift-entity", "cranelift-frontend", "cranelift-native", "cranelift-wasm", "gimli 0.21.0", - "log 0.4.11", + "log", "more-asserts", "object 0.20.0", "region", @@ -9878,7 +9985,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e8d4d1af8dd5f7096cfcc89dd668d358e52980c38cce199643372ffd6590e27" dependencies = [ "anyhow", - "cfg-if", + "cfg-if 0.1.10", "gimli 0.21.0", "lazy_static", "libc", @@ -9898,11 +10005,11 @@ checksum = "3a25f140bbbaadb07c531cba99ce1a966dba216138dc1b2a0ddecec851a01a93" dependencies = [ "backtrace", "cc", - "cfg-if", + "cfg-if 0.1.10", "indexmap", "lazy_static", "libc", - "log 0.4.11", + "log", "memoffset", "more-asserts", "region", @@ -9951,34 +10058,25 @@ dependencies = [ [[package]] name = 
"webpki-roots" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91cd5736df7f12a964a5067a12c62fa38e1bd8080aff1f80bc29be7c80d19ab4" -dependencies = [ - "webpki", -] - -[[package]] -name = "webpki-roots" -version = "0.19.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8eff4b7516a57307f9349c64bf34caa34b940b66fed4b2fb3136cb7386e5739" +checksum = "82015b7e0b8bad8185994674a13a93306bea76cf5a16c5a181382fd3a5ec2376" dependencies = [ "webpki", ] [[package]] -name = "wepoll-sys-stjepang" -version = "1.0.6" +name = "wepoll-sys" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fd319e971980166b53e17b1026812ad66c6b54063be879eb182342b55284694" +checksum = "0fcb14dea929042224824779fbc82d9fab8d2e6d3cbc0ac404de8edf489e77ff" dependencies = [ "cc", ] [[package]] name = "westend-runtime" -version = "0.8.26" +version = "0.8.27" dependencies = [ "bitvec", "frame-benchmarking", @@ -9987,9 +10085,9 @@ dependencies = [ "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", - "hex-literal 0.2.1", + "hex-literal", "libsecp256k1", - "log 0.3.9", + "log", "pallet-authority-discovery", "pallet-authorship", "pallet-babe", @@ -10030,7 +10128,7 @@ dependencies = [ "serde", "serde_derive", "serde_json", - "smallvec 1.4.2", + "smallvec 1.5.1", "sp-api", "sp-authority-discovery", "sp-block-builder", @@ -10048,8 +10146,8 @@ dependencies = [ "sp-trie", "sp-version", "static_assertions", - "substrate-wasm-builder-runner", - "tiny-keccak 1.5.0", + "substrate-wasm-builder", + "tiny-keccak", ] [[package]] @@ -10120,7 +10218,18 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "637ff90c9540fa3073bb577e65033069e4bae7c79d49d74aa3ffdf5342a53217" dependencies = [ - "curve25519-dalek", + "curve25519-dalek 2.1.0", + "rand_core 0.5.1", + "zeroize", +] + +[[package]] +name = "x25519-dalek" +version = 
"1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc614d95359fd7afc321b66d2107ede58b246b844cf5d8a0adcca413e439f088" +dependencies = [ + "curve25519-dalek 3.0.0", "rand_core 0.5.1", "zeroize", ] @@ -10152,7 +10261,7 @@ name = "xcm-executor" version = "0.8.22" dependencies = [ "frame-support", - "impl-trait-for-tuples", + "impl-trait-for-tuples 0.2.0", "parity-scale-codec", "sp-arithmetic", "sp-core", @@ -10168,19 +10277,19 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9aeb8c4043cac71c3c299dff107171c220d179492350ea198e109a414981b83c" dependencies = [ - "futures 0.3.5", - "log 0.4.11", + "futures 0.3.8", + "log", "nohash-hasher", - "parking_lot 0.11.0", + "parking_lot 0.11.1", "rand 0.7.3", "static_assertions", ] [[package]] name = "zeroize" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cbac2ed2ba24cc90f5e06485ac8c7c1e5449fe8911aef4d8877218af021a5b8" +checksum = "05f33972566adbd2d3588b0491eb94b98b43695c4ef897903470ede4f3f5a28a" dependencies = [ "zeroize_derive", ] @@ -10191,9 +10300,9 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de251eec69fc7c1bc3923403d18ececb929380e016afe103da75f396704f8ca2" dependencies = [ - "proc-macro2 1.0.18", + "proc-macro2 1.0.24", "quote 1.0.7", - "syn 1.0.33", + "syn 1.0.48", "synstructure", ] diff --git a/Cargo.toml b/Cargo.toml index ad0406e000b363b28762ee5dad39deb065528554..57708ded628eb9796c5280cad90fc0495cbe85dc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,21 +6,22 @@ path = "src/main.rs" name = "polkadot" description = "Implementation of a https://polkadot.network node in Rust based on the Substrate framework." 
license = "GPL-3.0-only" -version = "0.8.26" +version = "0.8.27" authors = ["Parity Technologies "] edition = "2018" readme = "README.md" [dependencies] cli = { package = "polkadot-cli", path = "cli" } -color-eyre = "0.5.6" -futures = "0.3.4" +color-eyre = "0.5.10" +thiserror = "1.0.22" +futures = "0.3.8" service = { package = "polkadot-service", path = "node/service" } parity-util-mem = { version = "*", default-features = false, features = ["jemalloc-global"] } [dev-dependencies] -assert_cmd = "0.12" -nix = "0.17" +assert_cmd = "1.0.2" +nix = "0.19.1" tempfile = "3.1.0" [workspace] @@ -33,7 +34,7 @@ members = [ "runtime/parachains", "runtime/polkadot", "runtime/kusama", - "runtime/rococo-v1", + "runtime/rococo", "runtime/westend", "runtime/test-runtime", "statement-table", @@ -68,6 +69,7 @@ members = [ "node/test/service", "parachain/test-parachains", "parachain/test-parachains/adder", + "parachain/test-parachains/adder/collator", ] [badges] @@ -84,6 +86,7 @@ panic = "unwind" [features] runtime-benchmarks=["cli/runtime-benchmarks"] +real-overseer=["cli/real-overseer"] # Configuration for building a .deb package - for use with `cargo-deb` [package.metadata.deb] diff --git a/README.md b/README.md index d254535f8b8c0c7e50c0d792c84f8a360700cdf3..0b54f90ebac2b1f5a485d577b8440f512354e451 100644 --- a/README.md +++ b/README.md @@ -40,6 +40,9 @@ gpg --export 9D4B2B6EB8F97156D19669A9FF0812D491B96798 > /usr/share/keyrings/pari # Add the Parity repository and update the package index echo 'deb [signed-by=/usr/share/keyrings/parity.gpg] https://releases.parity.io/deb release main' > /etc/apt/sources.list.d/parity.list apt update +# Install the `parity-keyring` package - This will ensure the GPG key +# used by APT remains up-to-date +apt install parity-keyring # Install polkadot apt install polkadot diff --git a/RELEASE.md b/RELEASE.md index e0e219ad1a579553ec18b54431434c686d122d9b..554cfb8e455e27b26a670f171d9996c31be735e3 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -3,14 
+3,14 @@ Polkadot Release Process ### Branches * release-candidate branch: The branch used for staging of the next release. - Named like `release-v0.8.26` + Named like `release-v0.8.26` * release branch: The branch to which successful release-candidates are merged and tagged with the new version. Named literally `release`. ### Notes * The release-candidate branch *must* be made in the paritytech/polkadot repo in order for release automation to work correctly -* Any new pushes/merges to the release-candidate branch (for example, +* Any new pushes/merges to the release-candidate branch (for example, refs/heads/release-v0.8.26) will result in the rc index being bumped (e.g., v0.8.26-rc1 to v0.8.26-rc2) and new wasms built. @@ -32,14 +32,25 @@ automated and require no human action. completed 6. (optional) If a fix is required to the release-candidate: 1. Merge the fix with `master` first - 2. Checkout the release-candidate branch and merge `master` - 3. Revert all changes since the creation of the release-candidate that are - **not** required for the fix. - 4. Push the release-candidate branch to Github - this is now the new release- + 2. Cherry-pick the commit from `master` to `release-v0.8.26`, fixing any + merge conflicts. Try to avoid unnecessarily bumping crates. + 3. Push the release-candidate branch to Github - this is now the new release- candidate + 4. Depending on the cherry-picked changes, it may be necessary to perform some + or all of the manual tests again. 7. Once happy with the release-candidate, perform the release using the release script located at `scripts/release.sh` (or perform the steps in that script manually): - `./scripts/release.sh v0.8.26` 8. NOACTION: The HEAD of the `release` branch will be tagged with `v0.8.26`, - and a final release will be created on Github. \ No newline at end of file + and a final draft release will be created on Github. 
+ +### Security releases + +Occasionally there may be changes that need to be made to the most recently +released version of Polkadot, without taking *every* change to `master` since +the last release. For example, in the event of a security vulnerability being +found, where releasing a fixed version is a matter of some expediency. In cases +like this, the fix should first be merged with master, cherry-picked to a branch +forked from `release`, tested, and then finally merged with `release`. A +sensible versioning scheme for changes like this is `vX.Y.Z-1`. diff --git a/cli/Cargo.toml b/cli/Cargo.toml index d3f2447856ba76ce52ba365e251c1e91bc34c4ce..466025d782d40109ae1a76d1510852e5a481d16a 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-cli" -version = "0.8.26" +version = "0.8.27" authors = ["Parity Technologies "] description = "Polkadot Relay-chain Client Node" edition = "2018" @@ -15,27 +15,31 @@ crate-type = ["cdylib", "rlib"] [dependencies] log = "0.4.11" -structopt = { version = "0.3.8", optional = true } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-tracing = { git = "https://github.com/paritytech/substrate", branch = "master" } +thiserror = "1.0.22" +structopt = { version = "0.3.21", optional = true } +wasm-bindgen = { version = "0.2.69", optional = true } +wasm-bindgen-futures = { version = "0.4.19", optional = true } + service = { package = "polkadot-service", path = "../node/service", default-features = false, optional = true } +polkadot-parachain = { path = "../parachain", optional = true } -frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "master", optional = true } -sc-cli = { git = "https://github.com/paritytech/substrate", branch = "master", optional = true } -sc-service = { git = "https://github.com/paritytech/substrate", branch = "master", optional = true } +sp-core = { git = "https://github.com/paritytech/substrate", branch = 
"polkadot-release-v0.8.27" } +sc-tracing = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +tracing-futures = "0.2.4" +frame-benchmarking-cli = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", optional = true } +sc-cli = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", optional = true } +sc-service = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", optional = true } +browser-utils = { package = "substrate-browser-utils", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", optional = true } -wasm-bindgen = { version = "0.2.57", optional = true } -wasm-bindgen-futures = { version = "0.4.7", optional = true } -browser-utils = { package = "substrate-browser-utils", git = "https://github.com/paritytech/substrate", branch = "master", optional = true } # this crate is used only to enable `trie-memory-tracker` feature # see https://github.com/paritytech/substrate/pull/6745 -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } [build-dependencies] -substrate-build-script-utils = { git = "https://github.com/paritytech/substrate", branch = "master" } +substrate-build-script-utils = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } [features] -default = [ "wasmtime", "db", "cli", "full-node", "trie-memory-tracker" ] +default = [ "wasmtime", "db", "cli", "full-node", "trie-memory-tracker", "polkadot-parachain" ] wasmtime = [ "sc-cli/wasmtime" ] db = [ "service/db" ] cli = [ @@ -53,3 +57,4 @@ browser = [ runtime-benchmarks = [ "service/runtime-benchmarks" ] trie-memory-tracker = [ "sp-trie/memory-tracker" ] full-node = [ "service/full-node" ] +real-overseer = [ 
"service/real-overseer" ] diff --git a/cli/src/cli.rs b/cli/src/cli.rs index 9768f57e1ea7f341b19eeddec4c03bfa5263fdb2..bf9c7a242b25809e5ee8a1a8b68660dbbe33ef1f 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -52,6 +52,9 @@ pub enum Subcommand { about = "Benchmark runtime pallets." )] Benchmark(frame_benchmarking_cli::BenchmarkCmd), + + /// Key management cli utilities + Key(sc_cli::KeySubcommand), } #[allow(missing_docs)] @@ -80,23 +83,6 @@ pub struct RunCmd { #[structopt(long = "force-rococo")] pub force_rococo: bool, - /// Disable the authority discovery module on validator or sentry nodes. - /// - /// Enabled by default on validator and sentry nodes. Always disabled on non - /// validator or sentry nodes. - /// - /// When enabled: - /// - /// (1) As a validator node: Make oneself discoverable by publishing either - /// ones own network addresses, or the ones of ones sentry nodes - /// (configured via the `sentry-nodes` flag). - /// - /// (2) As a validator or sentry node: Discover addresses of validators or - /// addresses of their sentry nodes and maintain a permanent connection - /// to a subset. - #[structopt(long = "disable-authority-discovery")] - pub authority_discovery_disabled: bool, - /// Setup a GRANDPA scheduled voting pause. 
/// /// This parameter takes two values, namely a block number and a delay (in diff --git a/cli/src/command.rs b/cli/src/command.rs index eba3bce8089c1b02232e11ce4a04b74d5f691c14..85341278fb6dae71d2d36dc177fb69ba375a091c 100644 --- a/cli/src/command.rs +++ b/cli/src/command.rs @@ -125,7 +125,6 @@ pub fn run() -> Result<()> { set_default_ss58_version(chain_spec); - let authority_discovery_disabled = cli.run.authority_discovery_disabled; let grandpa_pause = if cli.run.grandpa_pause.is_empty() { None } else { @@ -147,7 +146,6 @@ pub fn run() -> Result<()> { Role::Light => service::build_light(config).map(|(task_manager, _)| task_manager), _ => service::build_full( config, - authority_discovery_disabled, service::IsCollator::No, grandpa_pause, ).map(|full| full.task_manager), @@ -218,13 +216,13 @@ pub fn run() -> Result<()> { }) }, Some(Subcommand::ValidationWorker(cmd)) => { - let _ = sc_cli::init_logger("", sc_tracing::TracingReceiver::Log, None); + let _ = sc_cli::init_logger("", sc_tracing::TracingReceiver::Log, None, false); - if cfg!(feature = "browser") { + if cfg!(feature = "browser") || cfg!(target_os = "android") { Err(sc_cli::Error::Input("Cannot run validation worker in browser".into())) } else { - #[cfg(all(not(feature = "browser"), not(feature = "service-rewr")))] - service::run_validation_worker(&cmd.mem_id)?; + #[cfg(not(any(target_os = "android", feature = "browser")))] + polkadot_parachain::wasm_executor::run_worker(&cmd.mem_id)?; Ok(()) } }, @@ -238,5 +236,6 @@ pub fn run() -> Result<()> { cmd.run::(config) }) }, + Some(Subcommand::Key(cmd)) => cmd.run(), } } diff --git a/core-primitives/Cargo.toml b/core-primitives/Cargo.toml index cd4dac0f187d1cf4ced8ba938b48521fc3bf2669..73ba85e0adf5d5dab4538cd4c0703941c52091f5 100644 --- a/core-primitives/Cargo.toml +++ b/core-primitives/Cargo.toml @@ -5,10 +5,10 @@ authors = ["Parity Technologies "] edition = "2018" [dependencies] -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", 
default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = [ "derive" ] } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +parity-scale-codec = { version = "1.3.5", default-features = false, features = [ "derive" ] } [features] default = [ "std" ] @@ -16,5 +16,5 @@ std = [ "sp-core/std", "sp-runtime/std", "sp-std/std", - "codec/std", + "parity-scale-codec/std", ] diff --git a/core-primitives/src/lib.rs b/core-primitives/src/lib.rs index d3f567f66a00ad0df3113bf299c888daba18d021..e96d0b4a5f611aacbac4b2f59f0f038517b9ba44 100644 --- a/core-primitives/src/lib.rs +++ b/core-primitives/src/lib.rs @@ -21,6 +21,7 @@ //! These core Polkadot types are used by the relay chain and the Parachains. use sp_runtime::{generic, MultiSignature, traits::{Verify, BlakeTwo256, IdentifyAccount}}; +use parity_scale_codec::{Encode, Decode}; /// The block number type used by Polkadot. /// 32-bits will allow for 136 years of blocks assuming 1 block per second. @@ -50,6 +51,21 @@ pub type ChainId = u32; /// A hash of some data used by the relay chain. pub type Hash = sp_core::H256; +/// Unit type wrapper around [`Hash`] that represents a candidate hash. +/// +/// This type is produced by [`CandidateReceipt::hash`]. +/// +/// This type makes it easy to enforce that a hash is a candidate hash on the type level. 
+#[derive(Clone, Copy, Encode, Decode, Hash, Eq, PartialEq, Debug, Default)] +pub struct CandidateHash(pub Hash); + +#[cfg(feature="std")] +impl std::fmt::Display for CandidateHash { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.0.fmt(f) + } +} + /// Index of a transaction in the relay chain. 32-bit should be plenty. pub type Nonce = u32; @@ -86,7 +102,7 @@ pub type DownwardMessage = sp_std::vec::Vec; /// A wrapped version of `DownwardMessage`. The difference is that it has attached the block number when /// the message was sent. -#[derive(codec::Encode, codec::Decode, Clone, sp_runtime::RuntimeDebug, PartialEq)] +#[derive(Encode, Decode, Clone, sp_runtime::RuntimeDebug, PartialEq)] pub struct InboundDownwardMessage { /// The block number at which this messages was put into the downward message queue. pub sent_at: BlockNumber, @@ -94,6 +110,26 @@ pub struct InboundDownwardMessage { pub msg: DownwardMessage, } +/// An HRMP message seen from the perspective of a recipient. +#[derive(Encode, Decode, Clone, sp_runtime::RuntimeDebug, PartialEq)] +pub struct InboundHrmpMessage { + /// The block number at which this message was sent. + /// Specifically, it is the block number at which the candidate that sends this message was + /// enacted. + pub sent_at: BlockNumber, + /// The message payload. + pub data: sp_std::vec::Vec, +} + +/// An HRMP message seen from the perspective of a sender. +#[derive(Encode, Decode, Clone, sp_runtime::RuntimeDebug, PartialEq, Eq, Hash)] +pub struct OutboundHrmpMessage { + /// The para that will get this message in its downward message queue. + pub recipient: Id, + /// The message payload. + pub data: sp_std::vec::Vec, +} + /// V1 primitives. 
pub mod v1 { pub use super::*; diff --git a/doc/docker.md b/doc/docker.md index c2c437a64710b85929bda11308134af73c2180d8..1d3b860a06f1706e37efd7689b2442d10d3fa6be 100644 --- a/doc/docker.md +++ b/doc/docker.md @@ -7,29 +7,29 @@ the polkadot binary, pulled from our package repository. Let´s first check the version we have. The first time you run this command, the polkadot docker image will be downloaded. This takes a bit of time and bandwidth, be patient: ```bash -docker run --rm -it parity/polkadot:latest polkadot --version +docker run --rm -it parity/polkadot:latest --version ``` You can also pass any argument/flag that polkadot supports: ```bash -docker run --rm -it parity/polkadot:latest polkadot --chain westend --name "PolkaDocker" +docker run --rm -it parity/polkadot:latest --chain westend --name "PolkaDocker" ``` Once you are done experimenting and picking the best node name :) you can start polkadot as daemon, exposes the polkadot ports and mount a volume that will keep your blockchain data locally: ```bash -docker run -d -p 30333:30333 -p 9933:9933 -v /my/local/folder:/data parity/polkadot:latest polkadot --chain westend +docker run -d -p 30333:30333 -p 9933:9933 -v /my/local/folder:/data parity/polkadot:latest --chain westend ``` Additionally if you want to have custom node name you can add the `--name "YourName"` at the end ```bash -docker run -d -p 30333:30333 -p 9933:9933 -v /my/local/folder:/data parity/polkadot:latest polkadot --chain westend --name "PolkaDocker" +docker run -d -p 30333:30333 -p 9933:9933 -v /my/local/folder:/data parity/polkadot:latest --chain westend --name "PolkaDocker" ``` ```bash -docker run -d -p 30333:30333 -p 9933:9933 -v /my/local/folder:/data parity/polkadot:latest polkadot --rpc-external --chain westend +docker run -d -p 30333:30333 -p 9933:9933 -v /my/local/folder:/data parity/polkadot:latest --rpc-external --chain westend ``` If you want to connect to rpc port 9933, then must add polkadot startup parameter: 
`--rpc-external`. @@ -70,7 +70,7 @@ If you run into issues with polkadot when using docker, please run the following (replace the tag with the appropriate one if you do not use latest): ```bash -docker run --rm -it parity/polkadot:latest polkadot --version +docker run --rm -it parity/polkadot:latest --version ``` This will show you the polkadot version as well as the git commit ref that was used to build your container. diff --git a/erasure-coding/Cargo.toml b/erasure-coding/Cargo.toml index 0983db3c1d4830acd4922c55f4e566a31cb96fee..c1a8c9b41264927fd8c9568b6b24c399fba8586d 100644 --- a/erasure-coding/Cargo.toml +++ b/erasure-coding/Cargo.toml @@ -1,13 +1,13 @@ [package] name = "polkadot-erasure-coding" -version = "0.8.26" +version = "0.8.27" authors = ["Parity Technologies "] edition = "2018" [dependencies] primitives = { package = "polkadot-primitives", path = "../primitives" } -reed_solomon = { package = "reed-solomon-erasure", version = "4.0.2"} -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -trie = { package = "sp-trie", git = "https://github.com/paritytech/substrate", branch = "master" } -thiserror = "1.0.21" +reed_solomon = { package = "reed-solomon-erasure", version = "4.0.2" } +parity-scale-codec = { version = "1.3.5", default-features = false, features = ["derive"] } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +trie = { package = "sp-trie", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +thiserror = "1.0.22" diff --git a/erasure-coding/fuzzer/Cargo.lock b/erasure-coding/fuzzer/Cargo.lock new file mode 100644 index 0000000000000000000000000000000000000000..4af40a99bbf6a506955f61ab6b4df0aed59cec9c --- /dev/null +++ b/erasure-coding/fuzzer/Cargo.lock @@ -0,0 +1,2621 @@ +# This file is automatically @generated by Cargo. 
+# It is not intended for manual editing. +[[package]] +name = "Inflector" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" +dependencies = [ + "lazy_static", + "regex", +] + +[[package]] +name = "addr2line" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c0929d69e78dd9bf5408269919fcbcaeb2e35e5d43e5815517cdc6a8e11a423" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e" + +[[package]] +name = "ahash" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8fd72866655d1904d6b0997d0b07ba561047d070fbe29de039031c641b61217" + +[[package]] +name = "aho-corasick" +version = "0.7.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5" +dependencies = [ + "memchr", +] + +[[package]] +name = "ansi_term" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" +dependencies = [ + "winapi", +] + +[[package]] +name = "anyhow" +version = "1.0.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf8dcb5b4bbaa28653b647d8c77bd4ed40183b48882e130c1f1ffb73de069fd7" + +[[package]] +name = "arbitrary" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db55d72333851e17d572bec876e390cd3b11eb1ef53ae821dd9f3b653d2b4569" + +[[package]] +name = "arrayref" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" + +[[package]] +name = 
"arrayvec" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9" +dependencies = [ + "nodrop", +] + +[[package]] +name = "arrayvec" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" + +[[package]] +name = "async-trait" +version = "0.1.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d3a45e77e34375a7923b1e8febb049bb011f064714a8e17a1a616fef01da13d" +dependencies = [ + "proc-macro2 1.0.24", + "quote 1.0.7", + "syn 1.0.51", +] + +[[package]] +name = "autocfg" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" + +[[package]] +name = "backtrace" +version = "0.3.55" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef5140344c85b01f9bbb4d4b7288a8aa4b3287ccef913a14bcc78a1063623598" +dependencies = [ + "addr2line", + "cfg-if 1.0.0", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] + +[[package]] +name = "base58" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5024ee8015f02155eee35c711107ddd9a9bf3cb689cf2a9089c97e79b6e1ae83" + +[[package]] +name = "bitflags" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" + +[[package]] +name = "bitvec" +version = "0.17.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41262f11d771fd4a61aa3ce019fca363b4b6c282fca9da2a31186d3965a47a5c" +dependencies = [ + "either", + "radium", +] + +[[package]] +name = "blake2-rfc" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400" +dependencies = [ + "arrayvec 0.4.12", + "constant_time_eq", +] + +[[package]] +name = "block-buffer" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" +dependencies = [ + "block-padding", + "byte-tools", + "byteorder", + "generic-array 0.12.3", +] + +[[package]] +name = "block-buffer" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" +dependencies = [ + "generic-array 0.14.4", +] + +[[package]] +name = "block-padding" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" +dependencies = [ + "byte-tools", +] + +[[package]] +name = "byte-slice-cast" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0a5e3906bcbf133e33c1d4d95afc664ad37fbdb9f6568d8043e7ea8c27d93d3" + +[[package]] +name = "byte-tools" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" + +[[package]] +name = "byteorder" +version = "1.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" + +[[package]] +name = "bytes" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "206fdffcfa2df7cbe15601ef46c813fce0965eb3286db6b56c583b814b51c81c" +dependencies = [ + "byteorder", + "iovec", +] + +[[package]] +name = "cc" +version = "1.0.65" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95752358c8f7552394baf48cd82695b345628ad3f170d607de3ca03b8dacca15" + +[[package]] +name = "cfg-if" +version 
= "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "chrono" +version = "0.4.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" +dependencies = [ + "libc", + "num-integer", + "num-traits 0.2.14", + "time", + "winapi", +] + +[[package]] +name = "cloudabi" +version = "0.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" +dependencies = [ + "bitflags", +] + +[[package]] +name = "cloudabi" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4344512281c643ae7638bbabc3af17a11307803ec8f0fcad9fae512a8bf36467" +dependencies = [ + "bitflags", +] + +[[package]] +name = "constant_time_eq" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" + +[[package]] +name = "cpuid-bool" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8aebca1129a03dc6dc2b127edd729435bbc4a37e1d5f4d7513165089ceb02634" + +[[package]] +name = "crunchy" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" + +[[package]] +name = "crypto-mac" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5" +dependencies = [ + "generic-array 0.12.3", + "subtle 1.0.0", +] + +[[package]] +name = 
"crypto-mac" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" +dependencies = [ + "generic-array 0.14.4", + "subtle 2.3.0", +] + +[[package]] +name = "curve25519-dalek" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d85653f070353a16313d0046f173f70d1aadd5b42600a14de626f0dfb3473a5" +dependencies = [ + "byteorder", + "digest 0.8.1", + "rand_core 0.5.1", + "subtle 2.3.0", + "zeroize", +] + +[[package]] +name = "curve25519-dalek" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8492de420e9e60bc9a1d66e2dbb91825390b738a388606600663fc529b4b307" +dependencies = [ + "byteorder", + "digest 0.9.0", + "rand_core 0.5.1", + "subtle 2.3.0", + "zeroize", +] + +[[package]] +name = "derive_more" +version = "0.99.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41cb0e6161ad61ed084a36ba71fbba9e3ac5aee3606fb607fe08da6acbcf3d8c" +dependencies = [ + "proc-macro2 1.0.24", + "quote 1.0.7", + "syn 1.0.51", +] + +[[package]] +name = "digest" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" +dependencies = [ + "generic-array 0.12.3", +] + +[[package]] +name = "digest" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" +dependencies = [ + "generic-array 0.14.4", +] + +[[package]] +name = "dyn-clonable" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e9232f0e607a262ceb9bd5141a3dfb3e4db6994b31989bbfd845878cba59fd4" +dependencies = [ + "dyn-clonable-impl", + "dyn-clone", +] + +[[package]] +name = "dyn-clonable-impl" +version = "0.9.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "558e40ea573c374cf53507fd240b7ee2f5477df7cfebdb97323ec61c719399c5" +dependencies = [ + "proc-macro2 1.0.24", + "quote 1.0.7", + "syn 1.0.51", +] + +[[package]] +name = "dyn-clone" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d55796afa1b20c2945ca8eabfc421839f2b766619209f1ede813cf2484f31804" + +[[package]] +name = "ed25519" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37c66a534cbb46ab4ea03477eae19d5c22c01da8258030280b7bd9d8433fb6ef" +dependencies = [ + "signature", +] + +[[package]] +name = "ed25519-dalek" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" +dependencies = [ + "curve25519-dalek 3.0.0", + "ed25519", + "rand 0.7.3", + "serde", + "sha2 0.9.2", + "zeroize", +] + +[[package]] +name = "either" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" + +[[package]] +name = "enum_primitive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4551092f4d519593039259a9ed8daedf0da12e5109c5280338073eaeb81180" +dependencies = [ + "num-traits 0.1.43", +] + +[[package]] +name = "environmental" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6576a1755ddffd988788025e75bce9e74b018f7cc226198fe931d077911c6d7e" + +[[package]] +name = "erasure_coding_fuzzer" +version = "0.1.0" +dependencies = [ + "honggfuzz", + "polkadot-erasure-coding", + "polkadot-primitives", +] + +[[package]] +name = "fake-simd" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" + +[[package]] +name = 
"fixed-hash" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11498d382790b7a8f2fd211780bec78619bba81cdad3a283997c0c41f836759c" +dependencies = [ + "byteorder", + "rand 0.7.3", + "rustc-hex", + "static_assertions", +] + +[[package]] +name = "frame-metadata" +version = "12.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#cff25fbc37c9fc564f8816eefdcd8dce15e1606b" +dependencies = [ + "parity-scale-codec", + "serde", + "sp-core", + "sp-std", +] + +[[package]] +name = "frame-support" +version = "2.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#cff25fbc37c9fc564f8816eefdcd8dce15e1606b" +dependencies = [ + "bitflags", + "frame-metadata", + "frame-support-procedural", + "impl-trait-for-tuples", + "log", + "once_cell", + "parity-scale-codec", + "paste", + "serde", + "smallvec", + "sp-arithmetic", + "sp-core", + "sp-inherents", + "sp-io", + "sp-runtime", + "sp-state-machine", + "sp-std", + "sp-tracing", +] + +[[package]] +name = "frame-support-procedural" +version = "2.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#cff25fbc37c9fc564f8816eefdcd8dce15e1606b" +dependencies = [ + "frame-support-procedural-tools", + "proc-macro2 1.0.24", + "quote 1.0.7", + "syn 1.0.51", +] + +[[package]] +name = "frame-support-procedural-tools" +version = "2.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#cff25fbc37c9fc564f8816eefdcd8dce15e1606b" +dependencies = [ + "frame-support-procedural-tools-derive", + "proc-macro-crate", + "proc-macro2 1.0.24", + "quote 1.0.7", + "syn 1.0.51", +] + +[[package]] +name = "frame-support-procedural-tools-derive" +version = "2.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#cff25fbc37c9fc564f8816eefdcd8dce15e1606b" +dependencies = [ + "proc-macro2 1.0.24", + "quote 1.0.7", + "syn 1.0.51", +] + +[[package]] +name = "frame-system" +version = "2.0.0" +source = 
"git+https://github.com/paritytech/substrate?branch=master#cff25fbc37c9fc564f8816eefdcd8dce15e1606b" +dependencies = [ + "frame-support", + "impl-trait-for-tuples", + "parity-scale-codec", + "serde", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", + "sp-version", +] + +[[package]] +name = "fuchsia-cprng" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" + +[[package]] +name = "futures" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b3b0c040a1fe6529d30b3c5944b280c7f0dcb2930d2c3062bca967b602583d0" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b7109687aa4e177ef6fe84553af6280ef2778bdb7783ba44c9dc3399110fe64" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "847ce131b72ffb13b6109a221da9ad97a64cbe48feb1028356b836b47b8f1748" + +[[package]] +name = "futures-executor" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4caa2b2b68b880003057c1dd49f1ed937e38f22fcf6c212188a121f08cf40a65" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", + "num_cpus", +] + +[[package]] +name = "futures-io" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "611834ce18aaa1bd13c4b374f5d653e1027cf99b6b502584ff8c9a64413b30bb" + +[[package]] +name = "futures-macro" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77408a692f1f97bcc61dc001d752e00643408fbc922e4d634c655df50d595556" +dependencies 
= [ + "proc-macro-hack", + "proc-macro2 1.0.24", + "quote 1.0.7", + "syn 1.0.51", +] + +[[package]] +name = "futures-sink" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f878195a49cee50e006b02b93cf7e0a95a38ac7b776b4c4d9cc1207cd20fcb3d" + +[[package]] +name = "futures-task" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c554eb5bf48b2426c4771ab68c6b14468b6e76cc90996f528c3338d761a4d0d" +dependencies = [ + "once_cell", +] + +[[package]] +name = "futures-util" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d304cff4a7b99cfb7986f7d43fbe93d175e72e704a8860787cc95e9ffd85cbd2" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project", + "pin-utils", + "proc-macro-hack", + "proc-macro-nested", + "slab", +] + +[[package]] +name = "gcc" +version = "0.3.55" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f5f3913fa0bfe7ee1fd8248b6b9f42a5af4b9d65ec2dd2c3c26132b950ecfc2" + +[[package]] +name = "generator" +version = "0.6.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8cdc09201b2e8ca1b19290cf7e65de2246b8e91fb6874279722189c4de7b94dc" +dependencies = [ + "cc", + "libc", + "log", + "rustc_version", + "winapi", +] + +[[package]] +name = "generic-array" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" +dependencies = [ + "typenum", +] + +[[package]] +name = "generic-array" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getrandom" +version = "0.1.15" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6" +dependencies = [ + "cfg-if 0.1.10", + "libc", + "wasi 0.9.0+wasi-snapshot-preview1", +] + +[[package]] +name = "gimli" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce" + +[[package]] +name = "hash-db" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d23bd4e7b5eda0d0f3a307e8b381fdc8ba9000f26fbe912250c0a4cc3956364a" + +[[package]] +name = "hash256-std-hasher" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92c171d55b98633f4ed3860808f004099b36c1cc29c42cfc53aa8591b21efcf2" +dependencies = [ + "crunchy", +] + +[[package]] +name = "hashbrown" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e91b62f79061a0bc2e046024cb7ba44b08419ed238ecbd9adbd787434b9e8c25" +dependencies = [ + "ahash", + "autocfg", +] + +[[package]] +name = "hermit-abi" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aca5565f760fb5b220e499d72710ed156fdb74e631659e99377d9ebfbd13ae8" +dependencies = [ + "libc", +] + +[[package]] +name = "hex" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "644f9158b2f133fd50f5fb3242878846d9eb792e445c893805ff0e3824006e35" + +[[package]] +name = "hmac" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dcb5e64cda4c23119ab41ba960d1e170a774c8e4b9d9e6a9bc18aabf5e59695" +dependencies = [ + "crypto-mac 0.7.0", + "digest 0.8.1", +] + +[[package]] +name = "hmac" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" +dependencies = [ + 
"crypto-mac 0.8.0", + "digest 0.9.0", +] + +[[package]] +name = "hmac-drbg" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6e570451493f10f6581b48cdd530413b63ea9e780f544bfd3bdcaa0d89d1a7b" +dependencies = [ + "digest 0.8.1", + "generic-array 0.12.3", + "hmac 0.7.1", +] + +[[package]] +name = "honggfuzz" +version = "0.5.51" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f085725a5828d7e959f014f624773094dfe20acc91be310ef106923c30594bc" +dependencies = [ + "arbitrary", + "lazy_static", + "memmap", +] + +[[package]] +name = "impl-codec" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1be51a921b067b0eaca2fad532d9400041561aa922221cc65f95a85641c6bf53" +dependencies = [ + "parity-scale-codec", +] + +[[package]] +name = "impl-serde" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b47ca4d2b6931707a55fce5cf66aff80e2178c8b63bbb4ecb5695cbc870ddf6f" +dependencies = [ + "serde", +] + +[[package]] +name = "impl-trait-for-tuples" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ef5550a42e3740a0e71f909d4c861056a284060af885ae7aa6242820f920d9d" +dependencies = [ + "proc-macro2 1.0.24", + "quote 1.0.7", + "syn 1.0.51", +] + +[[package]] +name = "instant" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec" +dependencies = [ + "cfg-if 1.0.0", +] + +[[package]] +name = "integer-sqrt" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "276ec31bcb4a9ee45f58bec6f9ec700ae4cf4f4f8f2fa7e06cb406bd5ffdd770" +dependencies = [ + "num-traits 0.2.14", +] + +[[package]] +name = "iovec" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" +dependencies = [ + "libc", +] + +[[package]] +name = "itoa" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6" + +[[package]] +name = "keccak" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d58d1b70b004888f764dfbf6a26a3b0342a1632d33968e4a179d8011c760614" + +[[package]] +name = "libsecp256k1" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fc1e2c808481a63dc6da2074752fdd4336a3c8fcc68b83db6f1fd5224ae7962" +dependencies = [ + "arrayref", + "crunchy", + "digest 0.8.1", + "hmac-drbg", + "rand 0.7.3", + "sha2 0.8.2", + "subtle 2.3.0", + "typenum", +] + +[[package]] +name = "lock_api" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75" +dependencies = [ + "scopeguard", +] + +[[package]] +name = "lock_api" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd96ffd135b2fd7b973ac026d28085defbe8983df057ced3eb4f2130b0831312" +dependencies = [ + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fabed175da42fed1fa0746b0ea71f412aa9d35e76e95e59b192c64b9dc2bf8b" +dependencies = [ + "cfg-if 0.1.10", +] + +[[package]] +name = "loom" +version = "0.3.6" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0e8460f2f2121162705187214720353c517b97bdfb3494c0b1e33d83ebe4bed" +dependencies = [ + "cfg-if 0.1.10", + "generator", + "scoped-tls", + "serde", + "serde_json", +] + +[[package]] +name = "matchers" +version = "0.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1" +dependencies = [ + "regex-automata", +] + +[[package]] +name = "memchr" +version = "2.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" + +[[package]] +name = "memmap" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "memory-db" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36f36ddb0b2cdc25d38babba472108798e3477f02be5165f038c5e393e50c57a" +dependencies = [ + "hash-db", + "hashbrown", + "parity-util-mem", +] + +[[package]] +name = "memory_units" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71d96e3f3c0b6325d8ccd83c33b28acb183edcb6c67938ba104ec546854b0882" + +[[package]] +name = "memrange" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc29ba65898edc4fdc252cb31cd3925f37c1a8ba25bb46eec883569984976530" +dependencies = [ + "rustc-serialize", +] + +[[package]] +name = "merlin" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6feca46f4fa3443a01769d768727f10c10a20fdb65e52dc16a81f0c8269bb78" +dependencies = [ + "byteorder", + "keccak", + "rand_core 0.5.1", + "zeroize", +] + +[[package]] +name = "miniz_oxide" +version = "0.4.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f2d26ec3309788e423cfbf68ad1800f061638098d76a83681af979dc4eda19d" +dependencies = [ + "adler", + "autocfg", +] + +[[package]] +name = "nix" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7fd5681d13fda646462cfbd4e5f2051279a89a544d50eb98c365b507246839f" +dependencies = [ + "bitflags", + "bytes", + "cfg-if 0.1.10", + "gcc", + "libc", + "void", +] + +[[package]] +name = "nodrop" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" + +[[package]] +name = "num-bigint" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" +dependencies = [ + "autocfg", + "num-integer", + "num-traits 0.2.14", +] + +[[package]] +name = "num-integer" +version = "0.1.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" +dependencies = [ + "autocfg", + "num-traits 0.2.14", +] + +[[package]] +name = "num-rational" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" +dependencies = [ + "autocfg", + "num-bigint", + "num-integer", + "num-traits 0.2.14", +] + +[[package]] +name = "num-traits" +version = "0.1.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92e5113e9fd4cc14ded8e499429f396a20f98c772a47cc8622a736e1ec843c31" +dependencies = [ + "num-traits 0.2.14", +] + +[[package]] +name = "num-traits" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" +dependencies = [ + "autocfg", +] + +[[package]] +name = 
"num_cpus" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "object" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d3b63360ec3cb337817c2dbd47ab4a0f170d285d8e5a2064600f3def1402397" + +[[package]] +name = "once_cell" +version = "1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13bd41f508810a131401606d54ac32a467c97172d74ba7662562ebba5ad07fa0" +dependencies = [ + "parking_lot 0.11.1", +] + +[[package]] +name = "opaque-debug" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" + +[[package]] +name = "opaque-debug" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" + +[[package]] +name = "parity-scale-codec" +version = "1.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c740e5fbcb6847058b40ac7e5574766c6388f585e184d769910fe0d3a2ca861" +dependencies = [ + "arrayvec 0.5.2", + "bitvec", + "byte-slice-cast", + "parity-scale-codec-derive", + "serde", +] + +[[package]] +name = "parity-scale-codec-derive" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "198db82bb1c18fc00176004462dd809b2a6d851669550aa17af6dacd21ae0c14" +dependencies = [ + "proc-macro-crate", + "proc-macro2 1.0.24", + "quote 1.0.7", + "syn 1.0.51", +] + +[[package]] +name = "parity-util-mem" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "297ff91fa36aec49ce183484b102f6b75b46776822bd81525bfc4cc9b0dd0f5c" +dependencies = [ + "cfg-if 0.1.10", + "hashbrown", + "impl-trait-for-tuples", + 
"parity-util-mem-derive", + "parking_lot 0.10.2", + "primitive-types", + "winapi", +] + +[[package]] +name = "parity-util-mem-derive" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2" +dependencies = [ + "proc-macro2 1.0.24", + "syn 1.0.51", + "synstructure", +] + +[[package]] +name = "parity-wasm" +version = "0.41.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddfc878dac00da22f8f61e7af3157988424567ab01d9920b962ef7dcbd7cd865" + +[[package]] +name = "parking_lot" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3a704eb390aafdc107b0e392f56a82b668e3a71366993b5340f5833fd62505e" +dependencies = [ + "lock_api 0.3.4", + "parking_lot_core 0.7.2", +] + +[[package]] +name = "parking_lot" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb" +dependencies = [ + "instant", + "lock_api 0.4.2", + "parking_lot_core 0.8.0", +] + +[[package]] +name = "parking_lot_core" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d58c7c768d4ba344e3e8d72518ac13e259d7c7ade24167003b8488e10b6740a3" +dependencies = [ + "cfg-if 0.1.10", + "cloudabi 0.0.3", + "libc", + "redox_syscall", + "smallvec", + "winapi", +] + +[[package]] +name = "parking_lot_core" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c361aa727dd08437f2f1447be8b59a33b0edd15e0fcee698f935613d9efbca9b" +dependencies = [ + "cfg-if 0.1.10", + "cloudabi 0.1.0", + "instant", + "libc", + "redox_syscall", + "smallvec", + "winapi", +] + +[[package]] +name = "paste" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45ca20c77d80be666aef2b45486da86238fabe33e38306bd3118fe4af33fa880" +dependencies 
= [ + "paste-impl", + "proc-macro-hack", +] + +[[package]] +name = "paste-impl" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d95a7db200b97ef370c8e6de0088252f7e0dfff7d047a28528e47456c0fc98b6" +dependencies = [ + "proc-macro-hack", +] + +[[package]] +name = "pbkdf2" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "006c038a43a45995a9670da19e67600114740e8511d4333bf97a56e66a7542d9" +dependencies = [ + "byteorder", + "crypto-mac 0.7.0", +] + +[[package]] +name = "pbkdf2" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "216eaa586a190f0a738f2f918511eecfa90f13295abec0e457cdebcceda80cbd" +dependencies = [ + "crypto-mac 0.8.0", +] + +[[package]] +name = "pin-project" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ccc2237c2c489783abd8c4c80e5450fc0e98644555b1364da68cc29aa151ca7" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8e8d2bf0b23038a4424865103a4df472855692821aab4e4f5c3312d461d9e5f" +dependencies = [ + "proc-macro2 1.0.24", + "quote 1.0.7", + "syn 1.0.51", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b063f57ec186e6140e2b8b6921e5f1bd89c7356dda5b33acc5401203ca6131c" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "polkadot-core-primitives" +version = "0.7.30" +dependencies = [ + "parity-scale-codec", + "sp-core", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "polkadot-erasure-coding" +version = "0.8.26" +dependencies = [ + "parity-scale-codec", + 
"polkadot-primitives", + "reed-solomon-erasure", + "sp-core", + "sp-trie", + "thiserror", +] + +[[package]] +name = "polkadot-parachain" +version = "0.8.26" +dependencies = [ + "derive_more", + "futures", + "log", + "parity-scale-codec", + "parking_lot 0.11.1", + "polkadot-core-primitives", + "sc-executor", + "serde", + "shared_memory", + "sp-core", + "sp-externalities", + "sp-io", + "sp-runtime", + "sp-std", + "sp-wasm-interface", + "thiserror", +] + +[[package]] +name = "polkadot-primitives" +version = "0.8.26" +dependencies = [ + "bitvec", + "frame-system", + "parity-scale-codec", + "polkadot-core-primitives", + "polkadot-parachain", + "serde", + "sp-api", + "sp-application-crypto", + "sp-arithmetic", + "sp-authority-discovery", + "sp-core", + "sp-inherents", + "sp-keystore", + "sp-runtime", + "sp-staking", + "sp-std", + "sp-trie", + "sp-version", +] + +[[package]] +name = "ppv-lite86" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" + +[[package]] +name = "primitive-types" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dd39dcacf71411ba488570da7bbc89b717225e46478b30ba99b92db6b149809" +dependencies = [ + "fixed-hash", + "impl-codec", + "impl-serde", + "uint", +] + +[[package]] +name = "proc-macro-crate" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" +dependencies = [ + "toml", +] + +[[package]] +name = "proc-macro-hack" +version = "0.5.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" + +[[package]] +name = "proc-macro-nested" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"eba180dafb9038b050a4c280019bbedf9f2467b61e5d892dcad585bb57aadc5a" + +[[package]] +name = "proc-macro2" +version = "0.4.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759" +dependencies = [ + "unicode-xid 0.1.0", +] + +[[package]] +name = "proc-macro2" +version = "1.0.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71" +dependencies = [ + "unicode-xid 0.2.1", +] + +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + +[[package]] +name = "quote" +version = "0.6.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1" +dependencies = [ + "proc-macro2 0.4.30", +] + +[[package]] +name = "quote" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37" +dependencies = [ + "proc-macro2 1.0.24", +] + +[[package]] +name = "radium" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "def50a86306165861203e7f84ecffbbdfdea79f0e51039b33de1e952358c47ac" + +[[package]] +name = "rand" +version = "0.3.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64ac302d8f83c0c1974bf758f6b041c6c8ada916fbb44a609158ca8b064cc76c" +dependencies = [ + "libc", + "rand 0.4.6", +] + +[[package]] +name = "rand" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293" +dependencies = [ + "fuchsia-cprng", + "libc", + "rand_core 0.3.1", + "rdrand", + "winapi", +] + +[[package]] +name = "rand" 
+version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +dependencies = [ + "getrandom", + "libc", + "rand_chacha", + "rand_core 0.5.1", + "rand_hc", + "rand_pcg", +] + +[[package]] +name = "rand_chacha" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" +dependencies = [ + "ppv-lite86", + "rand_core 0.5.1", +] + +[[package]] +name = "rand_core" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" +dependencies = [ + "rand_core 0.4.2", +] + +[[package]] +name = "rand_core" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" + +[[package]] +name = "rand_core" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" +dependencies = [ + "getrandom", +] + +[[package]] +name = "rand_hc" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" +dependencies = [ + "rand_core 0.5.1", +] + +[[package]] +name = "rand_pcg" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16abd0c1b639e9eb4d7c50c0b8100b0d0f849be2349829c740fe8e6eb4816429" +dependencies = [ + "rand_core 0.5.1", +] + +[[package]] +name = "rdrand" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" +dependencies = [ + "rand_core 0.3.1", +] + +[[package]] +name = "redox_syscall" +version = "0.1.57" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" + +[[package]] +name = "reed-solomon-erasure" +version = "4.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a415a013dd7c5d4221382329a5a3482566da675737494935cbbbcdec04662f9d" +dependencies = [ + "smallvec", +] + +[[package]] +name = "ref-cast" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e17626b2f4bcf35b84bf379072a66e28cfe5c3c6ae58b38e4914bb8891dabece" +dependencies = [ + "ref-cast-impl", +] + +[[package]] +name = "ref-cast-impl" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c523ccaed8ac4b0288948849a350b37d3035827413c458b6a40ddb614bb4f72" +dependencies = [ + "proc-macro2 1.0.24", + "quote 1.0.7", + "syn 1.0.51", +] + +[[package]] +name = "regex" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38cf2c13ed4745de91a5eb834e11c00bcc3709e773173b2ce4c56c9fbde04b9c" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", + "thread_local", +] + +[[package]] +name = "regex-automata" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4" +dependencies = [ + "byteorder", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.6.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b181ba2dcf07aaccad5448e8ead58db5b742cf85dfe035e2227f137a539a189" + +[[package]] +name = "rustc-demangle" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e3bad0ee36814ca07d7968269dd4b7ec89ec2da10c4bb613928d3077083c232" + +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + +[[package]] +name = "rustc-hex" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" + +[[package]] +name = "rustc-serialize" +version = "0.3.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda" + +[[package]] +name = "rustc_version" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +dependencies = [ + "semver", +] + +[[package]] +name = "ryu" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" + +[[package]] +name = "sc-executor" +version = "0.8.0" +source = "git+https://github.com/paritytech/substrate?branch=master#cff25fbc37c9fc564f8816eefdcd8dce15e1606b" +dependencies = [ + "derive_more", + "lazy_static", + "libsecp256k1", + "log", + "parity-scale-codec", + "parity-wasm", + "parking_lot 0.10.2", + "sc-executor-common", + "sc-executor-wasmi", + "sp-api", + "sp-core", + "sp-externalities", + "sp-io", + "sp-panic-handler", + "sp-runtime-interface", + "sp-serializer", + "sp-tasks", + "sp-trie", + "sp-version", + "sp-wasm-interface", + "wasmi", +] + +[[package]] +name = "sc-executor-common" +version = "0.8.0" +source = "git+https://github.com/paritytech/substrate?branch=master#cff25fbc37c9fc564f8816eefdcd8dce15e1606b" +dependencies = [ + "derive_more", + "log", + "parity-scale-codec", + "parity-wasm", + "sp-allocator", + "sp-core", + "sp-runtime-interface", + "sp-serializer", + "sp-wasm-interface", + "wasmi", +] + +[[package]] +name = "sc-executor-wasmi" +version = "0.8.0" +source = 
"git+https://github.com/paritytech/substrate?branch=master#cff25fbc37c9fc564f8816eefdcd8dce15e1606b" +dependencies = [ + "log", + "parity-scale-codec", + "sc-executor-common", + "sp-allocator", + "sp-core", + "sp-runtime-interface", + "sp-wasm-interface", + "wasmi", +] + +[[package]] +name = "schnorrkel" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "021b403afe70d81eea68f6ea12f6b3c9588e5d536a94c3bf80f15e7faa267862" +dependencies = [ + "arrayref", + "arrayvec 0.5.2", + "curve25519-dalek 2.1.0", + "getrandom", + "merlin", + "rand 0.7.3", + "rand_core 0.5.1", + "sha2 0.8.2", + "subtle 2.3.0", + "zeroize", +] + +[[package]] +name = "scoped-tls" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" + +[[package]] +name = "scopeguard" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" + +[[package]] +name = "secrecy" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0673d6a6449f5e7d12a1caf424fd9363e2af3a4953023ed455e3c4beef4597c0" +dependencies = [ + "zeroize", +] + +[[package]] +name = "semver" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +dependencies = [ + "semver-parser", +] + +[[package]] +name = "semver-parser" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" + +[[package]] +name = "serde" +version = "1.0.117" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b88fa983de7720629c9387e9f517353ed404164b1e482c970a90c1a4aaf7dc1a" +dependencies = [ + "serde_derive", +] + +[[package]] +name = 
"serde_derive" +version = "1.0.117" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbd1ae72adb44aab48f325a02444a5fc079349a8d804c1fc922aed3f7454c74e" +dependencies = [ + "proc-macro2 1.0.24", + "quote 1.0.7", + "syn 1.0.51", +] + +[[package]] +name = "serde_json" +version = "1.0.59" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcac07dbffa1c65e7f816ab9eba78eb142c6d44410f4eeba1e26e4f5dfa56b95" +dependencies = [ + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "sha2" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69" +dependencies = [ + "block-buffer 0.7.3", + "digest 0.8.1", + "fake-simd", + "opaque-debug 0.2.3", +] + +[[package]] +name = "sha2" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e7aab86fe2149bad8c507606bdb3f4ef5e7b2380eb92350f56122cca72a42a8" +dependencies = [ + "block-buffer 0.9.0", + "cfg-if 1.0.0", + "cpuid-bool", + "digest 0.9.0", + "opaque-debug 0.3.0", +] + +[[package]] +name = "sharded-slab" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b4921be914e16899a80adefb821f8ddb7974e3f1250223575a44ed994882127" +dependencies = [ + "lazy_static", + "loom", +] + +[[package]] +name = "shared_memory" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf3ab0cdff84d6c66fc9e268010ea6508e58ee942575afb66f2cf194bb218bb4" +dependencies = [ + "cfg-if 0.1.10", + "enum_primitive", + "libc", + "log", + "memrange", + "nix", + "quick-error", + "rand 0.4.6", + "shared_memory_derive", + "theban_interval_tree", + "winapi", +] + +[[package]] +name = "shared_memory_derive" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "767a14f1304be2f0b04e69860252f8ae9cfae0afaa9cc07b675147c43425dd3a" +dependencies 
= [ + "proc-macro2 0.4.30", + "quote 0.6.13", + "syn 0.15.44", +] + +[[package]] +name = "signature" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29f060a7d147e33490ec10da418795238fd7545bba241504d6b31a409f2e6210" + +[[package]] +name = "slab" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" + +[[package]] +name = "smallvec" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7acad6f34eb9e8a259d3283d1e8c1d34d7415943d4895f65cc73813c7396fc85" + +[[package]] +name = "sp-allocator" +version = "2.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#cff25fbc37c9fc564f8816eefdcd8dce15e1606b" +dependencies = [ + "derive_more", + "log", + "sp-core", + "sp-std", + "sp-wasm-interface", +] + +[[package]] +name = "sp-api" +version = "2.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#cff25fbc37c9fc564f8816eefdcd8dce15e1606b" +dependencies = [ + "hash-db", + "parity-scale-codec", + "sp-api-proc-macro", + "sp-core", + "sp-runtime", + "sp-state-machine", + "sp-std", + "sp-version", +] + +[[package]] +name = "sp-api-proc-macro" +version = "2.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#cff25fbc37c9fc564f8816eefdcd8dce15e1606b" +dependencies = [ + "blake2-rfc", + "proc-macro-crate", + "proc-macro2 1.0.24", + "quote 1.0.7", + "syn 1.0.51", +] + +[[package]] +name = "sp-application-crypto" +version = "2.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#cff25fbc37c9fc564f8816eefdcd8dce15e1606b" +dependencies = [ + "parity-scale-codec", + "serde", + "sp-core", + "sp-io", + "sp-std", +] + +[[package]] +name = "sp-arithmetic" +version = "2.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#cff25fbc37c9fc564f8816eefdcd8dce15e1606b" +dependencies = [ + 
"integer-sqrt", + "num-traits 0.2.14", + "parity-scale-codec", + "serde", + "sp-debug-derive", + "sp-std", +] + +[[package]] +name = "sp-authority-discovery" +version = "2.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#cff25fbc37c9fc564f8816eefdcd8dce15e1606b" +dependencies = [ + "parity-scale-codec", + "sp-api", + "sp-application-crypto", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "sp-core" +version = "2.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#cff25fbc37c9fc564f8816eefdcd8dce15e1606b" +dependencies = [ + "base58", + "blake2-rfc", + "byteorder", + "dyn-clonable", + "ed25519-dalek", + "futures", + "hash-db", + "hash256-std-hasher", + "hex", + "impl-serde", + "lazy_static", + "libsecp256k1", + "log", + "merlin", + "num-traits 0.2.14", + "parity-scale-codec", + "parity-util-mem", + "parking_lot 0.10.2", + "primitive-types", + "rand 0.7.3", + "regex", + "schnorrkel", + "secrecy", + "serde", + "sha2 0.8.2", + "sp-debug-derive", + "sp-externalities", + "sp-runtime-interface", + "sp-std", + "sp-storage", + "substrate-bip39", + "thiserror", + "tiny-bip39", + "tiny-keccak", + "twox-hash", + "wasmi", + "zeroize", +] + +[[package]] +name = "sp-debug-derive" +version = "2.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#cff25fbc37c9fc564f8816eefdcd8dce15e1606b" +dependencies = [ + "proc-macro2 1.0.24", + "quote 1.0.7", + "syn 1.0.51", +] + +[[package]] +name = "sp-externalities" +version = "0.8.0" +source = "git+https://github.com/paritytech/substrate?branch=master#cff25fbc37c9fc564f8816eefdcd8dce15e1606b" +dependencies = [ + "environmental", + "parity-scale-codec", + "sp-std", + "sp-storage", +] + +[[package]] +name = "sp-inherents" +version = "2.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#cff25fbc37c9fc564f8816eefdcd8dce15e1606b" +dependencies = [ + "parity-scale-codec", + "parking_lot 0.10.2", + "sp-core", + "sp-std", + "thiserror", +] + 
+[[package]] +name = "sp-io" +version = "2.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#cff25fbc37c9fc564f8816eefdcd8dce15e1606b" +dependencies = [ + "futures", + "hash-db", + "libsecp256k1", + "log", + "parity-scale-codec", + "parking_lot 0.10.2", + "sp-core", + "sp-externalities", + "sp-keystore", + "sp-runtime-interface", + "sp-state-machine", + "sp-std", + "sp-tracing", + "sp-trie", + "sp-wasm-interface", + "tracing", + "tracing-core", +] + +[[package]] +name = "sp-keystore" +version = "0.8.0" +source = "git+https://github.com/paritytech/substrate?branch=master#cff25fbc37c9fc564f8816eefdcd8dce15e1606b" +dependencies = [ + "async-trait", + "derive_more", + "futures", + "merlin", + "parity-scale-codec", + "parking_lot 0.10.2", + "schnorrkel", + "sp-core", + "sp-externalities", +] + +[[package]] +name = "sp-panic-handler" +version = "2.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#cff25fbc37c9fc564f8816eefdcd8dce15e1606b" +dependencies = [ + "backtrace", +] + +[[package]] +name = "sp-runtime" +version = "2.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#cff25fbc37c9fc564f8816eefdcd8dce15e1606b" +dependencies = [ + "either", + "hash256-std-hasher", + "impl-trait-for-tuples", + "log", + "parity-scale-codec", + "parity-util-mem", + "paste", + "rand 0.7.3", + "serde", + "sp-application-crypto", + "sp-arithmetic", + "sp-core", + "sp-io", + "sp-std", +] + +[[package]] +name = "sp-runtime-interface" +version = "2.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#cff25fbc37c9fc564f8816eefdcd8dce15e1606b" +dependencies = [ + "parity-scale-codec", + "primitive-types", + "sp-externalities", + "sp-runtime-interface-proc-macro", + "sp-std", + "sp-storage", + "sp-tracing", + "sp-wasm-interface", + "static_assertions", +] + +[[package]] +name = "sp-runtime-interface-proc-macro" +version = "2.0.0" +source = 
"git+https://github.com/paritytech/substrate?branch=master#cff25fbc37c9fc564f8816eefdcd8dce15e1606b" +dependencies = [ + "Inflector", + "proc-macro-crate", + "proc-macro2 1.0.24", + "quote 1.0.7", + "syn 1.0.51", +] + +[[package]] +name = "sp-serializer" +version = "2.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#cff25fbc37c9fc564f8816eefdcd8dce15e1606b" +dependencies = [ + "serde", + "serde_json", +] + +[[package]] +name = "sp-staking" +version = "2.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#cff25fbc37c9fc564f8816eefdcd8dce15e1606b" +dependencies = [ + "parity-scale-codec", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "sp-state-machine" +version = "0.8.0" +source = "git+https://github.com/paritytech/substrate?branch=master#cff25fbc37c9fc564f8816eefdcd8dce15e1606b" +dependencies = [ + "hash-db", + "log", + "num-traits 0.2.14", + "parity-scale-codec", + "parking_lot 0.10.2", + "rand 0.7.3", + "smallvec", + "sp-core", + "sp-externalities", + "sp-panic-handler", + "sp-std", + "sp-trie", + "thiserror", + "trie-db", + "trie-root", +] + +[[package]] +name = "sp-std" +version = "2.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#cff25fbc37c9fc564f8816eefdcd8dce15e1606b" + +[[package]] +name = "sp-storage" +version = "2.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#cff25fbc37c9fc564f8816eefdcd8dce15e1606b" +dependencies = [ + "impl-serde", + "parity-scale-codec", + "ref-cast", + "serde", + "sp-debug-derive", + "sp-std", +] + +[[package]] +name = "sp-tasks" +version = "2.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#cff25fbc37c9fc564f8816eefdcd8dce15e1606b" +dependencies = [ + "log", + "sp-core", + "sp-externalities", + "sp-io", + "sp-runtime-interface", + "sp-std", +] + +[[package]] +name = "sp-tracing" +version = "2.0.0" +source = 
"git+https://github.com/paritytech/substrate?branch=master#cff25fbc37c9fc564f8816eefdcd8dce15e1606b" +dependencies = [ + "log", + "parity-scale-codec", + "sp-std", + "tracing", + "tracing-core", + "tracing-subscriber", +] + +[[package]] +name = "sp-trie" +version = "2.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#cff25fbc37c9fc564f8816eefdcd8dce15e1606b" +dependencies = [ + "hash-db", + "memory-db", + "parity-scale-codec", + "sp-core", + "sp-std", + "trie-db", + "trie-root", +] + +[[package]] +name = "sp-version" +version = "2.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#cff25fbc37c9fc564f8816eefdcd8dce15e1606b" +dependencies = [ + "impl-serde", + "parity-scale-codec", + "serde", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "sp-wasm-interface" +version = "2.0.0" +source = "git+https://github.com/paritytech/substrate?branch=master#cff25fbc37c9fc564f8816eefdcd8dce15e1606b" +dependencies = [ + "impl-trait-for-tuples", + "parity-scale-codec", + "sp-std", + "wasmi", +] + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "substrate-bip39" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bed6646a0159b9935b5d045611560eeef842b78d7adc3ba36f5ca325a13a0236" +dependencies = [ + "hmac 0.7.1", + "pbkdf2 0.3.0", + "schnorrkel", + "sha2 0.8.2", + "zeroize", +] + +[[package]] +name = "subtle" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" + +[[package]] +name = "subtle" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "343f3f510c2915908f155e94f17220b19ccfacf2a64a2a5d8004f2c3e311e7fd" + +[[package]] +name = "syn" +version = "0.15.44" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ca4b3b69a77cbe1ffc9e198781b7acb0c7365a883670e8f1c1bc66fba79a5c5" +dependencies = [ + "proc-macro2 0.4.30", + "quote 0.6.13", + "unicode-xid 0.1.0", +] + +[[package]] +name = "syn" +version = "1.0.51" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b4f34193997d92804d359ed09953e25d5138df6bcc055a71bf68ee89fdf9223" +dependencies = [ + "proc-macro2 1.0.24", + "quote 1.0.7", + "unicode-xid 0.2.1", +] + +[[package]] +name = "synstructure" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" +dependencies = [ + "proc-macro2 1.0.24", + "quote 1.0.7", + "syn 1.0.51", + "unicode-xid 0.2.1", +] + +[[package]] +name = "theban_interval_tree" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7b42a5385db9a651628091edcd1d58ac9cb1c92327d8cd2a29bf8e35bdfe4ea" +dependencies = [ + "memrange", + "rand 0.3.23", + "time", +] + +[[package]] +name = "thiserror" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e9ae34b84616eedaaf1e9dd6026dbe00dcafa92aa0c8077cb69df1fcfe5e53e" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ba20f23e85b10754cd195504aebf6a27e2e6cbe28c17778a0c930724628dd56" +dependencies = [ + "proc-macro2 1.0.24", + "quote 1.0.7", + "syn 1.0.51", +] + +[[package]] +name = "thread_local" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "time" +version = "0.1.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" +dependencies = [ + "libc", + "wasi 0.10.0+wasi-snapshot-preview1", + "winapi", +] + +[[package]] +name = "tiny-bip39" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9e44c4759bae7f1032e286a7ef990bd9ed23fe831b7eeba0beb97484c2e59b8" +dependencies = [ + "anyhow", + "hmac 0.8.1", + "once_cell", + "pbkdf2 0.4.0", + "rand 0.7.3", + "rustc-hash", + "sha2 0.9.2", + "thiserror", + "unicode-normalization", + "zeroize", +] + +[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + +[[package]] +name = "tinyvec" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf8dbc19eb42fba10e8feaaec282fb50e2c14b2726d6301dbfeed0f73306a6f" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" + +[[package]] +name = "toml" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75cf45bb0bef80604d001caaec0d09da99611b3c0fd39d3080468875cdb65645" +dependencies = [ + "serde", +] + +[[package]] +name = "tracing" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f47026cdc4080c07e49b37087de021820269d996f581aac150ef9e5583eefe3" +dependencies = [ + "cfg-if 1.0.0", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80e0ccfc3378da0cce270c946b676a376943f5cd16aeba64568e7939806f4ada" +dependencies = [ + "proc-macro2 1.0.24", + "quote 
1.0.7", + "syn 1.0.51", +] + +[[package]] +name = "tracing-core" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f50de3927f93d202783f4513cda820ab47ef17f624b03c096e86ef00c67e6b5f" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "tracing-log" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e0f8c7178e13481ff6765bd169b33e8d554c5d2bbede5e32c356194be02b9b9" +dependencies = [ + "lazy_static", + "log", + "tracing-core", +] + +[[package]] +name = "tracing-serde" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb65ea441fbb84f9f6748fd496cf7f63ec9af5bca94dd86456978d055e8eb28b" +dependencies = [ + "serde", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1fa8f0c8f4c594e4fc9debc1990deab13238077271ba84dd853d54902ee3401" +dependencies = [ + "ansi_term", + "chrono", + "lazy_static", + "matchers", + "regex", + "serde", + "serde_json", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", + "tracing-serde", +] + +[[package]] +name = "trie-db" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e55f7ace33d6237e14137e386f4e1672e2a5c6bbc97fef9f438581a143971f0" +dependencies = [ + "hash-db", + "hashbrown", + "log", + "rustc-hex", + "smallvec", +] + +[[package]] +name = "trie-root" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "652931506d2c1244d7217a70b99f56718a7b4161b37f04e7cd868072a99f68cd" +dependencies = [ + "hash-db", +] + +[[package]] +name = "twox-hash" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04f8ab788026715fa63b31960869617cba39117e520eb415b0139543e325ab59" +dependencies = [ + "cfg-if 0.1.10", + "rand 
0.7.3", + "static_assertions", +] + +[[package]] +name = "typenum" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33" + +[[package]] +name = "uint" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9db035e67dfaf7edd9aebfe8676afcd63eed53c8a4044fed514c8cccf1835177" +dependencies = [ + "byteorder", + "crunchy", + "rustc-hex", + "static_assertions", +] + +[[package]] +name = "unicode-normalization" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a13e63ab62dbe32aeee58d1c5408d35c36c392bba5d9d3142287219721afe606" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "unicode-xid" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" + +[[package]] +name = "unicode-xid" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" + +[[package]] +name = "version_check" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed" + +[[package]] +name = "void" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" + +[[package]] +name = "wasi" +version = "0.9.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" + +[[package]] +name = "wasi" +version = "0.10.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" + 
+[[package]] +name = "wasmi" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf617d864d25af3587aa745529f7aaa541066c876d57e050c0d0c85c61c92aff" +dependencies = [ + "libc", + "memory_units", + "num-rational", + "num-traits 0.2.14", + "parity-wasm", + "wasmi-validation", +] + +[[package]] +name = "wasmi-validation" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea78c597064ba73596099281e2f4cfc019075122a65cdda3205af94f0b264d93" +dependencies = [ + "parity-wasm", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "zeroize" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f33972566adbd2d3588b0491eb94b98b43695c4ef897903470ede4f3f5a28a" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3f369ddb18862aba61aa49bf31e74d29f0f162dec753063200e1dc084345d16" +dependencies = [ + "proc-macro2 1.0.24", + "quote 1.0.7", + "syn 1.0.51", + "synstructure", +] diff --git a/erasure-coding/fuzzer/Cargo.toml b/erasure-coding/fuzzer/Cargo.toml new file mode 100644 index 
0000000000000000000000000000000000000000..e844e9ad94c8872eee327410d4871dae98131a9f --- /dev/null +++ b/erasure-coding/fuzzer/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "erasure_coding_fuzzer" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" + +[dependencies] +polkadot-erasure-coding = { path = ".." } +honggfuzz = "0.5" +primitives = { package = "polkadot-primitives", path = "../../primitives/" } + +[[bin]] +name = "reconstruct_fuzzer" +path = "src/reconstruct.rs" + +[[bin]] +name = "round_trip" +path = "src/round_trip.rs" + +[workspace] diff --git a/erasure-coding/fuzzer/src/reconstruct.rs b/erasure-coding/fuzzer/src/reconstruct.rs new file mode 100644 index 0000000000000000000000000000000000000000..694953e58d1fa81ca1785aeb2bead76c9a2157e1 --- /dev/null +++ b/erasure-coding/fuzzer/src/reconstruct.rs @@ -0,0 +1,16 @@ +use polkadot_erasure_coding::*; +use primitives::v1::AvailableData; +use honggfuzz::fuzz; + +fn main() { + loop { + fuzz!(|data: (usize, Vec<(Vec, usize)>)| { + let (num_validators, chunk_input) = data; + let reconstructed: Result = reconstruct_v1( + num_validators, + chunk_input.iter().map(|t| (&*t.0, t.1)).collect::>() + ); + println!("reconstructed {:?}", reconstructed); + }); + } +} diff --git a/erasure-coding/fuzzer/src/round_trip.rs b/erasure-coding/fuzzer/src/round_trip.rs new file mode 100644 index 0000000000000000000000000000000000000000..141e86073b06d04a337ab566c9d7c71f630beb4f --- /dev/null +++ b/erasure-coding/fuzzer/src/round_trip.rs @@ -0,0 +1,40 @@ +use polkadot_erasure_coding::*; +use primitives::v1::{AvailableData, BlockData, PoV}; +use std::sync::Arc; +use honggfuzz::fuzz; + + +fn main() { + loop { + fuzz!(|data: &[u8]| { + let pov_block = PoV { + block_data: BlockData(data.iter().cloned().collect()), + }; + + let available_data = AvailableData { + pov: Arc::new(pov_block), + validation_data: Default::default(), + }; + let chunks = obtain_chunks_v1( + 10, + &available_data, + ).unwrap(); + + 
assert_eq!(chunks.len(), 10); + + // any 4 chunks should work. + let reconstructed: AvailableData = reconstruct_v1( + 10, + [ + (&*chunks[1], 1), + (&*chunks[4], 4), + (&*chunks[6], 6), + (&*chunks[9], 9), + ].iter().cloned(), + ).unwrap(); + + assert_eq!(reconstructed, available_data); + println!("{:?}", reconstructed); + }); + } +} diff --git a/erasure-coding/src/lib.rs b/erasure-coding/src/lib.rs index e20e36c259053ddde0e007dc91b3d3c494252591..370c228e3469ca71fbe1cecb1f2ea82f03658caa 100644 --- a/erasure-coding/src/lib.rs +++ b/erasure-coding/src/lib.rs @@ -24,7 +24,7 @@ //! f is the maximum number of faulty validators in the system. //! The data is coded so any f+1 chunks can be used to reconstruct the full data. -use codec::{Encode, Decode}; +use parity_scale_codec::{Encode, Decode}; use reed_solomon::galois_16::{self, ReedSolomon}; use primitives::v0::{self, Hash as H256, BlakeTwo256, HashT}; use primitives::v1; @@ -45,9 +45,9 @@ pub enum Error { /// Returned when there are too many validators. #[error("There are too many validators")] TooManyValidators, - /// Cannot encode something for no validators - #[error("Validator set is empty")] - EmptyValidators, + /// Cannot encode something for zero or one validator + #[error("Expected at least 2 validators")] + NotEnoughValidators, /// Cannot reconstruct: wrong number of validators. 
#[error("Validator count mismatches between encoding and decoding")] WrongValidatorCount, @@ -122,7 +122,7 @@ impl CodeParams { fn code_params(n_validators: usize) -> Result { if n_validators > MAX_VALIDATORS { return Err(Error::TooManyValidators) } - if n_validators == 0 { return Err(Error::EmptyValidators) } + if n_validators <= 1 { return Err(Error::NotEnoughValidators) } let n_faulty = n_validators.saturating_sub(1) / 3; let n_good = n_validators - n_faulty; @@ -352,12 +352,12 @@ struct ShardInput<'a, I> { cur_shard: Option<(&'a [u8], usize)>, } -impl<'a, I: Iterator> codec::Input for ShardInput<'a, I> { - fn remaining_len(&mut self) -> Result, codec::Error> { +impl<'a, I: Iterator> parity_scale_codec::Input for ShardInput<'a, I> { + fn remaining_len(&mut self) -> Result, parity_scale_codec::Error> { Ok(Some(self.remaining_len)) } - fn read(&mut self, into: &mut [u8]) -> Result<(), codec::Error> { + fn read(&mut self, into: &mut [u8]) -> Result<(), parity_scale_codec::Error> { let mut read_bytes = 0; loop { @@ -406,12 +406,9 @@ mod tests { #[test] fn test_code_params() { - assert_eq!(code_params(0), Err(Error::EmptyValidators)); + assert_eq!(code_params(0), Err(Error::NotEnoughValidators)); - assert_eq!(code_params(1), Ok(CodeParams { - data_shards: 1, - parity_shards: 0, - })); + assert_eq!(code_params(1), Err(Error::NotEnoughValidators)); assert_eq!(code_params(2), Ok(CodeParams { data_shards: 1, @@ -487,6 +484,15 @@ mod tests { assert_eq!(reconstructed, available_data); } + #[test] + fn reconstruct_does_not_panic_on_low_validator_count() { + let reconstructed = reconstruct_v1( + 1, + [].iter().cloned(), + ); + assert_eq!(reconstructed, Err(Error::NotEnoughValidators)); + } + #[test] fn construct_valid_branches() { let pov_block = PoVBlock { diff --git a/node/collation-generation/Cargo.toml b/node/collation-generation/Cargo.toml index 0e302ae6884f524812dacb3cf8b8c76d61c3661e..d0b0919d0fd4a9efbcaa6abeb774162b1608b442 100644 --- 
a/node/collation-generation/Cargo.toml +++ b/node/collation-generation/Cargo.toml @@ -5,15 +5,16 @@ authors = ["Parity Technologies "] edition = "2018" [dependencies] -futures = "0.3.5" -log = "0.4.8" +futures = "0.3.8" +tracing = "0.1.22" +tracing-futures = "0.2.4" polkadot-erasure-coding = { path = "../../erasure-coding" } polkadot-node-primitives = { path = "../primitives" } polkadot-node-subsystem = { path = "../subsystem" } polkadot-node-subsystem-util = { path = "../subsystem-util" } polkadot-primitives = { path = "../../primitives" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -thiserror = "1.0.21" +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +thiserror = "1.0.22" [dev-dependencies] polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } diff --git a/node/collation-generation/src/lib.rs b/node/collation-generation/src/lib.rs index 271d73999fc862ccb03b051169f90dc5c8da1840..b3e62cdf1ce56db32cedae800c4d9c3e6f54a846 100644 --- a/node/collation-generation/src/lib.rs +++ b/node/collation-generation/src/lib.rs @@ -74,6 +74,7 @@ impl CollationGenerationSubsystem { /// /// If `err_tx` is not `None`, errors are forwarded onto that channel as they occur. /// Otherwise, most are logged and then discarded. + #[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))] async fn run(mut self, mut ctx: Context) where Context: SubsystemContext, @@ -83,8 +84,9 @@ impl CollationGenerationSubsystem { // at any point waiting for them all, so instead, we create a channel on which they can // send those messages. We can then just monitor the channel and forward messages on it // to the overseer here, via the context. - let (sender, mut receiver) = mpsc::channel(0); + let (sender, receiver) = mpsc::channel(0); + let mut receiver = receiver.fuse(); loop { select! 
{ incoming = ctx.recv().fuse() => { @@ -92,12 +94,9 @@ impl CollationGenerationSubsystem { break; } }, - msg = receiver.next().fuse() => { + msg = receiver.next() => { if let Some(msg) = msg { - if let Err(err) = ctx.send_message(msg).await { - log::warn!(target: LOG_TARGET, "failed to forward message to overseer: {:?}", err); - break; - } + ctx.send_message(msg).await; } }, } @@ -108,6 +107,7 @@ impl CollationGenerationSubsystem { // note: this doesn't strictly need to be a separate function; it's more an administrative function // so that we don't clutter the run loop. It could in principle be inlined directly into there. // it should hopefully therefore be ok that it's an async function mutably borrowing self. + #[tracing::instrument(level = "trace", skip(self, ctx, sender), fields(subsystem = LOG_TARGET))] async fn handle_incoming( &mut self, incoming: SubsystemResult>, @@ -129,8 +129,7 @@ impl CollationGenerationSubsystem { if let Err(err) = handle_new_activations(config.clone(), &activated, ctx, metrics, sender).await { - log::warn!(target: LOG_TARGET, "failed to handle new activations: {:?}", err); - return true; + tracing::warn!(target: LOG_TARGET, err = ?err, "failed to handle new activations"); }; } false @@ -140,17 +139,17 @@ impl CollationGenerationSubsystem { msg: CollationGenerationMessage::Initialize(config), }) => { if self.config.is_some() { - log::warn!(target: LOG_TARGET, "double initialization"); - true + tracing::error!(target: LOG_TARGET, "double initialization"); } else { self.config = Some(Arc::new(config)); - false } + false } Ok(Signal(BlockFinalized(_))) => false, Err(err) => { - log::error!( + tracing::error!( target: LOG_TARGET, + err = ?err, "error receiving message from subsystem context: {:?}", err ); @@ -165,7 +164,10 @@ where Context: SubsystemContext, { fn start(self, ctx: Context) -> SpawnedSubsystem { - let future = Box::pin(self.run(ctx)); + let future = Box::pin(async move { + self.run(ctx).await; + Ok(()) + }); 
SpawnedSubsystem { name: "collation-generation-subsystem", @@ -174,6 +176,7 @@ where } } +#[tracing::instrument(level = "trace", skip(ctx, metrics, sender), fields(subsystem = LOG_TARGET))] async fn handle_new_activations( config: Arc, activated: &[Hash], @@ -184,7 +187,11 @@ async fn handle_new_activations( // follow the procedure from the guide: // https://w3f.github.io/parachain-implementers-guide/node/collators/collation-generation.html + let _overall_timer = metrics.time_new_activations(); + for relay_parent in activated.iter().copied() { + let _relay_parent_timer = metrics.time_new_activations_relay_parent(); + // double-future magic happens here: the first layer of requests takes a mutable borrow of the context, and // returns a receiver. The second layer of requests actually polls those receivers to completion. let (availability_cores, validators) = join!( @@ -196,6 +203,8 @@ async fn handle_new_activations( let n_validators = validators??.len(); for core in availability_cores { + let _availability_core_timer = metrics.time_new_activations_availability_core(); + let (scheduled_core, assumption) = match core { CoreState::Scheduled(scheduled_core) => { (scheduled_core, OccupiedCoreAssumption::Free) @@ -236,10 +245,10 @@ async fn handle_new_activations( let collation = match (task_config.collator)(relay_parent, &validation_data).await { Some(collation) => collation, None => { - log::debug!( + tracing::debug!( target: LOG_TARGET, - "collator returned no collation on collate for para_id {}.", - scheduled_core.para_id, + para_id = %scheduled_core.para_id, + "collator returned no collation on collate", ); return } @@ -261,11 +270,11 @@ async fn handle_new_activations( ) { Ok(erasure_root) => erasure_root, Err(err) => { - log::error!( + tracing::error!( target: LOG_TARGET, - "failed to calculate erasure root for para_id {}: {:?}", - scheduled_core.para_id, - err + para_id = %scheduled_core.para_id, + err = ?err, + "failed to calculate erasure root", ); return } @@ 
-273,10 +282,11 @@ async fn handle_new_activations( let commitments = CandidateCommitments { upward_messages: collation.upward_messages, + horizontal_messages: collation.horizontal_messages, new_validation_code: collation.new_validation_code, head_data: collation.head_data, - erasure_root, processed_downward_messages: collation.processed_downward_messages, + hrmp_watermark: collation.hrmp_watermark, }; let ccr = CandidateReceipt { @@ -288,6 +298,7 @@ async fn handle_new_activations( collator: task_config.key.public(), persisted_validation_data_hash, pov_hash, + erasure_root, }, }; @@ -296,11 +307,11 @@ async fn handle_new_activations( if let Err(err) = task_sender.send(AllMessages::CollatorProtocol( CollatorProtocolMessage::DistributeCollation(ccr, collation.proof_of_validity) )).await { - log::warn!( + tracing::warn!( target: LOG_TARGET, - "failed to send collation result for para_id {}: {:?}", - scheduled_core.para_id, - err + para_id = %scheduled_core.para_id, + err = ?err, + "failed to send collation result", ); } })).await?; @@ -310,6 +321,7 @@ async fn handle_new_activations( Ok(()) } +#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))] fn erasure_root( n_validators: usize, persisted_validation: PersistedValidationData, @@ -317,7 +329,7 @@ fn erasure_root( ) -> crate::error::Result { let available_data = AvailableData { validation_data: persisted_validation, - pov, + pov: Arc::new(pov), }; let chunks = polkadot_erasure_coding::obtain_chunks_v1(n_validators, &available_data)?; @@ -327,6 +339,9 @@ fn erasure_root( #[derive(Clone)] struct MetricsInner { collations_generated_total: prometheus::Counter, + new_activations_overall: prometheus::Histogram, + new_activations_per_relay_parent: prometheus::Histogram, + new_activations_per_availability_core: prometheus::Histogram, } /// CollationGenerationSubsystem metrics. 
@@ -339,6 +354,21 @@ impl Metrics { metrics.collations_generated_total.inc(); } } + + /// Provide a timer for new activations which updates on drop. + fn time_new_activations(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.new_activations_overall.start_timer()) + } + + /// Provide a timer per relay parents which updates on drop. + fn time_new_activations_relay_parent(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.new_activations_per_relay_parent.start_timer()) + } + + /// Provide a timer per availability core which updates on drop. + fn time_new_activations_availability_core(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.new_activations_per_availability_core.start_timer()) + } } impl metrics::Metrics for Metrics { @@ -351,6 +381,33 @@ impl metrics::Metrics for Metrics { )?, registry, )?, + new_activations_overall: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_collation_generation_new_activations", + "Time spent within fn handle_new_activations", + ) + )?, + registry, + )?, + new_activations_per_relay_parent: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_collation_generation_per_relay_parent", + "Time spent handling a particular relay parent within fn handle_new_activations" + ) + )?, + registry, + )?, + new_activations_per_availability_core: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_collation_generation_per_availability_core", + "Time spent handling a particular availability core for a relay parent in fn handle_new_activations", + ) + )?, + registry, + )?, }; Ok(Metrics(Some(metrics))) } @@ -381,12 +438,14 @@ mod tests { fn test_collation() -> Collation { Collation { upward_messages: Default::default(), + horizontal_messages: Default::default(), new_validation_code: Default::default(), head_data: Default::default(), proof_of_validity: PoV { block_data: 
BlockData(Vec::new()), }, processed_downward_messages: Default::default(), + hrmp_watermark: Default::default(), } } @@ -644,6 +703,7 @@ mod tests { collator: config.key.public(), persisted_validation_data_hash: expect_validation_data_hash, pov_hash: expect_pov_hash, + erasure_root: Default::default(), // this isn't something we're checking right now }; assert_eq!(sent_messages.len(), 1); @@ -670,6 +730,7 @@ mod tests { let expect_descriptor = { let mut expect_descriptor = expect_descriptor; expect_descriptor.signature = descriptor.signature.clone(); + expect_descriptor.erasure_root = descriptor.erasure_root.clone(); expect_descriptor }; assert_eq!(descriptor, &expect_descriptor); diff --git a/node/core/av-store/Cargo.toml b/node/core/av-store/Cargo.toml index 8c22eab5a75b0d107ca6041e1dd7b9b9f01402a5..c9483c0c09bdc9f6256fa542ae822dea57678be4 100644 --- a/node/core/av-store/Cargo.toml +++ b/node/core/av-store/Cargo.toml @@ -5,28 +5,30 @@ authors = ["Parity Technologies "] edition = "2018" [dependencies] -futures = "0.3.5" +futures = "0.3.8" futures-timer = "3.0.2" kvdb = "0.7.0" kvdb-rocksdb = "0.9.1" -log = "0.4.11" -thiserror = "1.0.21" +thiserror = "1.0.22" +tracing = "0.1.22" +tracing-futures = "0.2.4" -codec = { package = "parity-scale-codec", version = "1.3.1", features = ["derive"] } +parity-scale-codec = { version = "1.3.5", features = ["derive"] } erasure = { package = "polkadot-erasure-coding", path = "../../../erasure-coding" } polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } polkadot-overseer = { path = "../../overseer" } polkadot-primitives = { path = "../../../primitives" } -sc-service = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sc-service = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } [dev-dependencies] -env_logger = "0.7.1" 
-assert_matches = "1.3.0" -smallvec = "1.4.2" +log = "0.4.11" +env_logger = "0.8.2" +assert_matches = "1.4.0" +smallvec = "1.5.1" kvdb-memorydb = "0.7.0" -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } diff --git a/node/core/av-store/src/lib.rs b/node/core/av-store/src/lib.rs index d16c2b16f240f1be94fa69cd60bcb256a6770a57..72c1d9cb4ccab7629f8b7d4aff05f9af77b9bcc3 100644 --- a/node/core/av-store/src/lib.rs +++ b/node/core/av-store/src/lib.rs @@ -26,14 +26,14 @@ use std::path::PathBuf; use std::sync::Arc; use std::time::{Duration, SystemTime, SystemTimeError, UNIX_EPOCH}; -use codec::{Encode, Decode}; +use parity_scale_codec::{Encode, Decode}; use futures::{select, channel::oneshot, future::{self, Either}, Future, FutureExt}; use futures_timer::Delay; use kvdb_rocksdb::{Database, DatabaseConfig}; use kvdb::{KeyValueDB, DBTransaction}; use polkadot_primitives::v1::{ - Hash, AvailableData, BlockNumber, CandidateEvent, ErasureChunk, ValidatorIndex, + Hash, AvailableData, BlockNumber, CandidateEvent, ErasureChunk, ValidatorIndex, CandidateHash, }; use polkadot_subsystem::{ FromOverseer, OverseerSignal, SubsystemError, Subsystem, SubsystemContext, SpawnedSubsystem, @@ -57,9 +57,9 @@ mod columns { #[derive(Debug, Error)] enum Error { #[error(transparent)] - RuntimeAPI(#[from] RuntimeApiError), + RuntimeApi(#[from] RuntimeApiError), #[error(transparent)] - ChainAPI(#[from] ChainApiError), + ChainApi(#[from] ChainApiError), #[error(transparent)] Erasure(#[from] erasure::Error), #[error(transparent)] @@ -72,6 +72,18 @@ enum Error { Time(#[from] SystemTimeError), } +impl Error { + fn trace(&self) { + match self { + // don't spam the log with spurious errors + Self::RuntimeApi(_) | + Self::Oneshot(_) => 
tracing::debug!(target: LOG_TARGET, err = ?self), + // it's worth reporting otherwise + _ => tracing::warn!(target: LOG_TARGET, err = ?self), + } + } +} + /// A wrapper type for delays. #[derive(Debug, Decode, Encode, Eq)] enum PruningDelay { @@ -93,48 +105,48 @@ impl PruningDelay { fn as_duration(&self) -> Option { match self { - PruningDelay::In(d) => Some(*d), - PruningDelay::Indefinite => None, + PruningDelay::In(d) => Some(*d), + PruningDelay::Indefinite => None, } } } impl From for PruningDelay { - fn from(d: Duration) -> Self { + fn from(d: Duration) -> Self { Self::In(d) - } + } } impl PartialEq for PruningDelay { - fn eq(&self, other: &Self) -> bool { + fn eq(&self, other: &Self) -> bool { match (self, other) { - (PruningDelay::In(this), PruningDelay::In(that)) => {this == that}, - (PruningDelay::Indefinite, PruningDelay::Indefinite) => true, + (PruningDelay::In(this), PruningDelay::In(that)) => {this == that}, + (PruningDelay::Indefinite, PruningDelay::Indefinite) => true, _ => false, } - } + } } impl PartialOrd for PruningDelay { - fn partial_cmp(&self, other: &Self) -> Option { + fn partial_cmp(&self, other: &Self) -> Option { match (self, other) { - (PruningDelay::In(this), PruningDelay::In(that)) => this.partial_cmp(that), - (PruningDelay::In(_), PruningDelay::Indefinite) => Some(Ordering::Less), - (PruningDelay::Indefinite, PruningDelay::In(_)) => Some(Ordering::Greater), - (PruningDelay::Indefinite, PruningDelay::Indefinite) => Some(Ordering::Equal), + (PruningDelay::In(this), PruningDelay::In(that)) => this.partial_cmp(that), + (PruningDelay::In(_), PruningDelay::Indefinite) => Some(Ordering::Less), + (PruningDelay::Indefinite, PruningDelay::In(_)) => Some(Ordering::Greater), + (PruningDelay::Indefinite, PruningDelay::Indefinite) => Some(Ordering::Equal), } - } + } } impl Ord for PruningDelay { - fn cmp(&self, other: &Self) -> Ordering { + fn cmp(&self, other: &Self) -> Ordering { match (self, other) { - (PruningDelay::In(this), 
PruningDelay::In(that)) => this.cmp(that), - (PruningDelay::In(_), PruningDelay::Indefinite) => Ordering::Less, - (PruningDelay::Indefinite, PruningDelay::In(_)) => Ordering::Greater, - (PruningDelay::Indefinite, PruningDelay::Indefinite) => Ordering::Equal, + (PruningDelay::In(this), PruningDelay::In(that)) => this.cmp(that), + (PruningDelay::In(_), PruningDelay::Indefinite) => Ordering::Less, + (PruningDelay::Indefinite, PruningDelay::In(_)) => Ordering::Greater, + (PruningDelay::Indefinite, PruningDelay::Indefinite) => Ordering::Equal, } - } + } } /// A key for chunk pruning records. @@ -212,13 +224,13 @@ struct PruningConfig { } impl Default for PruningConfig { - fn default() -> Self { + fn default() -> Self { Self { keep_stored_block_for: KEEP_STORED_BLOCK_FOR, keep_finalized_block_for: KEEP_FINALIZED_BLOCK_FOR, keep_finalized_chunk_for: KEEP_FINALIZED_CHUNK_FOR, } - } + } } #[derive(Debug, Decode, Encode, Eq, PartialEq)] @@ -230,7 +242,7 @@ enum CandidateState { #[derive(Debug, Decode, Encode, Eq)] struct PoVPruningRecord { - candidate_hash: Hash, + candidate_hash: CandidateHash, block_number: BlockNumber, candidate_state: CandidateState, prune_at: PruningDelay, @@ -253,14 +265,14 @@ impl Ord for PoVPruningRecord { } impl PartialOrd for PoVPruningRecord { - fn partial_cmp(&self, other: &Self) -> Option { + fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) - } + } } #[derive(Debug, Decode, Encode, Eq)] struct ChunkPruningRecord { - candidate_hash: Hash, + candidate_hash: CandidateHash, block_number: BlockNumber, candidate_state: CandidateState, chunk_index: u32, @@ -299,18 +311,21 @@ pub struct AvailabilityStoreSubsystem { impl AvailabilityStoreSubsystem { // Perform pruning of PoVs + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn prune_povs(&self) -> Result<(), Error> { + let _timer = self.metrics.time_prune_povs(); + let mut tx = DBTransaction::new(); let mut pov_pruning = 
pov_pruning(&self.inner).unwrap_or_default(); let now = PruningDelay::now()?; - log::trace!(target: LOG_TARGET, "Pruning PoVs"); + tracing::trace!(target: LOG_TARGET, "Pruning PoVs"); let outdated_records_count = pov_pruning.iter() .take_while(|r| r.prune_at <= now) .count(); for record in pov_pruning.drain(..outdated_records_count) { - log::trace!(target: LOG_TARGET, "Removing record {:?}", record); + tracing::trace!(target: LOG_TARGET, record = ?record, "Removing record"); tx.delete( columns::DATA, available_data_key(&record.candidate_hash).as_slice(), @@ -323,18 +338,21 @@ impl AvailabilityStoreSubsystem { } // Perform pruning of chunks. + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn prune_chunks(&self) -> Result<(), Error> { + let _timer = self.metrics.time_prune_chunks(); + let mut tx = DBTransaction::new(); let mut chunk_pruning = chunk_pruning(&self.inner).unwrap_or_default(); let now = PruningDelay::now()?; - log::trace!(target: LOG_TARGET, "Pruning Chunks"); + tracing::trace!(target: LOG_TARGET, "Pruning Chunks"); let outdated_records_count = chunk_pruning.iter() .take_while(|r| r.prune_at <= now) .count(); for record in chunk_pruning.drain(..outdated_records_count) { - log::trace!(target: LOG_TARGET, "Removing record {:?}", record); + tracing::trace!(target: LOG_TARGET, record = ?record, "Removing record"); tx.delete( columns::DATA, erasure_chunk_key(&record.candidate_hash, record.chunk_index).as_slice(), @@ -349,6 +367,7 @@ impl AvailabilityStoreSubsystem { // Return a `Future` that either resolves when another PoV pruning has to happen // or is indefinitely `pending` in case no pruning has to be done. // Just a helper to `select` over multiple things at once. 
+ #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn maybe_prune_povs(&self) -> Result, Error> { let future = match get_next_pov_pruning_time(&self.inner) { Some(pruning) => { @@ -363,6 +382,7 @@ impl AvailabilityStoreSubsystem { // Return a `Future` that either resolves when another chunk pruning has to happen // or is indefinitely `pending` in case no pruning has to be done. // Just a helper to `select` over multiple things at once. + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn maybe_prune_chunks(&self) -> Result, Error> { let future = match get_next_chunk_pruning_time(&self.inner) { Some(pruning) => { @@ -375,11 +395,11 @@ impl AvailabilityStoreSubsystem { } } -fn available_data_key(candidate_hash: &Hash) -> Vec { +fn available_data_key(candidate_hash: &CandidateHash) -> Vec { (candidate_hash, 0i8).encode() } -fn erasure_chunk_key(candidate_hash: &Hash, index: u32) -> Vec { +fn erasure_chunk_key(candidate_hash: &CandidateHash, index: u32) -> Vec { (candidate_hash, index, 0i8).encode() } @@ -461,57 +481,72 @@ fn get_next_chunk_pruning_time(db: &Arc) -> Option(mut subsystem: AvailabilityStoreSubsystem, mut ctx: Context) - -> Result<(), Error> where Context: SubsystemContext, { loop { - // Every time the following two methods are called a read from DB is performed. - // But given that these are very small values which are essentially a newtype - // wrappers around `Duration` (`NextChunkPruning` and `NextPoVPruning`) and also the - // fact of the frequent reads itself we assume these to end up cached in the memory - // anyway and thus these db reads to be reasonably fast. - let pov_pruning_time = subsystem.maybe_prune_povs()?; - let chunk_pruning_time = subsystem.maybe_prune_chunks()?; - - let mut pov_pruning_time = pov_pruning_time.fuse(); - let mut chunk_pruning_time = chunk_pruning_time.fuse(); - - select! 
{ - incoming = ctx.recv().fuse() => { - match incoming { - Ok(FromOverseer::Signal(OverseerSignal::Conclude)) => break, - Ok(FromOverseer::Signal(OverseerSignal::ActiveLeaves( - ActiveLeavesUpdate { activated, .. }) - )) => { - for activated in activated.into_iter() { - process_block_activated(&mut ctx, &subsystem.inner, activated).await?; - } - } - Ok(FromOverseer::Signal(OverseerSignal::BlockFinalized(hash))) => { - process_block_finalized(&subsystem, &mut ctx, &subsystem.inner, hash).await?; - } - Ok(FromOverseer::Communication { msg }) => { - process_message(&mut subsystem, &mut ctx, msg).await?; + let res = run_iteration(&mut subsystem, &mut ctx).await; + match res { + Err(e) => { + e.trace(); + } + Ok(true) => { + tracing::info!(target: LOG_TARGET, "received `Conclude` signal, exiting"); + break; + }, + Ok(false) => continue, + } + } +} + +#[tracing::instrument(level = "trace", skip(subsystem, ctx), fields(subsystem = LOG_TARGET))] +async fn run_iteration(subsystem: &mut AvailabilityStoreSubsystem, ctx: &mut Context) + -> Result +where + Context: SubsystemContext, +{ + // Every time the following two methods are called a read from DB is performed. + // But given that these are very small values which are essentially a newtype + // wrappers around `Duration` (`NextChunkPruning` and `NextPoVPruning`) and also the + // fact of the frequent reads itself we assume these to end up cached in the memory + // anyway and thus these db reads to be reasonably fast. + let pov_pruning_time = subsystem.maybe_prune_povs()?; + let chunk_pruning_time = subsystem.maybe_prune_chunks()?; + + let mut pov_pruning_time = pov_pruning_time.fuse(); + let mut chunk_pruning_time = chunk_pruning_time.fuse(); + + select! { + incoming = ctx.recv().fuse() => { + match incoming? { + FromOverseer::Signal(OverseerSignal::Conclude) => return Ok(true), + FromOverseer::Signal(OverseerSignal::ActiveLeaves( + ActiveLeavesUpdate { activated, .. 
}) + ) => { + for activated in activated.into_iter() { + process_block_activated(ctx, &subsystem.inner, activated, &subsystem.metrics).await?; } - Err(e) => { - log::error!("AvailabilityStoreSubsystem err: {:#?}", e); - break - }, + } + FromOverseer::Signal(OverseerSignal::BlockFinalized(hash)) => { + process_block_finalized(subsystem, ctx, &subsystem.inner, hash).await?; + } + FromOverseer::Communication { msg } => { + process_message(subsystem, ctx, msg).await?; } } - pov_pruning_time = pov_pruning_time => { - subsystem.prune_povs()?; - } - chunk_pruning_time = chunk_pruning_time => { - subsystem.prune_chunks()?; - } - complete => break, } + _ = pov_pruning_time => { + subsystem.prune_povs()?; + } + _ = chunk_pruning_time => { + subsystem.prune_chunks()?; + } + complete => return Ok(true), } - Ok(()) + Ok(false) } /// As soon as certain block is finalized its pruning records and records of all @@ -520,6 +555,7 @@ where /// The state of data has to be changed from /// `CandidateState::Included` to `CandidateState::Finalized` and their pruning times have /// to be updated to `now` + keep_finalized_{block, chunk}_for`. +#[tracing::instrument(level = "trace", skip(subsystem, ctx, db), fields(subsystem = LOG_TARGET))] async fn process_block_finalized( subsystem: &AvailabilityStoreSubsystem, ctx: &mut Context, @@ -529,6 +565,8 @@ async fn process_block_finalized( where Context: SubsystemContext { + let _timer = subsystem.metrics.time_process_block_finalized(); + let block_number = get_block_number(ctx, hash).await?; if let Some(mut pov_pruning) = pov_pruning(db) { @@ -536,10 +574,10 @@ where // numbers we have to iterate through the whole collection here. 
for record in pov_pruning.iter_mut() { if record.block_number <= block_number { - log::trace!( + tracing::trace!( target: LOG_TARGET, - "Updating pruning record for finalized block {}", - record.candidate_hash, + block_number = %record.block_number, + "Updating pruning record for finalized block", ); record.prune_at = PruningDelay::into_the_future( @@ -555,10 +593,10 @@ where if let Some(mut chunk_pruning) = chunk_pruning(db) { for record in chunk_pruning.iter_mut() { if record.block_number <= block_number { - log::trace!( + tracing::trace!( target: LOG_TARGET, - "Updating chunk pruning record for finalized block {}", - record.candidate_hash, + block_number = %record.block_number, + "Updating chunk pruning record for finalized block", ); record.prune_at = PruningDelay::into_the_future( @@ -574,22 +612,36 @@ where Ok(()) } +#[tracing::instrument(level = "trace", skip(ctx, db, metrics), fields(subsystem = LOG_TARGET))] async fn process_block_activated( ctx: &mut Context, db: &Arc, hash: Hash, + metrics: &Metrics, ) -> Result<(), Error> where Context: SubsystemContext { - let events = request_candidate_events(ctx, hash).await?; + let _timer = metrics.time_block_activated(); + + let events = match request_candidate_events(ctx, hash).await { + Ok(events) => events, + Err(err) => { + tracing::debug!(target: LOG_TARGET, err = ?err, "requesting candidate events failed"); + return Ok(()); + } + }; - log::trace!(target: LOG_TARGET, "block activated {}", hash); + tracing::trace!(target: LOG_TARGET, hash = %hash, "block activated"); let mut included = HashSet::new(); for event in events.into_iter() { if let CandidateEvent::CandidateIncluded(receipt, _) = event { - log::trace!(target: LOG_TARGET, "Candidate {} was included", receipt.hash()); + tracing::trace!( + target: LOG_TARGET, + hash = %receipt.hash(), + "Candidate {:?} was included", receipt.hash(), + ); included.insert(receipt.hash()); } } @@ -623,6 +675,7 @@ where Ok(()) } +#[tracing::instrument(level = "trace", 
skip(ctx), fields(subsystem = LOG_TARGET))] async fn request_candidate_events( ctx: &mut Context, hash: Hash, @@ -637,11 +690,12 @@ where RuntimeApiRequest::CandidateEvents(tx), )); - ctx.send_message(msg.into()).await?; + ctx.send_message(msg.into()).await; Ok(rx.await??) } +#[tracing::instrument(level = "trace", skip(subsystem, ctx), fields(subsystem = LOG_TARGET))] async fn process_message( subsystem: &mut AvailabilityStoreSubsystem, ctx: &mut Context, @@ -651,6 +705,9 @@ where Context: SubsystemContext { use AvailabilityStoreMessage::*; + + let _timer = subsystem.metrics.time_process_message(); + match msg { QueryAvailableData(hash, tx) => { tx.send(available_data(&subsystem.inner, &hash).map(|d| d.data)) @@ -697,7 +754,10 @@ where Ok(()) } -fn available_data(db: &Arc, candidate_hash: &Hash) -> Option { +fn available_data( + db: &Arc, + candidate_hash: &CandidateHash, +) -> Option { query_inner(db, columns::DATA, &available_data_key(candidate_hash)) } @@ -709,6 +769,7 @@ fn chunk_pruning(db: &Arc) -> Option> { query_inner(db, columns::META, &CHUNK_PRUNING_KEY) } +#[tracing::instrument(level = "trace", skip(db, tx), fields(subsystem = LOG_TARGET))] fn put_pov_pruning( db: &Arc, tx: Option, @@ -749,6 +810,7 @@ fn put_pov_pruning( Ok(()) } +#[tracing::instrument(level = "trace", skip(db, tx), fields(subsystem = LOG_TARGET))] fn put_chunk_pruning( db: &Arc, tx: Option, @@ -796,18 +858,21 @@ where { let (tx, rx) = oneshot::channel(); - ctx.send_message(AllMessages::ChainApi(ChainApiMessage::BlockNumber(block_hash, tx))).await?; + ctx.send_message(AllMessages::ChainApi(ChainApiMessage::BlockNumber(block_hash, tx))).await; Ok(rx.await??.map(|number| number).unwrap_or_default()) } +#[tracing::instrument(level = "trace", skip(subsystem, available_data), fields(subsystem = LOG_TARGET))] fn store_available_data( subsystem: &mut AvailabilityStoreSubsystem, - candidate_hash: &Hash, + candidate_hash: &CandidateHash, id: Option, n_validators: u32, available_data: 
AvailableData, ) -> Result<(), Error> { + let _timer = subsystem.metrics.time_store_available_data(); + let mut tx = DBTransaction::new(); let block_number = available_data.validation_data.block_number; @@ -840,7 +905,7 @@ fn store_available_data( } let pruning_record = PoVPruningRecord { - candidate_hash: candidate_hash.clone(), + candidate_hash: *candidate_hash, block_number, candidate_state: CandidateState::Stored, prune_at, @@ -867,13 +932,16 @@ fn store_available_data( Ok(()) } +#[tracing::instrument(level = "trace", skip(subsystem), fields(subsystem = LOG_TARGET))] fn store_chunk( subsystem: &mut AvailabilityStoreSubsystem, - candidate_hash: &Hash, + candidate_hash: &CandidateHash, _n_validators: u32, chunk: ErasureChunk, block_number: BlockNumber, ) -> Result<(), Error> { + let _timer = subsystem.metrics.time_store_chunk(); + let mut tx = DBTransaction::new(); let dbkey = erasure_chunk_key(candidate_hash, chunk.index); @@ -918,11 +986,14 @@ fn store_chunk( Ok(()) } +#[tracing::instrument(level = "trace", skip(subsystem), fields(subsystem = LOG_TARGET))] fn get_chunk( subsystem: &mut AvailabilityStoreSubsystem, - candidate_hash: &Hash, + candidate_hash: &CandidateHash, index: u32, ) -> Result, Error> { + let _timer = subsystem.metrics.time_get_chunk(); + if let Some(chunk) = query_inner( &subsystem.inner, columns::DATA, @@ -949,7 +1020,11 @@ fn get_chunk( Ok(None) } -fn query_inner(db: &Arc, column: u32, key: &[u8]) -> Option { +fn query_inner( + db: &Arc, + column: u32, + key: &[u8], +) -> Option { match db.get(column, key) { Ok(Some(raw)) => { let res = D::decode(&mut &raw[..]).expect("all stored data serialized correctly; qed"); @@ -957,7 +1032,7 @@ fn query_inner(db: &Arc, column: u32, key: &[u8]) -> } Ok(None) => None, Err(e) => { - log::warn!(target: LOG_TARGET, "Error reading from the availability store: {:?}", e); + tracing::warn!(target: LOG_TARGET, err = ?e, "Error reading from the availability store"); None } } @@ -969,9 +1044,7 @@ where { fn 
start(self, ctx: Context) -> SpawnedSubsystem { let future = run(self, ctx) - .map(|r| if let Err(e) = r { - log::error!(target: "availabilitystore", "Subsystem exited with an error {:?}", e); - }) + .map(|_| Ok(())) .boxed(); SpawnedSubsystem { @@ -981,6 +1054,7 @@ where } } +#[tracing::instrument(level = "trace", skip(metrics), fields(subsystem = LOG_TARGET))] fn get_chunks(data: &AvailableData, n_validators: usize, metrics: &Metrics) -> Result, Error> { let chunks = erasure::obtain_chunks_v1(n_validators, data)?; metrics.on_chunks_received(chunks.len()); @@ -1002,6 +1076,14 @@ fn get_chunks(data: &AvailableData, n_validators: usize, metrics: &Metrics) -> R #[derive(Clone)] struct MetricsInner { received_availability_chunks_total: prometheus::Counter, + prune_povs: prometheus::Histogram, + prune_chunks: prometheus::Histogram, + process_block_finalized: prometheus::Histogram, + block_activated: prometheus::Histogram, + process_message: prometheus::Histogram, + store_available_data: prometheus::Histogram, + store_chunk: prometheus::Histogram, + get_chunk: prometheus::Histogram, } /// Availability metrics. @@ -1017,6 +1099,46 @@ impl Metrics { metrics.received_availability_chunks_total.inc_by(by); } } + + /// Provide a timer for `prune_povs` which observes on drop. + fn time_prune_povs(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.prune_povs.start_timer()) + } + + /// Provide a timer for `prune_chunks` which observes on drop. + fn time_prune_chunks(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.prune_chunks.start_timer()) + } + + /// Provide a timer for `process_block_finalized` which observes on drop. + fn time_process_block_finalized(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.process_block_finalized.start_timer()) + } + + /// Provide a timer for `block_activated` which observes on drop. 
+ fn time_block_activated(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.block_activated.start_timer()) + } + + /// Provide a timer for `process_message` which observes on drop. + fn time_process_message(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.process_message.start_timer()) + } + + /// Provide a timer for `store_available_data` which observes on drop. + fn time_store_available_data(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.store_available_data.start_timer()) + } + + /// Provide a timer for `store_chunk` which observes on drop. + fn time_store_chunk(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.store_chunk.start_timer()) + } + + /// Provide a timer for `get_chunk` which observes on drop. + fn time_get_chunk(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.get_chunk.start_timer()) + } } impl metrics::Metrics for Metrics { @@ -1029,6 +1151,78 @@ impl metrics::Metrics for Metrics { )?, registry, )?, + prune_povs: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_av_store_prune_povs", + "Time spent within `av_store::prune_povs`", + ) + )?, + registry, + )?, + prune_chunks: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_av_store_prune_chunks", + "Time spent within `av_store::prune_chunks`", + ) + )?, + registry, + )?, + process_block_finalized: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_av_store_process_block_finalized", + "Time spent within `av_store::block_finalized`", + ) + )?, + registry, + )?, + block_activated: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_av_store_block_activated", + "Time spent within `av_store::block_activated`", + ) + )?, + registry, + )?, + process_message: prometheus::register( + prometheus::Histogram::with_opts( + 
prometheus::HistogramOpts::new( + "parachain_av_store_process_message", + "Time spent within `av_store::process_message`", + ) + )?, + registry, + )?, + store_available_data: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_av_store_store_available_data", + "Time spent within `av_store::store_available_data`", + ) + )?, + registry, + )?, + store_chunk: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_av_store_store_chunk", + "Time spent within `av_store::store_chunk`", + ) + )?, + registry, + )?, + get_chunk: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_av_store_get_chunk", + "Time spent within `av_store::get_chunk`", + ) + )?, + registry, + )?, }; Ok(Metrics(Some(metrics))) } diff --git a/node/core/av-store/src/tests.rs b/node/core/av-store/src/tests.rs index 6c1a950f067a5fed0f9b7a7e052f1e5507311efc..f35809a4a72923c9ec19295e3735f888010bc301 100644 --- a/node/core/av-store/src/tests.rs +++ b/node/core/av-store/src/tests.rs @@ -27,10 +27,12 @@ use smallvec::smallvec; use polkadot_primitives::v1::{ AvailableData, BlockData, CandidateDescriptor, CandidateReceipt, HeadData, - PersistedValidationData, PoV, Id as ParaId, + PersistedValidationData, PoV, Id as ParaId, CandidateHash, }; use polkadot_node_subsystem_util::TimeoutExt; -use polkadot_subsystem::ActiveLeavesUpdate; +use polkadot_subsystem::{ + ActiveLeavesUpdate, errors::RuntimeApiError, +}; use polkadot_node_subsystem_test_helpers as test_helpers; struct TestHarness { @@ -71,6 +73,7 @@ impl Default for TestState { block_number: 5, hrmp_mqc_heads: Vec::new(), dmq_mqc_head: Default::default(), + max_pov_size: 1024, }; let pruning_config = PruningConfig { @@ -125,7 +128,7 @@ async fn overseer_send( overseer: &mut test_helpers::TestSubsystemContextHandle, msg: AvailabilityStoreMessage, ) { - log::trace!("Sending message:\n{:?}", &msg); + 
tracing::trace!(meg = ?msg, "sending message"); overseer .send(FromOverseer::Communication { msg }) .timeout(TIMEOUT) @@ -140,7 +143,7 @@ async fn overseer_recv( .await .expect(&format!("{:?} is more than enough to receive messages", TIMEOUT)); - log::trace!("Received message:\n{:?}", &msg); + tracing::trace!(msg = ?msg, "received message"); msg } @@ -149,7 +152,7 @@ async fn overseer_recv_with_timeout( overseer: &mut test_helpers::TestSubsystemContextHandle, timeout: Duration, ) -> Option { - log::trace!("Waiting for message..."); + tracing::trace!("waiting for message..."); overseer .recv() .timeout(timeout) @@ -167,13 +170,58 @@ async fn overseer_signal( .expect(&format!("{:?} is more than enough for sending signals.", TIMEOUT)); } +#[test] +fn runtime_api_error_does_not_stop_the_subsystem() { + let store = Arc::new(kvdb_memorydb::create(columns::NUM_COLUMNS)); + + test_harness(PruningConfig::default(), store, |test_harness| async move { + let TestHarness { mut virtual_overseer } = test_harness; + let new_leaf = Hash::repeat_byte(0x01); + + overseer_signal( + &mut virtual_overseer, + OverseerSignal::ActiveLeaves(ActiveLeavesUpdate { + activated: smallvec![new_leaf.clone()], + deactivated: smallvec![], + }), + ).await; + + // runtime api call fails + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::CandidateEvents(tx), + )) => { + assert_eq!(relay_parent, new_leaf); + tx.send(Err(RuntimeApiError::from("oh no".to_string()))).unwrap(); + } + ); + + // but that's fine, we're still alive + let (tx, rx) = oneshot::channel(); + let candidate_hash = CandidateHash(Hash::repeat_byte(33)); + let validator_index = 5; + let query_chunk = AvailabilityStoreMessage::QueryChunk( + candidate_hash, + validator_index, + tx, + ); + + overseer_send(&mut virtual_overseer, query_chunk.into()).await; + + assert!(rx.await.unwrap().is_none()); + + }); +} + #[test] fn 
store_chunk_works() { let store = Arc::new(kvdb_memorydb::create(columns::NUM_COLUMNS)); test_harness(PruningConfig::default(), store.clone(), |test_harness| async move { let TestHarness { mut virtual_overseer } = test_harness; let relay_parent = Hash::repeat_byte(32); - let candidate_hash = Hash::repeat_byte(33); + let candidate_hash = CandidateHash(Hash::repeat_byte(33)); let validator_index = 5; let chunk = ErasureChunk { @@ -226,7 +274,7 @@ fn store_block_works() { let test_state = TestState::default(); test_harness(test_state.pruning_config.clone(), store.clone(), |test_harness| async move { let TestHarness { mut virtual_overseer } = test_harness; - let candidate_hash = Hash::from([1; 32]); + let candidate_hash = CandidateHash(Hash::from([1; 32])); let validator_index = 5; let n_validators = 10; @@ -235,7 +283,7 @@ fn store_block_works() { }; let available_data = AvailableData { - pov, + pov: Arc::new(pov), validation_data: test_state.persisted_validation_data, }; @@ -280,7 +328,7 @@ fn store_pov_and_query_chunk_works() { test_harness(test_state.pruning_config.clone(), store.clone(), |test_harness| async move { let TestHarness { mut virtual_overseer } = test_harness; - let candidate_hash = Hash::from([1; 32]); + let candidate_hash = CandidateHash(Hash::from([1; 32])); let n_validators = 10; let pov = PoV { @@ -288,7 +336,7 @@ fn store_pov_and_query_chunk_works() { }; let available_data = AvailableData { - pov, + pov: Arc::new(pov), validation_data: test_state.persisted_validation_data, }; @@ -323,7 +371,7 @@ fn stored_but_not_included_chunk_is_pruned() { test_harness(test_state.pruning_config.clone(), store.clone(), |test_harness| async move { let TestHarness { mut virtual_overseer } = test_harness; - let candidate_hash = Hash::repeat_byte(1); + let candidate_hash = CandidateHash(Hash::repeat_byte(1)); let relay_parent = Hash::repeat_byte(2); let validator_index = 5; @@ -378,7 +426,7 @@ fn stored_but_not_included_data_is_pruned() { 
test_harness(test_state.pruning_config.clone(), store.clone(), |test_harness| async move { let TestHarness { mut virtual_overseer } = test_harness; - let candidate_hash = Hash::repeat_byte(1); + let candidate_hash = CandidateHash(Hash::repeat_byte(1)); let n_validators = 10; let pov = PoV { @@ -386,7 +434,7 @@ fn stored_but_not_included_data_is_pruned() { }; let available_data = AvailableData { - pov, + pov: Arc::new(pov), validation_data: test_state.persisted_validation_data, }; @@ -440,7 +488,7 @@ fn stored_data_kept_until_finalized() { let candidate_hash = candidate.hash(); let available_data = AvailableData { - pov, + pov: Arc::new(pov), validation_data: test_state.persisted_validation_data, }; @@ -679,12 +727,12 @@ fn forkfullness_works() { let candidate_2_hash = candidate_2.hash(); let available_data_1 = AvailableData { - pov: pov_1, + pov: Arc::new(pov_1), validation_data: test_state.persisted_validation_data.clone(), }; let available_data_2 = AvailableData { - pov: pov_2, + pov: Arc::new(pov_2), validation_data: test_state.persisted_validation_data, }; @@ -805,7 +853,7 @@ fn forkfullness_works() { async fn query_available_data( virtual_overseer: &mut test_helpers::TestSubsystemContextHandle, - candidate_hash: Hash, + candidate_hash: CandidateHash, ) -> Option { let (tx, rx) = oneshot::channel(); @@ -817,7 +865,7 @@ async fn query_available_data( async fn query_chunk( virtual_overseer: &mut test_helpers::TestSubsystemContextHandle, - candidate_hash: Hash, + candidate_hash: CandidateHash, index: u32, ) -> Option { let (tx, rx) = oneshot::channel(); diff --git a/node/core/backing/Cargo.toml b/node/core/backing/Cargo.toml index 8892e201503c4ecfc14fa3d67a59a3fadfb5b7d2..d5e9df33c70aee361740c5044ccd2f30abb0465a 100644 --- a/node/core/backing/Cargo.toml +++ b/node/core/backing/Cargo.toml @@ -5,8 +5,8 @@ authors = ["Parity Technologies "] edition = "2018" [dependencies] -futures = "0.3.5" -sp-keystore = { git = "https://github.com/paritytech/substrate", branch = 
"master" } +futures = "0.3.8" +sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } polkadot-primitives = { path = "../../../primitives" } polkadot-node-primitives = { path = "../../primitives" } polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" } @@ -14,14 +14,15 @@ polkadot-node-subsystem-util = { path = "../../subsystem-util" } erasure-coding = { package = "polkadot-erasure-coding", path = "../../../erasure-coding" } statement-table = { package = "polkadot-statement-table", path = "../../../statement-table" } bitvec = { version = "0.17.4", default-features = false, features = ["alloc"] } -log = "0.4.11" -thiserror = "1.0.21" +tracing = "0.1.22" +tracing-futures = "0.2.4" +thiserror = "1.0.22" [dev-dependencies] -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } -futures = { version = "0.3.5", features = ["thread-pool"] } -assert_matches = "1.3.0" +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +futures = { version = "0.3.8", features = ["thread-pool"] } +assert_matches = "1.4.0" polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 9b3102d97b97e35335efb12353a73586439fdca4..54dbe295f7bf5892e45a20f676424c79289e1561 
100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -24,17 +24,14 @@ use std::pin::Pin; use std::sync::Arc; use bitvec::vec::BitVec; -use futures::{ - channel::{mpsc, oneshot}, - Future, FutureExt, SinkExt, StreamExt, -}; +use futures::{channel::{mpsc, oneshot}, Future, FutureExt, SinkExt, StreamExt}; use sp_keystore::SyncCryptoStorePtr; use polkadot_primitives::v1::{ CommittedCandidateReceipt, BackedCandidate, Id as ParaId, ValidatorId, - ValidatorIndex, SigningContext, PoV, + ValidatorIndex, SigningContext, PoV, CandidateHash, CandidateDescriptor, AvailableData, ValidatorSignature, Hash, CandidateReceipt, - CandidateCommitments, CoreState, CoreIndex, CollatorId, ValidationOutputs, + CoreState, CoreIndex, CollatorId, ValidityAttestation, CandidateCommitments, }; use polkadot_node_primitives::{ FromTableMisbehavior, Statement, SignedFullStatement, MisbehaviorReport, ValidationResult, @@ -42,9 +39,8 @@ use polkadot_node_primitives::{ use polkadot_subsystem::{ messages::{ AllMessages, AvailabilityStoreMessage, CandidateBackingMessage, CandidateSelectionMessage, - CandidateValidationMessage, NewBackedCandidate, PoVDistributionMessage, ProvisionableData, - ProvisionerMessage, RuntimeApiMessage, StatementDistributionMessage, ValidationFailed, - RuntimeApiRequest, + CandidateValidationMessage, PoVDistributionMessage, ProvisionableData, + ProvisionerMessage, StatementDistributionMessage, ValidationFailed, RuntimeApiRequest, }, }; use polkadot_node_subsystem_util::{ @@ -55,6 +51,7 @@ use polkadot_node_subsystem_util::{ request_from_runtime, Validator, delegated_subsystem, + FromJobCommand, metrics::{self, prometheus}, }; use statement_table::{ @@ -68,6 +65,8 @@ use statement_table::{ }; use thiserror::Error; +const LOG_TARGET: &str = "candidate_backing"; + #[derive(Debug, Error)] enum Error { #[error("Candidate is not found")] @@ -75,11 +74,17 @@ enum Error { #[error("Signature is invalid")] InvalidSignature, #[error("Failed to send candidates 
{0:?}")] - Send(Vec), - #[error("Oneshot never resolved")] - Oneshot(#[from] #[source] oneshot::Canceled), + Send(Vec), + #[error("FetchPoV channel closed before receipt")] + FetchPoV(#[source] oneshot::Canceled), + #[error("ValidateFromChainState channel closed before receipt")] + ValidateFromChainState(#[source] oneshot::Canceled), + #[error("StoreAvailableData channel closed before receipt")] + StoreAvailableData(#[source] oneshot::Canceled), + #[error("a channel was closed before receipt in try_join!")] + JoinMultiple(#[source] oneshot::Canceled), #[error("Obtaining erasure chunks failed")] - ObtainErasureChunks(#[from] #[source] erasure_coding::Error), + ObtainErasureChunks(#[from] erasure_coding::Error), #[error(transparent)] ValidationFailed(#[from] ValidationFailed), #[error(transparent)] @@ -88,27 +93,62 @@ enum Error { UtilError(#[from] util::Error), } +enum ValidatedCandidateCommand { + // We were instructed to second the candidate. + Second(BackgroundValidationResult), + // We were instructed to validate the candidate. + Attest(BackgroundValidationResult), +} + +impl std::fmt::Debug for ValidatedCandidateCommand { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let candidate_hash = self.candidate_hash(); + match *self { + ValidatedCandidateCommand::Second(_) => + write!(f, "Second({})", candidate_hash), + ValidatedCandidateCommand::Attest(_) => + write!(f, "Attest({})", candidate_hash), + } + } +} + +impl ValidatedCandidateCommand { + fn candidate_hash(&self) -> CandidateHash { + match *self { + ValidatedCandidateCommand::Second(Ok((ref candidate, _, _))) => candidate.hash(), + ValidatedCandidateCommand::Second(Err(ref candidate)) => candidate.hash(), + ValidatedCandidateCommand::Attest(Ok((ref candidate, _, _))) => candidate.hash(), + ValidatedCandidateCommand::Attest(Err(ref candidate)) => candidate.hash(), + } + } +} + /// Holds all data needed for candidate backing job operation. 
struct CandidateBackingJob { /// The hash of the relay parent on top of which this job is doing it's work. parent: Hash, - /// Inbound message channel receiving part. - rx_to: mpsc::Receiver, /// Outbound message channel sending part. - tx_from: mpsc::Sender, + tx_from: mpsc::Sender, /// The `ParaId` assigned to this validator - assignment: ParaId, + assignment: Option, /// The collator required to author the candidate, if any. required_collator: Option, - /// We issued `Valid` or `Invalid` statements on about these candidates. - issued_statements: HashSet, + /// We issued `Seconded`, `Valid` or `Invalid` statements on about these candidates. + issued_statements: HashSet, + /// These candidates are undergoing validation in the background. + awaiting_validation: HashSet, /// `Some(h)` if this job has already issues `Seconded` statemt for some candidate with `h` hash. - seconded: Option, + seconded: Option, + /// The candidates that are includable, by hash. Each entry here indicates + /// that we've sent the provisioner the backed candidate. + backed: HashSet, /// We have already reported misbehaviors for these validators. reported_misbehavior_for: HashSet, keystore: SyncCryptoStorePtr, table: Table, table_context: TableContext, + background_validation: mpsc::Receiver, + background_validation_tx: mpsc::Sender, metrics: Metrics, } @@ -126,12 +166,12 @@ struct TableContext { impl TableContextTrait for TableContext { type AuthorityId = ValidatorIndex; - type Digest = Hash; + type Digest = CandidateHash; type GroupId = ParaId; type Signature = ValidatorSignature; type Candidate = CommittedCandidateReceipt; - fn candidate_digest(candidate: &CommittedCandidateReceipt) -> Hash { + fn candidate_digest(candidate: &CommittedCandidateReceipt) -> CandidateHash { candidate.hash() } @@ -148,110 +188,363 @@ impl TableContextTrait for TableContext { } } -/// A message type that is sent from `CandidateBackingSubsystem` to `CandidateBackingJob`. 
-pub enum ToJob { - /// A `CandidateBackingMessage`. - CandidateBacking(CandidateBackingMessage), - /// Stop working. - Stop, +struct InvalidErasureRoot; + +// It looks like it's not possible to do an `impl From` given the current state of +// the code. So this does the necessary conversion. +fn primitive_statement_to_table(s: &SignedFullStatement) -> TableSignedStatement { + let statement = match s.payload() { + Statement::Seconded(c) => TableStatement::Candidate(c.clone()), + Statement::Valid(h) => TableStatement::Valid(h.clone()), + Statement::Invalid(h) => TableStatement::Invalid(h.clone()), + }; + + TableSignedStatement { + statement, + signature: s.signature().clone(), + sender: s.validator_index(), + } } -impl TryFrom for ToJob { - type Error = (); +#[tracing::instrument(level = "trace", skip(attested, table_context), fields(subsystem = LOG_TARGET))] +fn table_attested_to_backed( + attested: TableAttestedCandidate< + ParaId, + CommittedCandidateReceipt, + ValidatorIndex, + ValidatorSignature, + >, + table_context: &TableContext, +) -> Option { + let TableAttestedCandidate { candidate, validity_votes, group_id: para_id } = attested; + + let (ids, validity_votes): (Vec<_>, Vec) = validity_votes + .into_iter() + .map(|(id, vote)| (id, vote.into())) + .unzip(); + + let group = table_context.groups.get(¶_id)?; + + let mut validator_indices = BitVec::with_capacity(group.len()); + + validator_indices.resize(group.len(), false); + + // The order of the validity votes in the backed candidate must match + // the order of bits set in the bitfield, which is not necessarily + // the order of the `validity_votes` we got from the table. 
+ let mut vote_positions = Vec::with_capacity(validity_votes.len()); + for (orig_idx, id) in ids.iter().enumerate() { + if let Some(position) = group.iter().position(|x| x == id) { + validator_indices.set(position, true); + vote_positions.push((orig_idx, position)); + } else { + tracing::warn!( + target: LOG_TARGET, + "Logic error: Validity vote from table does not correspond to group", + ); - fn try_from(msg: AllMessages) -> Result { - match msg { - AllMessages::CandidateBacking(msg) => Ok(ToJob::CandidateBacking(msg)), - _ => Err(()), + return None; } } + vote_positions.sort_by_key(|(_orig, pos_in_group)| *pos_in_group); + + Some(BackedCandidate { + candidate, + validity_votes: vote_positions.into_iter() + .map(|(pos_in_votes, _pos_in_group)| validity_votes[pos_in_votes].clone()) + .collect(), + validator_indices, + }) } -impl From for ToJob { - fn from(msg: CandidateBackingMessage) -> Self { - Self::CandidateBacking(msg) - } +async fn store_available_data( + tx_from: &mut mpsc::Sender, + id: Option, + n_validators: u32, + candidate_hash: CandidateHash, + available_data: AvailableData, +) -> Result<(), Error> { + let (tx, rx) = oneshot::channel(); + tx_from.send(AllMessages::AvailabilityStore( + AvailabilityStoreMessage::StoreAvailableData( + candidate_hash, + id, + n_validators, + available_data, + tx, + ) + ).into() + ).await?; + + let _ = rx.await.map_err(Error::StoreAvailableData)?; + + Ok(()) } -impl util::ToJobTrait for ToJob { - const STOP: Self = ToJob::Stop; +// Make a `PoV` available. +// +// This will compute the erasure root internally and compare it to the expected erasure root. +// This returns `Err()` iff there is an internal error. Otherwise, it returns either `Ok(Ok(()))` or `Ok(Err(_))`. 
+#[tracing::instrument(level = "trace", skip(tx_from, pov), fields(subsystem = LOG_TARGET))] +async fn make_pov_available( + tx_from: &mut mpsc::Sender, + validator_index: Option, + n_validators: usize, + pov: Arc, + candidate_hash: CandidateHash, + validation_data: polkadot_primitives::v1::PersistedValidationData, + expected_erasure_root: Hash, +) -> Result, Error> { + let available_data = AvailableData { + pov, + validation_data, + }; - fn relay_parent(&self) -> Option { - match self { - Self::CandidateBacking(cb) => cb.relay_parent(), - Self::Stop => None, - } + let chunks = erasure_coding::obtain_chunks_v1( + n_validators, + &available_data, + )?; + + let branches = erasure_coding::branches(chunks.as_ref()); + let erasure_root = branches.root(); + + if erasure_root != expected_erasure_root { + return Ok(Err(InvalidErasureRoot)); } + + store_available_data( + tx_from, + validator_index, + n_validators as u32, + candidate_hash, + available_data, + ).await?; + + Ok(Ok(())) } -/// A message type that is sent from `CandidateBackingJob` to `CandidateBackingSubsystem`. 
-enum FromJob { - AvailabilityStore(AvailabilityStoreMessage), - RuntimeApiMessage(RuntimeApiMessage), - CandidateValidation(CandidateValidationMessage), - CandidateSelection(CandidateSelectionMessage), - Provisioner(ProvisionerMessage), - PoVDistribution(PoVDistributionMessage), - StatementDistribution(StatementDistributionMessage), +async fn request_pov_from_distribution( + tx_from: &mut mpsc::Sender, + parent: Hash, + descriptor: CandidateDescriptor, +) -> Result, Error> { + let (tx, rx) = oneshot::channel(); + + tx_from.send(AllMessages::PoVDistribution( + PoVDistributionMessage::FetchPoV(parent, descriptor, tx) + ).into()).await?; + + rx.await.map_err(Error::FetchPoV) } -impl From for AllMessages { - fn from(f: FromJob) -> Self { - match f { - FromJob::AvailabilityStore(msg) => AllMessages::AvailabilityStore(msg), - FromJob::RuntimeApiMessage(msg) => AllMessages::RuntimeApi(msg), - FromJob::CandidateValidation(msg) => AllMessages::CandidateValidation(msg), - FromJob::CandidateSelection(msg) => AllMessages::CandidateSelection(msg), - FromJob::StatementDistribution(msg) => AllMessages::StatementDistribution(msg), - FromJob::PoVDistribution(msg) => AllMessages::PoVDistribution(msg), - FromJob::Provisioner(msg) => AllMessages::Provisioner(msg), - } +async fn request_candidate_validation( + tx_from: &mut mpsc::Sender, + candidate: CandidateDescriptor, + pov: Arc, +) -> Result { + let (tx, rx) = oneshot::channel(); + + tx_from.send(AllMessages::CandidateValidation( + CandidateValidationMessage::ValidateFromChainState( + candidate, + pov, + tx, + ) + ).into() + ).await?; + + match rx.await { + Ok(Ok(validation_result)) => Ok(validation_result), + Ok(Err(err)) => Err(Error::ValidationFailed(err)), + Err(err) => Err(Error::ValidateFromChainState(err)), } } -impl TryFrom for FromJob { - type Error = &'static str; - - fn try_from(f: AllMessages) -> Result { - match f { - AllMessages::AvailabilityStore(msg) => Ok(FromJob::AvailabilityStore(msg)), - 
AllMessages::RuntimeApi(msg) => Ok(FromJob::RuntimeApiMessage(msg)), - AllMessages::CandidateValidation(msg) => Ok(FromJob::CandidateValidation(msg)), - AllMessages::CandidateSelection(msg) => Ok(FromJob::CandidateSelection(msg)), - AllMessages::StatementDistribution(msg) => Ok(FromJob::StatementDistribution(msg)), - AllMessages::PoVDistribution(msg) => Ok(FromJob::PoVDistribution(msg)), - AllMessages::Provisioner(msg) => Ok(FromJob::Provisioner(msg)), - _ => Err("can't convert this AllMessages variant to FromJob"), - } - } +type BackgroundValidationResult = Result<(CandidateReceipt, CandidateCommitments, Arc), CandidateReceipt>; + +struct BackgroundValidationParams { + tx_from: mpsc::Sender, + tx_command: mpsc::Sender, + candidate: CandidateReceipt, + relay_parent: Hash, + pov: Option>, + validator_index: Option, + n_validators: usize, + make_command: F, } -// It looks like it's not possible to do an `impl From` given the current state of -// the code. So this does the necessary conversion. 
-fn primitive_statement_to_table(s: &SignedFullStatement) -> TableSignedStatement { - let statement = match s.payload() { - Statement::Seconded(c) => TableStatement::Candidate(c.clone()), - Statement::Valid(h) => TableStatement::Valid(h.clone()), - Statement::Invalid(h) => TableStatement::Invalid(h.clone()), +async fn validate_and_make_available( + params: BackgroundValidationParams ValidatedCandidateCommand>, +) -> Result<(), Error> { + let BackgroundValidationParams { + mut tx_from, + mut tx_command, + candidate, + relay_parent, + pov, + validator_index, + n_validators, + make_command, + } = params; + + let pov = match pov { + Some(pov) => pov, + None => request_pov_from_distribution( + &mut tx_from, + relay_parent, + candidate.descriptor.clone(), + ).await?, }; - TableSignedStatement { - statement, - signature: s.signature().clone(), - sender: s.validator_index(), - } + let v = request_candidate_validation(&mut tx_from, candidate.descriptor.clone(), pov.clone()).await?; + + let expected_commitments_hash = candidate.commitments_hash; + + let res = match v { + ValidationResult::Valid(commitments, validation_data) => { + // If validation produces a new set of commitments, we vote the candidate as invalid. 
+ if commitments.hash() != expected_commitments_hash { + tracing::trace!( + target: LOG_TARGET, + candidate_receipt = ?candidate, + actual_commitments = ?commitments, + "Commitments obtained with validation don't match the announced by the candidate receipt", + ); + Err(candidate) + } else { + let erasure_valid = make_pov_available( + &mut tx_from, + validator_index, + n_validators, + pov.clone(), + candidate.hash(), + validation_data, + candidate.descriptor.erasure_root, + ).await?; + + match erasure_valid { + Ok(()) => Ok((candidate, commitments, pov.clone())), + Err(InvalidErasureRoot) => { + tracing::trace!( + target: LOG_TARGET, + candidate_receipt = ?candidate, + actual_commitments = ?commitments, + "Erasure root doesn't match the announced by the candidate receipt", + ); + Err(candidate) + }, + } + } + } + ValidationResult::Invalid(reason) => { + tracing::trace!( + target: LOG_TARGET, + candidate_receipt = ?candidate, + reason = ?reason, + "Validation yielded an invalid candidate", + ); + Err(candidate) + } + }; + + let command = make_command(res); + tx_command.send(command).await?; + Ok(()) } impl CandidateBackingJob { /// Run asynchronously. - async fn run_loop(mut self) -> Result<(), Error> { - while let Some(msg) = self.rx_to.next().await { - match msg { - ToJob::CandidateBacking(msg) => { - self.process_msg(msg).await?; + async fn run_loop( + mut self, + mut rx_to: mpsc::Receiver, + ) -> Result<(), Error> { + loop { + futures::select! 
{ + validated_command = self.background_validation.next() => { + if let Some(c) = validated_command { + self.handle_validated_candidate_command(c).await?; + } else { + panic!("`self` hasn't dropped and `self` holds a reference to this sender; qed"); + } + } + to_job = rx_to.next() => match to_job { + None => break, + Some(msg) => { + self.process_msg(msg).await?; + } + } + } + } + + Ok(()) + } + + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] + async fn handle_validated_candidate_command( + &mut self, + command: ValidatedCandidateCommand, + ) -> Result<(), Error> { + let candidate_hash = command.candidate_hash(); + self.awaiting_validation.remove(&candidate_hash); + + match command { + ValidatedCandidateCommand::Second(res) => { + match res { + Ok((candidate, commitments, pov)) => { + // sanity check. + if self.seconded.is_none() && !self.issued_statements.contains(&candidate_hash) { + self.seconded = Some(candidate_hash); + self.issued_statements.insert(candidate_hash); + self.metrics.on_candidate_seconded(); + + let statement = Statement::Seconded(CommittedCandidateReceipt { + descriptor: candidate.descriptor.clone(), + commitments, + }); + self.sign_import_and_distribute_statement(statement).await?; + self.distribute_pov(candidate.descriptor, pov).await?; + } + } + Err(candidate) => { + self.issue_candidate_invalid_message(candidate).await?; + } } - _ => break, } + ValidatedCandidateCommand::Attest(res) => { + // sanity check. 
+ if !self.issued_statements.contains(&candidate_hash) { + let statement = if res.is_ok() { + Statement::Valid(candidate_hash) + } else { + Statement::Invalid(candidate_hash) + }; + + self.issued_statements.insert(candidate_hash); + self.sign_import_and_distribute_statement(statement).await?; + } + } + } + + Ok(()) + } + + #[tracing::instrument(level = "trace", skip(self, params), fields(subsystem = LOG_TARGET))] + async fn background_validate_and_make_available( + &mut self, + params: BackgroundValidationParams< + impl Fn(BackgroundValidationResult) -> ValidatedCandidateCommand + Send + 'static + >, + ) -> Result<(), Error> { + let candidate_hash = params.candidate.hash(); + + if self.awaiting_validation.insert(candidate_hash) { + // spawn background task. + let bg = async move { + if let Err(e) = validate_and_make_available(params).await { + tracing::error!("Failed to validate and make available: {:?}", e); + } + }; + self.tx_from.send(FromJobCommand::Spawn("Backing Validation", bg.boxed())).await?; } Ok(()) @@ -261,133 +554,53 @@ impl CandidateBackingJob { &mut self, candidate: CandidateReceipt, ) -> Result<(), Error> { - self.tx_from.send(FromJob::CandidateSelection( - CandidateSelectionMessage::Invalid(self.parent, candidate) - )).await?; + self.tx_from.send(AllMessages::from(CandidateSelectionMessage::Invalid(self.parent, candidate)).into()).await?; Ok(()) } - /// Validate the candidate that is requested to be `Second`ed and distribute validation result. - /// - /// Returns `Ok(true)` if we issued a `Seconded` statement about this candidate. + /// Kick off background validation with intent to second. + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] async fn validate_and_second( &mut self, candidate: &CandidateReceipt, - pov: PoV, - ) -> Result { + pov: Arc, + ) -> Result<(), Error> { // Check that candidate is collated by the right collator. 
if self.required_collator.as_ref() .map_or(false, |c| c != &candidate.descriptor().collator) { self.issue_candidate_invalid_message(candidate.clone()).await?; - return Ok(false); + return Ok(()); } - let valid = self.request_candidate_validation( - candidate.descriptor().clone(), - Arc::new(pov.clone()), - ).await?; - - let candidate_hash = candidate.hash(); - - let statement = match valid { - ValidationResult::Valid(outputs, validation_data) => { - // make PoV available for later distribution. Send data to the availability - // store to keep. Sign and dispatch `valid` statement to network if we - // have not seconded the given candidate. - // - // If the commitments hash produced by validation is not the same as given by - // the collator, do not make available and report the collator. - let commitments_check = self.make_pov_available( - pov, - validation_data, - outputs, - |commitments| if commitments.hash() == candidate.commitments_hash { - Ok(CommittedCandidateReceipt { - descriptor: candidate.descriptor().clone(), - commitments, - }) - } else { - Err(()) - }, - ).await?; + self.background_validate_and_make_available(BackgroundValidationParams { + tx_from: self.tx_from.clone(), + tx_command: self.background_validation_tx.clone(), + candidate: candidate.clone(), + relay_parent: self.parent, + pov: Some(pov), + validator_index: self.table_context.validator.as_ref().map(|v| v.index()), + n_validators: self.table_context.validators.len(), + make_command: ValidatedCandidateCommand::Second, + }).await?; - match commitments_check { - Ok(candidate) => { - self.issued_statements.insert(candidate_hash); - Some(Statement::Seconded(candidate)) - } - Err(()) => { - self.issue_candidate_invalid_message(candidate.clone()).await?; - None - } - } - } - ValidationResult::Invalid(_reason) => { - // no need to issue a statement about this if we aren't seconding it. - // - // there's an infinite amount of garbage out there. no need to acknowledge - // all of it. 
- self.issue_candidate_invalid_message(candidate.clone()).await?; - None - } - }; - - let issued_statement = statement.is_some(); - - if let Some(statement) = statement { - if let Some(signed_statement) = self.sign_statement(statement).await { - self.import_statement(&signed_statement).await?; - self.distribute_signed_statement(signed_statement).await?; - } - } - - Ok(issued_statement) + Ok(()) } - fn get_backed(&self) -> Vec { - let proposed = self.table.proposed_candidates(&self.table_context); - let mut res = Vec::with_capacity(proposed.len()); - - for p in proposed.into_iter() { - let TableAttestedCandidate { candidate, validity_votes, .. } = p; - - let (ids, validity_votes): (Vec<_>, Vec<_>) = validity_votes - .into_iter() - .map(|(id, vote)| (id, vote.into())) - .unzip(); - - let group = match self.table_context.groups.get(&self.assignment) { - Some(group) => group, - None => continue, - }; - - let mut validator_indices = BitVec::with_capacity(group.len()); - - validator_indices.resize(group.len(), false); - - for id in ids.iter() { - if let Some(position) = group.iter().position(|x| x == id) { - validator_indices.set(position, true); - } - } - - let backed = BackedCandidate { - candidate, - validity_votes, - validator_indices, - }; - - res.push(NewBackedCandidate(backed.clone())); + async fn sign_import_and_distribute_statement(&mut self, statement: Statement) -> Result<(), Error> { + if let Some(signed_statement) = self.sign_statement(statement).await { + self.import_statement(&signed_statement).await?; + self.distribute_signed_statement(signed_statement).await?; } - res + Ok(()) } /// Check if there have happened any new misbehaviors and issue necessary messages. 
/// /// TODO: Report multiple misbehaviors (https://github.com/paritytech/polkadot/issues/1387) + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] async fn issue_new_misbehaviors(&mut self) -> Result<(), Error> { let mut reports = Vec::new(); @@ -404,6 +617,7 @@ impl CandidateBackingJob { if let Ok(report) = MisbehaviorReport::try_from(f) { let message = ProvisionerMessage::ProvisionableData( + self.parent, ProvisionableData::MisbehaviorReport(self.parent, report), ); @@ -420,6 +634,7 @@ impl CandidateBackingJob { } /// Import a statement into the statement table and return the summary of the import. + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] async fn import_statement( &mut self, statement: &SignedFullStatement, @@ -428,42 +643,59 @@ impl CandidateBackingJob { let summary = self.table.import_statement(&self.table_context, stmt); + if let Some(ref summary) = summary { + if let Some(attested) = self.table.attested_candidate( + &summary.candidate, + &self.table_context, + ) { + // `HashSet::insert` returns true if the thing wasn't in there already. + // one of the few places the Rust-std folks did a bad job with API + if self.backed.insert(summary.candidate) { + if let Some(backed) = + table_attested_to_backed(attested, &self.table_context) + { + let message = ProvisionerMessage::ProvisionableData( + self.parent, + ProvisionableData::BackedCandidate(backed.receipt()), + ); + self.send_to_provisioner(message).await?; + } + } + } + } + self.issue_new_misbehaviors().await?; - return Ok(summary); + Ok(summary) } + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] async fn process_msg(&mut self, msg: CandidateBackingMessage) -> Result<(), Error> { match msg { CandidateBackingMessage::Second(_, candidate, pov) => { + let _timer = self.metrics.time_process_second(); + // Sanity check that candidate is from our assignment. 
- if candidate.descriptor().para_id != self.assignment { + if Some(candidate.descriptor().para_id) != self.assignment { return Ok(()); } // If the message is a `CandidateBackingMessage::Second`, sign and dispatch a // Seconded statement only if we have not seconded any other candidate and // have not signed a Valid statement for the requested candidate. - match self.seconded { + if self.seconded.is_none() { // This job has not seconded a candidate yet. - None => { - let candidate_hash = candidate.hash(); - - if !self.issued_statements.contains(&candidate_hash) { - if let Ok(true) = self.validate_and_second( - &candidate, - pov, - ).await { - self.metrics.on_candidate_seconded(); - self.seconded = Some(candidate_hash); - } - } + let candidate_hash = candidate.hash(); + let pov = Arc::new(pov); + + if !self.issued_statements.contains(&candidate_hash) { + self.validate_and_second(&candidate, pov.clone()).await?; } - // This job has already seconded a candidate. - Some(_) => {} } } CandidateBackingMessage::Statement(_, statement) => { + let _timer = self.metrics.time_process_statement(); + self.check_statement_signature(&statement)?; match self.maybe_validate_and_import(statement).await { Err(Error::ValidationFailed(_)) => return Ok(()), @@ -471,8 +703,16 @@ impl CandidateBackingJob { Ok(()) => (), } } - CandidateBackingMessage::GetBackedCandidates(_, tx) => { - let backed = self.get_backed(); + CandidateBackingMessage::GetBackedCandidates(_, requested_candidates, tx) => { + let _timer = self.metrics.time_get_backed_candidates(); + + let backed = requested_candidates + .into_iter() + .filter_map(|hash| { + self.table.attested_candidate(&hash, &self.table_context) + .and_then(|attested| table_attested_to_backed(attested, &self.table_context)) + }) + .collect(); tx.send(backed).map_err(|data| Error::Send(data))?; } @@ -482,11 +722,12 @@ impl CandidateBackingJob { } /// Kick off validation work and distribute the result as a signed statement. 
+ #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] async fn kick_off_validation_work( &mut self, summary: TableSummary, ) -> Result<(), Error> { - let candidate_hash = summary.candidate.clone(); + let candidate_hash = summary.candidate; if self.issued_statements.contains(&candidate_hash) { return Ok(()) @@ -495,9 +736,7 @@ impl CandidateBackingJob { // We clone the commitments here because there are borrowck // errors relating to this being a struct and methods borrowing the entirety of self // and not just those things that the function uses. - let candidate = self.table.get_candidate(&candidate_hash).ok_or(Error::CandidateNotFound)?; - let expected_commitments = candidate.commitments.clone(); - + let candidate = self.table.get_candidate(&candidate_hash).ok_or(Error::CandidateNotFound)?.to_plain(); let descriptor = candidate.descriptor().clone(); // Check that candidate is collated by the right collator. @@ -512,50 +751,27 @@ impl CandidateBackingJob { return Ok(()); } - let pov = self.request_pov_from_distribution(descriptor.clone()).await?; - let v = self.request_candidate_validation(descriptor, pov.clone()).await?; - - let statement = match v { - ValidationResult::Valid(outputs, validation_data) => { - // If validation produces a new set of commitments, we vote the candidate as invalid. 
- let commitments_check = self.make_pov_available( - (&*pov).clone(), - validation_data, - outputs, - |commitments| if commitments == expected_commitments { - Ok(()) - } else { - Err(()) - } - ).await?; - - match commitments_check { - Ok(()) => Statement::Valid(candidate_hash), - Err(()) => Statement::Invalid(candidate_hash), - } - } - ValidationResult::Invalid(_reason) => { - Statement::Invalid(candidate_hash) - } - }; - - self.issued_statements.insert(candidate_hash); - - if let Some(signed_statement) = self.sign_statement(statement).await { - self.distribute_signed_statement(signed_statement).await?; - } - - Ok(()) + self.background_validate_and_make_available(BackgroundValidationParams { + tx_from: self.tx_from.clone(), + tx_command: self.background_validation_tx.clone(), + candidate, + relay_parent: self.parent, + pov: None, + validator_index: self.table_context.validator.as_ref().map(|v| v.index()), + n_validators: self.table_context.validators.len(), + make_command: ValidatedCandidateCommand::Attest, + }).await } /// Import the statement and kick off validation work if it is a part of our assignment. + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] async fn maybe_validate_and_import( &mut self, statement: SignedFullStatement, ) -> Result<(), Error> { if let Some(summary) = self.import_statement(&statement).await? 
{ if let Statement::Seconded(_) = statement.payload() { - if summary.group_id == self.assignment { + if Some(summary.group_id) == self.assignment { self.kick_off_validation_work(summary).await?; } } @@ -564,6 +780,7 @@ impl CandidateBackingJob { Ok(()) } + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] async fn sign_statement(&self, statement: Statement) -> Option { let signed = self.table_context .validator @@ -575,6 +792,7 @@ impl CandidateBackingJob { Some(signed) } + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn check_statement_signature(&self, statement: &SignedFullStatement) -> Result<(), Error> { let idx = statement.validator_index() as usize; @@ -591,137 +809,45 @@ impl CandidateBackingJob { } async fn send_to_provisioner(&mut self, msg: ProvisionerMessage) -> Result<(), Error> { - self.tx_from.send(FromJob::Provisioner(msg)).await?; + self.tx_from.send(AllMessages::from(msg).into()).await?; Ok(()) } - async fn request_pov_from_distribution( + async fn distribute_pov( &mut self, descriptor: CandidateDescriptor, - ) -> Result, Error> { - let (tx, rx) = oneshot::channel(); - - self.tx_from.send(FromJob::PoVDistribution( - PoVDistributionMessage::FetchPoV(self.parent, descriptor, tx) - )).await?; - - Ok(rx.await?) - } - - async fn request_candidate_validation( - &mut self, - candidate: CandidateDescriptor, pov: Arc, - ) -> Result { - let (tx, rx) = oneshot::channel(); - - self.tx_from.send(FromJob::CandidateValidation( - CandidateValidationMessage::ValidateFromChainState( - candidate, - pov, - tx, - ) - ) - ).await?; - - Ok(rx.await??) 
- } - - async fn store_available_data( - &mut self, - id: Option, - n_validators: u32, - available_data: AvailableData, ) -> Result<(), Error> { - let (tx, rx) = oneshot::channel(); - self.tx_from.send(FromJob::AvailabilityStore( - AvailabilityStoreMessage::StoreAvailableData( - self.parent, - id, - n_validators, - available_data, - tx, - ) - ) - ).await?; - - let _ = rx.await?; - - Ok(()) - } - - // Make a `PoV` available. - // - // This calls an inspection function before making the PoV available for any last checks - // that need to be done. If the inspection function returns an error, this function returns - // early without making the PoV available. - async fn make_pov_available( - &mut self, - pov: PoV, - validation_data: polkadot_primitives::v1::PersistedValidationData, - outputs: ValidationOutputs, - with_commitments: impl FnOnce(CandidateCommitments) -> Result, - ) -> Result, Error> { - let available_data = AvailableData { - pov, - validation_data, - }; - - let chunks = erasure_coding::obtain_chunks_v1( - self.table_context.validators.len(), - &available_data, - )?; - - let branches = erasure_coding::branches(chunks.as_ref()); - let erasure_root = branches.root(); - - let commitments = CandidateCommitments { - upward_messages: outputs.upward_messages, - erasure_root, - new_validation_code: outputs.new_validation_code, - head_data: outputs.head_data, - processed_downward_messages: outputs.processed_downward_messages, - }; - - let res = match with_commitments(commitments) { - Ok(x) => x, - Err(e) => return Ok(Err(e)), - }; - - self.store_available_data( - self.table_context.validator.as_ref().map(|v| v.index()), - self.table_context.validators.len() as u32, - available_data, - ).await?; - - Ok(Ok(res)) + self.tx_from.send(AllMessages::from( + PoVDistributionMessage::DistributePoV(self.parent, descriptor, pov), + ).into()).await.map_err(Into::into) } async fn distribute_signed_statement(&mut self, s: SignedFullStatement) -> Result<(), Error> { let smsg = 
StatementDistributionMessage::Share(self.parent, s); - self.tx_from.send(FromJob::StatementDistribution(smsg)).await?; + self.tx_from.send(AllMessages::from(smsg).into()).await?; Ok(()) } } impl util::JobTrait for CandidateBackingJob { - type ToJob = ToJob; - type FromJob = FromJob; + type ToJob = CandidateBackingMessage; type Error = Error; type RunArgs = SyncCryptoStorePtr; type Metrics = Metrics; const NAME: &'static str = "CandidateBackingJob"; + #[tracing::instrument(skip(keystore, metrics, rx_to, tx_from), fields(subsystem = LOG_TARGET))] fn run( parent: Hash, keystore: SyncCryptoStorePtr, metrics: Metrics, rx_to: mpsc::Receiver, - mut tx_from: mpsc::Sender, + mut tx_from: mpsc::Sender, ) -> Pin> + Send>> { async move { macro_rules! try_runtime_api { @@ -729,10 +855,10 @@ impl util::JobTrait for CandidateBackingJob { match $x { Ok(x) => x, Err(e) => { - log::warn!( - target: "candidate_backing", - "Failed to fetch runtime API data for job: {:?}", - e, + tracing::warn!( + target: LOG_TARGET, + err = ?e, + "Failed to fetch runtime API data for job", ); // We can't do candidate validation work if we don't have the @@ -745,15 +871,15 @@ impl util::JobTrait for CandidateBackingJob { } let (validators, groups, session_index, cores) = futures::try_join!( - request_validators(parent, &mut tx_from).await?, - request_validator_groups(parent, &mut tx_from).await?, - request_session_index_for_child(parent, &mut tx_from).await?, - request_from_runtime( + try_runtime_api!(request_validators(parent, &mut tx_from).await), + try_runtime_api!(request_validator_groups(parent, &mut tx_from).await), + try_runtime_api!(request_session_index_for_child(parent, &mut tx_from).await), + try_runtime_api!(request_from_runtime( parent, &mut tx_from, |tx| RuntimeApiRequest::AvailabilityCores(tx), - ).await?, - )?; + ).await), + ).map_err(Error::JoinMultiple)?; let validators = try_runtime_api!(validators); let (validator_groups, group_rotation_info) = try_runtime_api!(groups); @@ -769,10 
+895,10 @@ impl util::JobTrait for CandidateBackingJob { Ok(v) => v, Err(util::Error::NotAValidator) => { return Ok(()) }, Err(e) => { - log::warn!( - target: "candidate_backing", - "Cannot participate in candidate backing: {:?}", - e + tracing::warn!( + target: LOG_TARGET, + err = ?e, + "Cannot participate in candidate backing", ); return Ok(()) @@ -806,26 +932,30 @@ impl util::JobTrait for CandidateBackingJob { }; let (assignment, required_collator) = match assignment { - None => return Ok(()), // no need to work. - Some((a, r)) => (a, r), + None => (None, None), + Some((assignment, required_collator)) => (Some(assignment), required_collator), }; + let (background_tx, background_rx) = mpsc::channel(16); let job = CandidateBackingJob { parent, - rx_to, tx_from, assignment, required_collator, issued_statements: HashSet::new(), + awaiting_validation: HashSet::new(), seconded: None, + backed: HashSet::new(), reported_misbehavior_for: HashSet::new(), keystore, table: Table::default(), table_context, + background_validation: background_rx, + background_validation_tx: background_tx, metrics, }; - job.run_loop().await + job.run_loop(rx_to).await } .boxed() } @@ -834,7 +964,10 @@ impl util::JobTrait for CandidateBackingJob { #[derive(Clone)] struct MetricsInner { signed_statements_total: prometheus::Counter, - candidates_seconded_total: prometheus::Counter + candidates_seconded_total: prometheus::Counter, + process_second: prometheus::Histogram, + process_statement: prometheus::Histogram, + get_backed_candidates: prometheus::Histogram, } /// Candidate backing metrics. @@ -853,6 +986,21 @@ impl Metrics { metrics.candidates_seconded_total.inc(); } } + + /// Provide a timer for handling `CandidateBackingMessage:Second` which observes on drop. + fn time_process_second(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.process_second.start_timer()) + } + + /// Provide a timer for handling `CandidateBackingMessage::Statement` which observes on drop. 
+ fn time_process_statement(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.process_statement.start_timer()) + } + + /// Provide a timer for handling `CandidateBackingMessage::GetBackedCandidates` which observes on drop. + fn time_get_backed_candidates(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.get_backed_candidates.start_timer()) + } } impl metrics::Metrics for Metrics { @@ -860,24 +1008,51 @@ impl metrics::Metrics for Metrics { let metrics = MetricsInner { signed_statements_total: prometheus::register( prometheus::Counter::new( - "parachain_signed_statements_total", + "parachain_candidate_backing_signed_statements_total", "Number of statements signed.", )?, registry, )?, candidates_seconded_total: prometheus::register( prometheus::Counter::new( - "parachain_candidates_seconded_total", + "parachain_candidate_backing_candidates_seconded_total", "Number of candidates seconded.", )?, registry, )?, + process_second: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_candidate_backing_process_second", + "Time spent within `candidate_backing::process_second`", + ) + )?, + registry, + )?, + process_statement: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_candidate_backing_process_statement", + "Time spent within `candidate_backing::process_statement`", + ) + )?, + registry, + )?, + get_backed_candidates: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_candidate_backing_get_backed_candidates", + "Time spent within `candidate_backing::get_backed_candidates`", + ) + )?, + registry, + )?, }; Ok(Metrics(Some(metrics))) } } -delegated_subsystem!(CandidateBackingJob(SyncCryptoStorePtr, Metrics) <- ToJob as CandidateBackingSubsystem); +delegated_subsystem!(CandidateBackingJob(SyncCryptoStorePtr, Metrics) <- CandidateBackingMessage as CandidateBackingSubsystem); #[cfg(test)] mod tests { 
@@ -885,12 +1060,11 @@ mod tests { use assert_matches::assert_matches; use futures::{future, Future}; use polkadot_primitives::v1::{ - ScheduledCore, BlockData, CandidateCommitments, - PersistedValidationData, ValidationData, TransientValidationData, HeadData, - ValidityAttestation, GroupRotationInfo, + ScheduledCore, BlockData, PersistedValidationData, ValidationData, + TransientValidationData, HeadData, GroupRotationInfo, }; use polkadot_subsystem::{ - messages::RuntimeApiRequest, + messages::{RuntimeApiRequest, RuntimeApiMessage}, ActiveLeavesUpdate, FromOverseer, OverseerSignal, }; use polkadot_node_primitives::InvalidCandidate; @@ -930,6 +1104,7 @@ mod tests { Sr25519Keyring::Charlie, Sr25519Keyring::Dave, Sr25519Keyring::Ferdie, + Sr25519Keyring::One, ]; let keystore = Arc::new(sc_keystore::LocalKeystore::in_memory()); @@ -939,7 +1114,7 @@ mod tests { let validator_public = validator_pubkeys(&validators); - let validator_groups = vec![vec![2, 0, 3], vec![1], vec![4]]; + let validator_groups = vec![vec![2, 0, 3, 5], vec![1], vec![4]]; let group_rotation_info = GroupRotationInfo { session_start_block: 0, group_rotation_frequency: 100, @@ -978,6 +1153,7 @@ mod tests { block_number: Default::default(), hrmp_mqc_heads: Vec::new(), dmq_mqc_head: Default::default(), + max_pov_size: 1024, }, transient: TransientValidationData { max_code_size: 1000, @@ -1026,7 +1202,7 @@ mod tests { fn make_erasure_root(test: &TestState, pov: PoV) -> Hash { let available_data = AvailableData { validation_data: test.validation_data.persisted.clone(), - pov, + pov: Arc::new(pov), }; let chunks = erasure_coding::obtain_chunks_v1(test.validators.len(), &available_data).unwrap(); @@ -1049,11 +1225,11 @@ mod tests { para_id: self.para_id, pov_hash: self.pov_hash, relay_parent: self.relay_parent, + erasure_root: self.erasure_root, ..Default::default() }, commitments: CandidateCommitments { head_data: self.head_data, - erasure_root: self.erasure_root, ..Default::default() }, } @@ -1156,11 
+1332,13 @@ mod tests { ) ) if pov == pov && &c == candidate.descriptor() => { tx.send(Ok( - ValidationResult::Valid(ValidationOutputs { + ValidationResult::Valid(CandidateCommitments { head_data: expected_head_data.clone(), + horizontal_messages: Vec::new(), upward_messages: Vec::new(), new_validation_code: None, processed_downward_messages: 0, + hrmp_watermark: 0, }, test_state.validation_data.persisted), )).unwrap(); } @@ -1169,8 +1347,8 @@ mod tests { assert_matches!( virtual_overseer.recv().await, AllMessages::AvailabilityStore( - AvailabilityStoreMessage::StoreAvailableData(parent_hash, _, _, _, tx) - ) if parent_hash == test_state.relay_parent => { + AvailabilityStoreMessage::StoreAvailableData(candidate_hash, _, _, _, tx) + ) if candidate_hash == candidate.hash() => { tx.send(Ok(())).unwrap(); } ); @@ -1190,6 +1368,15 @@ mod tests { } ); + assert_matches!( + virtual_overseer.recv().await, + AllMessages::PoVDistribution(PoVDistributionMessage::DistributePoV(hash, descriptor, pov_received)) => { + assert_eq!(test_state.relay_parent, hash); + assert_eq!(candidate.descriptor, descriptor); + assert_eq!(pov, *pov_received); + } + ); + virtual_overseer.send(FromOverseer::Signal( OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::stop_work(test_state.relay_parent))) ).await; @@ -1225,12 +1412,20 @@ mod tests { let candidate_a_hash = candidate_a.hash(); let public0 = CryptoStore::sr25519_generate_new( &*test_state.keystore, - ValidatorId::ID, Some(&test_state.validators[0].to_seed()) + ValidatorId::ID, + Some(&test_state.validators[0].to_seed()), + ).await.expect("Insert key into keystore"); + let public1 = CryptoStore::sr25519_generate_new( + &*test_state.keystore, + ValidatorId::ID, + Some(&test_state.validators[5].to_seed()), ).await.expect("Insert key into keystore"); let public2 = CryptoStore::sr25519_generate_new( &*test_state.keystore, - ValidatorId::ID, Some(&test_state.validators[2].to_seed()) + ValidatorId::ID, + Some(&test_state.validators[2].to_seed()), 
).await.expect("Insert key into keystore"); + let signed_a = SignedFullStatement::sign( &test_state.keystore, Statement::Seconded(candidate_a.clone()), @@ -1243,8 +1438,8 @@ mod tests { &test_state.keystore, Statement::Valid(candidate_a_hash), &test_state.signing_context, - 0, - &public0.into(), + 5, + &public1.into(), ).await.expect("should be signed"); let statement = CandidateBackingMessage::Statement(test_state.relay_parent, signed_a.clone()); @@ -1275,11 +1470,13 @@ mod tests { ) ) if pov == pov && &c == candidate_a.descriptor() => { tx.send(Ok( - ValidationResult::Valid(ValidationOutputs { + ValidationResult::Valid(CandidateCommitments { head_data: expected_head_data.clone(), upward_messages: Vec::new(), + horizontal_messages: Vec::new(), new_validation_code: None, processed_downward_messages: 0, + hrmp_watermark: 0, }, test_state.validation_data.persisted), )).unwrap(); } @@ -1288,12 +1485,22 @@ mod tests { assert_matches!( virtual_overseer.recv().await, AllMessages::AvailabilityStore( - AvailabilityStoreMessage::StoreAvailableData(parent_hash, _, _, _, tx) - ) if parent_hash == test_state.relay_parent => { + AvailabilityStoreMessage::StoreAvailableData(candidate_hash, _, _, _, tx) + ) if candidate_hash == candidate_a.hash() => { tx.send(Ok(())).unwrap(); } ); + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share(hash, stmt) + ) => { + assert_eq!(test_state.relay_parent, hash); + stmt.check_signature(&test_state.signing_context, &public0.into()).expect("Is signed correctly"); + } + ); + let statement = CandidateBackingMessage::Statement( test_state.relay_parent, signed_b.clone(), @@ -1301,28 +1508,173 @@ mod tests { virtual_overseer.send(FromOverseer::Communication{ msg: statement }).await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::Provisioner( + ProvisionerMessage::ProvisionableData( + _, + ProvisionableData::BackedCandidate(candidate_receipt) + ) + ) => { + 
assert_eq!(candidate_receipt, candidate_a.to_plain()); + } + ); + + virtual_overseer.send(FromOverseer::Signal( + OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::stop_work(test_state.relay_parent))) + ).await; + }); + } + + #[test] + fn backing_works_while_validation_ongoing() { + let test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |test_harness| async move { + let TestHarness { mut virtual_overseer } = test_harness; + + test_startup(&mut virtual_overseer, &test_state).await; + + let pov = PoV { + block_data: BlockData(vec![1, 2, 3]), + }; + + let pov_hash = pov.hash(); + + let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); + + let candidate_a = TestCandidateBuilder { + para_id: test_state.chain_ids[0], + relay_parent: test_state.relay_parent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone()), + ..Default::default() + }.build(); + + let candidate_a_hash = candidate_a.hash(); + let public1 = CryptoStore::sr25519_generate_new( + &*test_state.keystore, + ValidatorId::ID, + Some(&test_state.validators[5].to_seed()), + ).await.expect("Insert key into keystore"); + let public2 = CryptoStore::sr25519_generate_new( + &*test_state.keystore, + ValidatorId::ID, + Some(&test_state.validators[2].to_seed()), + ).await.expect("Insert key into keystore"); + let public3 = CryptoStore::sr25519_generate_new( + &*test_state.keystore, + ValidatorId::ID, + Some(&test_state.validators[3].to_seed()), + ).await.expect("Insert key into keystore"); + + let signed_a = SignedFullStatement::sign( + &test_state.keystore, + Statement::Seconded(candidate_a.clone()), + &test_state.signing_context, + 2, + &public2.into(), + ).await.expect("should be signed"); + + let signed_b = SignedFullStatement::sign( + &test_state.keystore, + Statement::Valid(candidate_a_hash), + &test_state.signing_context, + 5, + &public1.into(), + ).await.expect("should be signed"); + + let 
signed_c = SignedFullStatement::sign( + &test_state.keystore, + Statement::Valid(candidate_a_hash), + &test_state.signing_context, + 3, + &public3.into(), + ).await.expect("should be signed"); + + let statement = CandidateBackingMessage::Statement(test_state.relay_parent, signed_a.clone()); + virtual_overseer.send(FromOverseer::Communication{ msg: statement }).await; + + // Sending a `Statement::Seconded` for our assignment will start + // validation process. The first thing requested is PoV from the + // `PoVDistribution`. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::PoVDistribution( + PoVDistributionMessage::FetchPoV(relay_parent, _, tx) + ) if relay_parent == test_state.relay_parent => { + tx.send(Arc::new(pov.clone())).unwrap(); + } + ); + + // The next step is the actual request to Validation subsystem + // to validate the `Seconded` candidate. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CandidateValidation( + CandidateValidationMessage::ValidateFromChainState( + c, + pov, + tx, + ) + ) if pov == pov && &c == candidate_a.descriptor() => { + // we never validate the candidate. our local node + // shouldn't issue any statements. + std::mem::forget(tx); + } + ); + + let statement = CandidateBackingMessage::Statement( + test_state.relay_parent, + signed_b.clone(), + ); + + virtual_overseer.send(FromOverseer::Communication{ msg: statement }).await; + + let statement = CandidateBackingMessage::Statement( + test_state.relay_parent, + signed_c.clone(), + ); + + virtual_overseer.send(FromOverseer::Communication{ msg: statement }).await; + + // Candidate gets backed entirely by other votes. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::Provisioner( + ProvisionerMessage::ProvisionableData( + _, + ProvisionableData::BackedCandidate(CandidateReceipt { + descriptor, + .. 
+ }) + ) + ) if descriptor == candidate_a.descriptor + ); + let (tx, rx) = oneshot::channel(); + let msg = CandidateBackingMessage::GetBackedCandidates( + test_state.relay_parent, + vec![candidate_a.hash()], + tx, + ); - // The backed candidats set should be not empty at this point. - virtual_overseer.send(FromOverseer::Communication{ - msg: CandidateBackingMessage::GetBackedCandidates( - test_state.relay_parent, - tx, - ) - }).await; + virtual_overseer.send(FromOverseer::Communication{ msg }).await; - let backed = rx.await.unwrap(); + let candidates = rx.await.unwrap(); + assert_eq!(1, candidates.len()); + assert_eq!(candidates[0].validity_votes.len(), 3); - // `validity_votes` may be in any order so we can't do this in a single assert. - assert_eq!(backed[0].0.candidate, candidate_a); - assert_eq!(backed[0].0.validity_votes.len(), 2); - assert!(backed[0].0.validity_votes.contains( + assert!(candidates[0].validity_votes.contains( + &ValidityAttestation::Implicit(signed_a.signature().clone()) + )); + assert!(candidates[0].validity_votes.contains( &ValidityAttestation::Explicit(signed_b.signature().clone()) )); - assert!(backed[0].0.validity_votes.contains( - &ValidityAttestation::Implicit(signed_a.signature().clone()) + assert!(candidates[0].validity_votes.contains( + &ValidityAttestation::Explicit(signed_c.signature().clone()) )); - assert_eq!(backed[0].0.validator_indices, bitvec::bitvec![Lsb0, u8; 1, 1, 0]); + assert_eq!(candidates[0].validator_indices, bitvec::bitvec![Lsb0, u8; 1, 0, 1, 1]); virtual_overseer.send(FromOverseer::Signal( OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::stop_work(test_state.relay_parent))) @@ -1376,10 +1728,10 @@ mod tests { let signed_b = SignedFullStatement::sign( &test_state.keystore, - Statement::Valid(candidate_a_hash), + Statement::Invalid(candidate_a_hash), &test_state.signing_context, - 0, - &public0.into(), + 2, + &public2.into(), ).await.expect("should be signed"); let signed_c = SignedFullStatement::sign( @@ -1413,11 
+1765,13 @@ mod tests { ) ) if pov == pov && &c == candidate_a.descriptor() => { tx.send(Ok( - ValidationResult::Valid(ValidationOutputs { + ValidationResult::Valid(CandidateCommitments { head_data: expected_head_data.clone(), upward_messages: Vec::new(), + horizontal_messages: Vec::new(), new_validation_code: None, processed_downward_messages: 0, + hrmp_watermark: 0, }, test_state.validation_data.persisted), )).unwrap(); } @@ -1426,8 +1780,8 @@ mod tests { assert_matches!( virtual_overseer.recv().await, AllMessages::AvailabilityStore( - AvailabilityStoreMessage::StoreAvailableData(parent_hash, _, _, _, tx) - ) if parent_hash == test_state.relay_parent => { + AvailabilityStoreMessage::StoreAvailableData(candidate_hash, _, _, _, tx) + ) if candidate_hash == candidate_a.hash() => { tx.send(Ok(())).unwrap(); } ); @@ -1449,10 +1803,37 @@ mod tests { } ); + // This `Invalid` statement contradicts the `Candidate` statement + // sent at first. let statement = CandidateBackingMessage::Statement(test_state.relay_parent, signed_b.clone()); virtual_overseer.send(FromOverseer::Communication{ msg: statement }).await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::Provisioner( + ProvisionerMessage::ProvisionableData( + _, + ProvisionableData::MisbehaviorReport( + relay_parent, + MisbehaviorReport::SelfContradiction(_, s1, s2), + ) + ) + ) if relay_parent == test_state.relay_parent => { + s1.check_signature( + &test_state.signing_context, + &test_state.validator_public[s1.validator_index() as usize], + ).unwrap(); + + s2.check_signature( + &test_state.signing_context, + &test_state.validator_public[s2.validator_index() as usize], + ).unwrap(); + } + ); + + // This `Invalid` statement contradicts the `Valid` statement the subsystem + // should have issued behind the scenes. 
let statement = CandidateBackingMessage::Statement(test_state.relay_parent, signed_c.clone()); virtual_overseer.send(FromOverseer::Communication{ msg: statement }).await; @@ -1461,6 +1842,7 @@ mod tests { virtual_overseer.recv().await, AllMessages::Provisioner( ProvisionerMessage::ProvisionableData( + _, ProvisionableData::MisbehaviorReport( relay_parent, MisbehaviorReport::SelfContradiction(_, s1, s2), @@ -1568,11 +1950,13 @@ mod tests { ) ) if pov == pov && &c == candidate_b.descriptor() => { tx.send(Ok( - ValidationResult::Valid(ValidationOutputs { + ValidationResult::Valid(CandidateCommitments { head_data: expected_head_data.clone(), upward_messages: Vec::new(), + horizontal_messages: Vec::new(), new_validation_code: None, processed_downward_messages: 0, + hrmp_watermark: 0, }, test_state.validation_data.persisted), )).unwrap(); } @@ -1581,8 +1965,8 @@ mod tests { assert_matches!( virtual_overseer.recv().await, AllMessages::AvailabilityStore( - AvailabilityStoreMessage::StoreAvailableData(parent_hash, _, _, _, tx) - ) if parent_hash == test_state.relay_parent => { + AvailabilityStoreMessage::StoreAvailableData(candidate_hash, _, _, _, tx) + ) if candidate_hash == candidate_b.hash() => { tx.send(Ok(())).unwrap(); } ); @@ -1825,6 +2209,7 @@ mod tests { let (tx, rx) = oneshot::channel(); let msg = CandidateBackingMessage::GetBackedCandidates( test_state.relay_parent, + vec![candidate.hash()], tx, ); @@ -1941,4 +2326,80 @@ mod tests { ).await; }); } + + #[test] + fn candidate_backing_reorders_votes() { + use sp_core::Encode; + + let relay_parent = [1; 32].into(); + let para_id = ParaId::from(10); + let session_index = 5; + let signing_context = SigningContext { parent_hash: relay_parent, session_index }; + let validators = vec![ + Sr25519Keyring::Alice, + Sr25519Keyring::Bob, + Sr25519Keyring::Charlie, + Sr25519Keyring::Dave, + Sr25519Keyring::Ferdie, + Sr25519Keyring::One, + ]; + + let validator_public = validator_pubkeys(&validators); + let validator_groups = { + 
let mut validator_groups = HashMap::new(); + validator_groups.insert(para_id, vec![0, 1, 2, 3, 4, 5]); + validator_groups + }; + + let table_context = TableContext { + signing_context, + validator: None, + groups: validator_groups, + validators: validator_public.clone(), + }; + + let fake_attestation = |idx: u32| { + let candidate: CommittedCandidateReceipt = Default::default(); + let hash = candidate.hash(); + let mut data = vec![0; 64]; + data[0..32].copy_from_slice(hash.0.as_bytes()); + data[32..36].copy_from_slice(idx.encode().as_slice()); + + let sig = ValidatorSignature::try_from(data).unwrap(); + statement_table::generic::ValidityAttestation::Implicit(sig) + }; + + let attested = TableAttestedCandidate { + candidate: Default::default(), + validity_votes: vec![ + (5, fake_attestation(5)), + (3, fake_attestation(3)), + (1, fake_attestation(1)), + ], + group_id: para_id, + }; + + let backed = table_attested_to_backed(attested, &table_context).unwrap(); + + let expected_bitvec = { + let mut validator_indices = BitVec::::with_capacity(6); + validator_indices.resize(6, false); + + validator_indices.set(1, true); + validator_indices.set(3, true); + validator_indices.set(5, true); + + validator_indices + }; + + // Should be in bitfield order, which is opposite to the order provided to the function. 
+ let expected_attestations = vec![ + fake_attestation(1).into(), + fake_attestation(3).into(), + fake_attestation(5).into(), + ]; + + assert_eq!(backed.validator_indices, expected_bitvec); + assert_eq!(backed.validity_votes, expected_attestations); + } } diff --git a/node/core/bitfield-signing/Cargo.toml b/node/core/bitfield-signing/Cargo.toml index 1ad319617ad70fb5f26a574824fd3371bef3a7cf..f80bfeb852da7f1f2c9dcf649f4fc6a201b5ba8e 100644 --- a/node/core/bitfield-signing/Cargo.toml +++ b/node/core/bitfield-signing/Cargo.toml @@ -5,12 +5,12 @@ authors = ["Parity Technologies "] edition = "2018" [dependencies] -bitvec = "0.17.4" -futures = "0.3.5" -log = "0.4.11" +futures = "0.3.8" +tracing = "0.1.22" +tracing-futures = "0.2.4" polkadot-primitives = { path = "../../../primitives" } polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } -sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } -wasm-timer = "0.2.4" -thiserror = "1.0.21" +sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +wasm-timer = "0.2.5" +thiserror = "1.0.22" diff --git a/node/core/bitfield-signing/src/lib.rs b/node/core/bitfield-signing/src/lib.rs index 48bc8df70a5aa4ac99b7abd2aff85cc1542d1f83..7937d908efbd1a782e3a843e466a1a967d06ac22 100644 --- a/node/core/bitfield-signing/src/lib.rs +++ b/node/core/bitfield-signing/src/lib.rs @@ -16,107 +16,34 @@ //! The bitfield signing subsystem produces `SignedAvailabilityBitfield`s once per block. 
-#![deny(unused_crate_dependencies, unused_results)] +#![deny(unused_crate_dependencies)] #![warn(missing_docs)] +#![recursion_limit="256"] -use bitvec::bitvec; -use futures::{ - channel::{mpsc, oneshot}, - prelude::*, - stream, Future, -}; +use futures::{channel::{mpsc, oneshot}, lock::Mutex, prelude::*, future, Future}; use sp_keystore::{Error as KeystoreError, SyncCryptoStorePtr}; use polkadot_node_subsystem::{ messages::{ - self, AllMessages, AvailabilityStoreMessage, BitfieldDistributionMessage, - BitfieldSigningMessage, CandidateBackingMessage, RuntimeApiMessage, + AllMessages, AvailabilityStoreMessage, BitfieldDistributionMessage, + BitfieldSigningMessage, RuntimeApiMessage, RuntimeApiRequest, }, errors::RuntimeApiError, }; use polkadot_node_subsystem_util::{ - self as util, JobManager, JobTrait, ToJobTrait, Validator, - metrics::{self, prometheus}, + self as util, JobManager, JobTrait, Validator, FromJobCommand, metrics::{self, prometheus}, }; use polkadot_primitives::v1::{AvailabilityBitfield, CoreState, Hash, ValidatorIndex}; -use std::{convert::TryFrom, pin::Pin, time::Duration}; +use std::{pin::Pin, time::Duration, iter::FromIterator}; use wasm_timer::{Delay, Instant}; use thiserror::Error; /// Delay between starting a bitfield signing job and its attempting to create a bitfield. const JOB_DELAY: Duration = Duration::from_millis(1500); +const LOG_TARGET: &str = "bitfield_signing"; /// Each `BitfieldSigningJob` prepares a signed bitfield for a single relay parent. pub struct BitfieldSigningJob; -/// Messages which a `BitfieldSigningJob` is prepared to receive. 
-#[allow(missing_docs)] -pub enum ToJob { - BitfieldSigning(BitfieldSigningMessage), - Stop, -} - -impl ToJobTrait for ToJob { - const STOP: Self = ToJob::Stop; - - fn relay_parent(&self) -> Option { - match self { - Self::BitfieldSigning(bsm) => bsm.relay_parent(), - Self::Stop => None, - } - } -} - -impl TryFrom for ToJob { - type Error = (); - - fn try_from(msg: AllMessages) -> Result { - match msg { - AllMessages::BitfieldSigning(bsm) => Ok(ToJob::BitfieldSigning(bsm)), - _ => Err(()), - } - } -} - -impl From for ToJob { - fn from(bsm: BitfieldSigningMessage) -> ToJob { - ToJob::BitfieldSigning(bsm) - } -} - -/// Messages which may be sent from a `BitfieldSigningJob`. -#[allow(missing_docs)] -pub enum FromJob { - AvailabilityStore(AvailabilityStoreMessage), - BitfieldDistribution(BitfieldDistributionMessage), - CandidateBacking(CandidateBackingMessage), - RuntimeApi(RuntimeApiMessage), -} - -impl From for AllMessages { - fn from(from_job: FromJob) -> AllMessages { - match from_job { - FromJob::AvailabilityStore(asm) => AllMessages::AvailabilityStore(asm), - FromJob::BitfieldDistribution(bdm) => AllMessages::BitfieldDistribution(bdm), - FromJob::CandidateBacking(cbm) => AllMessages::CandidateBacking(cbm), - FromJob::RuntimeApi(ram) => AllMessages::RuntimeApi(ram), - } - } -} - -impl TryFrom for FromJob { - type Error = (); - - fn try_from(msg: AllMessages) -> Result { - match msg { - AllMessages::AvailabilityStore(asm) => Ok(Self::AvailabilityStore(asm)), - AllMessages::BitfieldDistribution(bdm) => Ok(Self::BitfieldDistribution(bdm)), - AllMessages::CandidateBacking(cbm) => Ok(Self::CandidateBacking(cbm)), - AllMessages::RuntimeApi(ram) => Ok(Self::RuntimeApi(ram)), - _ => Err(()), - } - } -} - /// Errors we may encounter in the course of executing the `BitfieldSigningSubsystem`. 
#[derive(Debug, Error)] pub enum Error { @@ -132,9 +59,6 @@ pub enum Error { /// a mspc channel failed to send #[error(transparent)] MpscSend(#[from] mpsc::SendError), - /// several errors collected into one - #[error("Multiple errours occured: {0:?}")] - Multiple(Vec), /// the runtime API failed to return what we wanted #[error(transparent)] Runtime(#[from] RuntimeApiError), @@ -143,31 +67,26 @@ pub enum Error { Keystore(KeystoreError), } -// if there is a candidate pending availability, query the Availability Store -// for whether we have the availability chunk for our validator index. +/// If there is a candidate pending availability, query the Availability Store +/// for whether we have the availability chunk for our validator index. +#[tracing::instrument(level = "trace", skip(sender), fields(subsystem = LOG_TARGET))] async fn get_core_availability( relay_parent: Hash, core: CoreState, validator_idx: ValidatorIndex, - sender: &mpsc::Sender, + sender: &Mutex<&mut mpsc::Sender>, ) -> Result { - use messages::{ - AvailabilityStoreMessage::QueryChunkAvailability, - RuntimeApiRequest::CandidatePendingAvailability, - }; - use FromJob::{AvailabilityStore, RuntimeApi}; - use RuntimeApiMessage::Request; - - // we have to (cheaply) clone this sender so we can mutate it to actually send anything - let mut sender = sender.clone(); - if let CoreState::Occupied(core) = core { let (tx, rx) = oneshot::channel(); sender - .send(RuntimeApi(Request( - relay_parent, - CandidatePendingAvailability(core.para_id, tx), - ))) + .lock() + .await + .send( + AllMessages::from(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::CandidatePendingAvailability(core.para_id, tx), + )).into(), + ) .await?; let committed_candidate_receipt = match rx.await? { @@ -175,33 +94,37 @@ async fn get_core_availability( Ok(None) => return Ok(false), Err(e) => { // Don't take down the node on runtime API errors. 
- log::warn!(target: "bitfield_signing", "Encountered a runtime API error: {:?}", e); + tracing::warn!(target: LOG_TARGET, err = ?e, "Encountered a runtime API error"); return Ok(false); } }; let (tx, rx) = oneshot::channel(); sender - .send(AvailabilityStore(QueryChunkAvailability( - committed_candidate_receipt.descriptor.pov_hash, - validator_idx, - tx, - ))) + .lock() + .await + .send( + AllMessages::from(AvailabilityStoreMessage::QueryChunkAvailability( + committed_candidate_receipt.hash(), + validator_idx, + tx, + )).into(), + ) .await?; return rx.await.map_err(Into::into); } + Ok(false) } -// delegates to the v1 runtime API -async fn get_availability_cores(relay_parent: Hash, sender: &mut mpsc::Sender) -> Result, Error> { - use FromJob::RuntimeApi; - use messages::{ - RuntimeApiMessage::Request, - RuntimeApiRequest::AvailabilityCores, - }; - +/// delegates to the v1 runtime API +async fn get_availability_cores( + relay_parent: Hash, + sender: &mut mpsc::Sender, +) -> Result, Error> { let (tx, rx) = oneshot::channel(); - sender.send(RuntimeApi(Request(relay_parent, AvailabilityCores(tx)))).await?; + sender + .send(AllMessages::from(RuntimeApiMessage::Request(relay_parent, RuntimeApiRequest::AvailabilityCores(tx))).into()) + .await?; match rx.await { Ok(Ok(out)) => Ok(out), Ok(Err(runtime_err)) => Err(runtime_err.into()), @@ -209,62 +132,39 @@ async fn get_availability_cores(relay_parent: Hash, sender: &mut mpsc::Sender, + sender: &mut mpsc::Sender, ) -> Result { - use futures::lock::Mutex; - // get the set of availability cores from the runtime let availability_cores = get_availability_cores(relay_parent, sender).await?; - // we now need sender to be immutable so we can copy the reference to multiple concurrent closures - let sender = &*sender; - - // prepare outputs - let out = Mutex::new(bitvec!(bitvec::order::Lsb0, u8; 0; availability_cores.len())); - // in principle, we know that we never want concurrent access to the _same_ bit within the vec; - // we 
could `let out_ref = out.as_mut_ptr();` here instead, and manually assign bits, avoiding - // any need to ever wait to lock this mutex. - // in practice, it's safer to just use the mutex, and speed optimizations should wait until - // benchmarking proves that they are necessary. - let out_ref = &out; - let errs = Mutex::new(Vec::new()); - let errs_ref = &errs; - - // Handle each (idx, core) pair concurrently + // Wrap the sender in a Mutex to share it between the futures. // - // In principle, this work is all concurrent, not parallel. In practice, we can't guarantee it, which is why - // we need the mutexes and explicit references above. - stream::iter(availability_cores.into_iter().enumerate()) - .for_each_concurrent(None, |(idx, core)| async move { - let availability = match get_core_availability(relay_parent, core, validator_idx, sender).await { - Ok(availability) => availability, - Err(err) => { - errs_ref.lock().await.push(err); - return; - } - }; - out_ref.lock().await.set(idx, availability); - }) - .await; - - let errs = errs.into_inner(); - if errs.is_empty() { - Ok(out.into_inner().into()) - } else { - Err(Error::Multiple(errs.into())) - } + // We use a `Mutex` here to not `clone` the sender inside the future, because + // cloning the sender will always increase the capacity of the channel by one. + // (for the lifetime of the sender) + let sender = Mutex::new(sender); + + // Handle all cores concurrently + // `try_join_all` returns all results in the same order as the input futures. + let results = future::try_join_all( + availability_cores.into_iter().map(|core| get_core_availability(relay_parent, core, validator_idx, &sender)), + ).await?; + + Ok(AvailabilityBitfield(FromIterator::from_iter(results))) } #[derive(Clone)] struct MetricsInner { bitfields_signed_total: prometheus::Counter, + run: prometheus::Histogram, } /// Bitfield signing metrics. 
@@ -277,6 +177,11 @@ impl Metrics { metrics.bitfields_signed_total.inc(); } } + + /// Provide a timer for `prune_povs` which observes on drop. + fn time_run(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.run.start_timer()) + } } impl metrics::Metrics for Metrics { @@ -289,14 +194,22 @@ impl metrics::Metrics for Metrics { )?, registry, )?, + run: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_bitfield_signing_run", + "Time spent within `bitfield_signing::run`", + ) + )?, + registry, + )?, }; Ok(Metrics(Some(metrics))) } } impl JobTrait for BitfieldSigningJob { - type ToJob = ToJob; - type FromJob = FromJob; + type ToJob = BitfieldSigningMessage; type Error = Error; type RunArgs = SyncCryptoStorePtr; type Metrics = Metrics; @@ -304,15 +217,16 @@ impl JobTrait for BitfieldSigningJob { const NAME: &'static str = "BitfieldSigningJob"; /// Run a job for the parent block indicated + #[tracing::instrument(skip(keystore, metrics, _receiver, sender), fields(subsystem = LOG_TARGET))] fn run( relay_parent: Hash, keystore: Self::RunArgs, metrics: Self::Metrics, - _receiver: mpsc::Receiver, - mut sender: mpsc::Sender, + _receiver: mpsc::Receiver, + mut sender: mpsc::Sender, ) -> Pin> + Send>> { + let metrics = metrics.clone(); async move { - // figure out when to wait to let wait_until = Instant::now() + JOB_DELAY; // now do all the work we can before we need to wait for the availability store @@ -326,12 +240,16 @@ impl JobTrait for BitfieldSigningJob { // wait a bit before doing anything else Delay::new_at(wait_until).await?; + // this timer does not appear at the head of the function because we don't want to include + // JOB_DELAY each time. + let _timer = metrics.time_run(); + let bitfield = match construct_availability_bitfield(relay_parent, validator.index(), &mut sender).await { Err(Error::Runtime(runtime_err)) => { // Don't take down the node on runtime API errors. 
- log::warn!(target: "bitfield_signing", "Encountered a runtime API error: {:?}", runtime_err); + tracing::warn!(target: LOG_TARGET, err = ?runtime_err, "Encountered a runtime API error"); return Ok(()); } Err(err) => return Err(err), @@ -344,24 +262,96 @@ impl JobTrait for BitfieldSigningJob { .map_err(|e| Error::Keystore(e))?; metrics.on_bitfield_signed(); - // make an anonymous scope to contain some use statements to simplify creating the outbound message - { - use BitfieldDistributionMessage::DistributeBitfield; - use FromJob::BitfieldDistribution; - - sender - .send(BitfieldDistribution(DistributeBitfield( - relay_parent, - signed_bitfield, - ))) - .await - .map_err(Into::into) - } + sender + .send( + AllMessages::from( + BitfieldDistributionMessage::DistributeBitfield(relay_parent, signed_bitfield), + ).into(), + ) + .await + .map_err(Into::into) } .boxed() } } /// BitfieldSigningSubsystem manages a number of bitfield signing jobs. -pub type BitfieldSigningSubsystem = - JobManager; +pub type BitfieldSigningSubsystem = JobManager; + +#[cfg(test)] +mod tests { + use super::*; + use futures::{pin_mut, executor::block_on}; + use polkadot_primitives::v1::OccupiedCore; + + fn occupied_core(para_id: u32) -> CoreState { + CoreState::Occupied(OccupiedCore { + para_id: para_id.into(), + group_responsible: para_id.into(), + next_up_on_available: None, + occupied_since: 100_u32, + time_out_at: 200_u32, + next_up_on_time_out: None, + availability: Default::default(), + }) + } + + #[test] + fn construct_availability_bitfield_works() { + block_on(async move { + let (mut sender, mut receiver) = mpsc::channel(10); + let relay_parent = Hash::default(); + let validator_index = 1u32; + + let future = construct_availability_bitfield(relay_parent, validator_index, &mut sender).fuse(); + pin_mut!(future); + + loop { + futures::select! 
{ + m = receiver.next() => match m.unwrap() { + FromJobCommand::SendMessage( + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(rp, RuntimeApiRequest::AvailabilityCores(tx)), + ), + ) => { + assert_eq!(relay_parent, rp); + tx.send(Ok(vec![CoreState::Free, occupied_core(1), occupied_core(2)])).unwrap(); + }, + FromJobCommand::SendMessage( + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(rp, RuntimeApiRequest::CandidatePendingAvailability(para_id, tx)), + ), + ) => { + assert_eq!(relay_parent, rp); + + if para_id == 1.into() { + tx.send(Ok(Some(Default::default()))).unwrap(); + } else { + tx.send(Ok(None)).unwrap(); + } + }, + FromJobCommand::SendMessage( + AllMessages::AvailabilityStore( + AvailabilityStoreMessage::QueryChunkAvailability(_, vidx, tx), + ), + ) => { + assert_eq!(validator_index, vidx); + + tx.send(true).unwrap(); + }, + o => panic!("Unknown message: {:?}", o), + }, + r = future => match r { + Ok(r) => { + assert!(!r.0.get(0).unwrap()); + assert!(r.0.get(1).unwrap()); + assert!(!r.0.get(2).unwrap()); + break + }, + Err(e) => panic!("Failed: {:?}", e), + }, + } + } + }); + } +} diff --git a/node/core/candidate-selection/Cargo.toml b/node/core/candidate-selection/Cargo.toml index 071f83d53c4c41e1393b1d5ed9ed04c80a0249a5..33d1b86b30ff4bc9a622a78d4f0f9d0330db521d 100644 --- a/node/core/candidate-selection/Cargo.toml +++ b/node/core/candidate-selection/Cargo.toml @@ -5,13 +5,16 @@ authors = ["Parity Technologies "] edition = "2018" [dependencies] -futures = "0.3.5" -log = "0.4.11" -thiserror = "1.0.21" +futures = "0.3.8" +tracing = "0.1.22" +tracing-futures = "0.2.4" +thiserror = "1.0.22" + +sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } + polkadot-primitives = { path = "../../../primitives" } -polkadot-node-primitives = { path = "../../primitives" } polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } [dev-dependencies] 
-sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } diff --git a/node/core/candidate-selection/src/lib.rs b/node/core/candidate-selection/src/lib.rs index 4399a90f599496620503fd8325b50ed9b0dd1480..5812a47f156902d982b3f87843e69ec385e7e344 100644 --- a/node/core/candidate-selection/src/lib.rs +++ b/node/core/candidate-selection/src/lib.rs @@ -23,100 +23,34 @@ use futures::{ channel::{mpsc, oneshot}, prelude::*, }; -use polkadot_node_primitives::ValidationResult; +use sp_keystore::SyncCryptoStorePtr; use polkadot_node_subsystem::{ - errors::{ChainApiError, RuntimeApiError}, + errors::ChainApiError, messages::{ - AllMessages, CandidateBackingMessage, CandidateSelectionMessage, - CandidateValidationMessage, CollatorProtocolMessage, + AllMessages, CandidateBackingMessage, CandidateSelectionMessage, CollatorProtocolMessage, + RuntimeApiRequest, }, }; use polkadot_node_subsystem_util::{ - self as util, delegated_subsystem, JobTrait, ToJobTrait, - metrics::{self, prometheus}, + self as util, request_from_runtime, request_validator_groups, delegated_subsystem, + JobTrait, FromJobCommand, Validator, metrics::{self, prometheus}, }; use polkadot_primitives::v1::{ - CandidateDescriptor, CandidateReceipt, CollatorId, Hash, Id as ParaId, PoV, + CandidateReceipt, CollatorId, CoreState, CoreIndex, Hash, Id as ParaId, PoV, }; -use std::{convert::TryFrom, pin::Pin, sync::Arc}; +use std::pin::Pin; use thiserror::Error; -const TARGET: &'static str = "candidate_selection"; +const LOG_TARGET: &'static str = "candidate_selection"; struct CandidateSelectionJob { - sender: mpsc::Sender, - receiver: mpsc::Receiver, + assignment: ParaId, + sender: mpsc::Sender, + receiver: mpsc::Receiver, metrics: Metrics, seconded_candidate: Option, } -/// This enum defines the messages that the provisioner is prepared to receive. 
-#[derive(Debug)] -pub enum ToJob { - /// The provisioner message is the main input to the provisioner. - CandidateSelection(CandidateSelectionMessage), - /// This message indicates that the provisioner should shut itself down. - Stop, -} - -impl ToJobTrait for ToJob { - const STOP: Self = Self::Stop; - - fn relay_parent(&self) -> Option { - match self { - Self::CandidateSelection(csm) => csm.relay_parent(), - Self::Stop => None, - } - } -} - -impl TryFrom for ToJob { - type Error = (); - - fn try_from(msg: AllMessages) -> Result { - match msg { - AllMessages::CandidateSelection(csm) => Ok(Self::CandidateSelection(csm)), - _ => Err(()), - } - } -} - -impl From for ToJob { - fn from(csm: CandidateSelectionMessage) -> Self { - Self::CandidateSelection(csm) - } -} - -#[derive(Debug)] -enum FromJob { - Validation(CandidateValidationMessage), - Backing(CandidateBackingMessage), - Collator(CollatorProtocolMessage), -} - -impl From for AllMessages { - fn from(from_job: FromJob) -> AllMessages { - match from_job { - FromJob::Validation(msg) => AllMessages::CandidateValidation(msg), - FromJob::Backing(msg) => AllMessages::CandidateBacking(msg), - FromJob::Collator(msg) => AllMessages::CollatorProtocol(msg), - } - } -} - -impl TryFrom for FromJob { - type Error = (); - - fn try_from(msg: AllMessages) -> Result { - match msg { - AllMessages::CandidateValidation(msg) => Ok(FromJob::Validation(msg)), - AllMessages::CandidateBacking(msg) => Ok(FromJob::Backing(msg)), - AllMessages::CollatorProtocol(msg) => Ok(FromJob::Collator(msg)), - _ => Err(()), - } - } -} - #[derive(Debug, Error)] enum Error { #[error(transparent)] @@ -127,76 +61,124 @@ enum Error { OneshotRecv(#[from] oneshot::Canceled), #[error(transparent)] ChainApi(#[from] ChainApiError), - #[error(transparent)] - Runtime(#[from] RuntimeApiError), +} + +macro_rules! 
try_runtime_api { + ($x: expr) => { + match $x { + Ok(x) => x, + Err(e) => { + tracing::warn!( + target: LOG_TARGET, + err = ?e, + "Failed to fetch runtime API data for job", + ); + + // We can't do candidate selection work if we don't have the + // requisite runtime API data. But these errors should not take + // down the node. + return Ok(()); + } + } + } } impl JobTrait for CandidateSelectionJob { - type ToJob = ToJob; - type FromJob = FromJob; + type ToJob = CandidateSelectionMessage; type Error = Error; - type RunArgs = (); + type RunArgs = SyncCryptoStorePtr; type Metrics = Metrics; const NAME: &'static str = "CandidateSelectionJob"; - /// Run a job for the parent block indicated - // - // this function is in charge of creating and executing the job's main loop + #[tracing::instrument(skip(keystore, metrics, receiver, sender), fields(subsystem = LOG_TARGET))] fn run( - _relay_parent: Hash, - _run_args: Self::RunArgs, + relay_parent: Hash, + keystore: Self::RunArgs, metrics: Self::Metrics, - receiver: mpsc::Receiver, - sender: mpsc::Sender, + receiver: mpsc::Receiver, + mut sender: mpsc::Sender, ) -> Pin> + Send>> { - Box::pin(async move { - let job = CandidateSelectionJob::new(metrics, sender, receiver); + async move { + let (groups, cores) = futures::try_join!( + try_runtime_api!(request_validator_groups(relay_parent, &mut sender).await), + try_runtime_api!(request_from_runtime( + relay_parent, + &mut sender, + |tx| RuntimeApiRequest::AvailabilityCores(tx), + ).await), + )?; + + let (validator_groups, group_rotation_info) = try_runtime_api!(groups); + let cores = try_runtime_api!(cores); + + let n_cores = cores.len(); + + let validator = match Validator::new(relay_parent, keystore.clone(), sender.clone()).await { + Ok(validator) => validator, + Err(util::Error::NotAValidator) => return Ok(()), + Err(err) => return Err(Error::Util(err)), + }; + + let mut assignment = None; + + for (idx, core) in cores.into_iter().enumerate() { + // Ignore prospective 
assignments on occupied cores for the time being. + if let CoreState::Scheduled(scheduled) = core { + let core_index = CoreIndex(idx as _); + let group_index = group_rotation_info.group_for_core(core_index, n_cores); + if let Some(g) = validator_groups.get(group_index.0 as usize) { + if g.contains(&validator.index()) { + assignment = Some(scheduled.para_id); + break; + } + } + } + } - // it isn't necessary to break run_loop into its own function, - // but it's convenient to separate the concerns in this way - job.run_loop().await - }) + let assignment = match assignment { + Some(assignment) => assignment, + None => return Ok(()), + }; + + CandidateSelectionJob::new(assignment, metrics, sender, receiver).run_loop().await + }.boxed() } } impl CandidateSelectionJob { pub fn new( + assignment: ParaId, metrics: Metrics, - sender: mpsc::Sender, - receiver: mpsc::Receiver, + sender: mpsc::Sender, + receiver: mpsc::Receiver, ) -> Self { Self { sender, receiver, metrics, + assignment, seconded_candidate: None, } } - async fn run_loop(mut self) -> Result<(), Error> { - self.run_loop_borrowed().await - } - - /// this function exists for testing and should not generally be used; use `run_loop` instead. 
- async fn run_loop_borrowed(&mut self) -> Result<(), Error> { - while let Some(msg) = self.receiver.next().await { - match msg { - ToJob::CandidateSelection(CandidateSelectionMessage::Collation( + async fn run_loop(&mut self) -> Result<(), Error> { + loop { + match self.receiver.next().await { + Some(CandidateSelectionMessage::Collation( relay_parent, para_id, collator_id, )) => { - self.handle_collation(relay_parent, para_id, collator_id) - .await; + self.handle_collation(relay_parent, para_id, collator_id).await; } - ToJob::CandidateSelection(CandidateSelectionMessage::Invalid( + Some(CandidateSelectionMessage::Invalid( _, candidate_receipt, )) => { self.handle_invalid(candidate_receipt).await; } - ToJob::Stop => break, + None => break, } } @@ -206,12 +188,32 @@ impl CandidateSelectionJob { Ok(()) } + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] async fn handle_collation( &mut self, relay_parent: Hash, para_id: ParaId, collator_id: CollatorId, ) { + let _timer = self.metrics.time_handle_collation(); + + if self.assignment != para_id { + tracing::info!( + target: LOG_TARGET, + "Collator {:?} sent a collation outside of our assignment {:?}", + collator_id, + para_id, + ); + if let Err(err) = forward_invalidity_note(&collator_id, &mut self.sender).await { + tracing::warn!( + target: LOG_TARGET, + err = ?err, + "failed to forward invalidity note", + ); + } + return; + } + if self.seconded_candidate.is_none() { let (candidate_receipt, pov) = match get_collation( @@ -222,34 +224,15 @@ impl CandidateSelectionJob { ).await { Ok(response) => response, Err(err) => { - log::warn!( - target: TARGET, - "failed to get collation from collator protocol subsystem: {:?}", - err + tracing::warn!( + target: LOG_TARGET, + err = ?err, + "failed to get collation from collator protocol subsystem", ); return; } }; - let pov = Arc::new(pov); - - if !candidate_is_valid( - candidate_receipt.descriptor.clone(), - pov.clone(), - self.sender.clone(), - ) 
- .await - { - return; - } - - let pov = if let Ok(pov) = Arc::try_unwrap(pov) { - pov - } else { - log::warn!(target: TARGET, "Arc unwrapping is expected to succeed, the other fns should have already run to completion by now."); - return; - }; - match second_candidate( relay_parent, candidate_receipt, @@ -259,35 +242,38 @@ impl CandidateSelectionJob { ) .await { - Err(err) => log::warn!(target: TARGET, "failed to second a candidate: {:?}", err), + Err(err) => tracing::warn!(target: LOG_TARGET, err = ?err, "failed to second a candidate"), Ok(()) => self.seconded_candidate = Some(collator_id), } } } + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] async fn handle_invalid(&mut self, candidate_receipt: CandidateReceipt) { + let _timer = self.metrics.time_handle_invalid(); + let received_from = match &self.seconded_candidate { Some(peer) => peer, None => { - log::warn!( - target: TARGET, + tracing::warn!( + target: LOG_TARGET, "received invalidity notice for a candidate we don't remember seconding" ); return; } }; - log::info!( - target: TARGET, - "received invalidity note for candidate {:?}", - candidate_receipt + tracing::info!( + target: LOG_TARGET, + candidate_receipt = ?candidate_receipt, + "received invalidity note for candidate", ); let result = if let Err(err) = forward_invalidity_note(received_from, &mut self.sender).await { - log::warn!( - target: TARGET, - "failed to forward invalidity note: {:?}", - err + tracing::warn!( + target: LOG_TARGET, + err = ?err, + "failed to forward invalidity note", ); Err(()) } else { @@ -300,72 +286,42 @@ impl CandidateSelectionJob { // get a collation from the Collator Protocol subsystem // // note that this gets an owned clone of the sender; that's becuase unlike `forward_invalidity_note`, it's expected to take a while longer +#[tracing::instrument(level = "trace", skip(sender), fields(subsystem = LOG_TARGET))] async fn get_collation( relay_parent: Hash, para_id: ParaId, collator_id: 
CollatorId, - mut sender: mpsc::Sender, + mut sender: mpsc::Sender, ) -> Result<(CandidateReceipt, PoV), Error> { let (tx, rx) = oneshot::channel(); sender - .send(FromJob::Collator(CollatorProtocolMessage::FetchCollation( + .send(AllMessages::from(CollatorProtocolMessage::FetchCollation( relay_parent, collator_id, para_id, tx, - ))) + )).into()) .await?; rx.await.map_err(Into::into) } -// find out whether a candidate is valid or not -async fn candidate_is_valid( - candidate_descriptor: CandidateDescriptor, - pov: Arc, - sender: mpsc::Sender, -) -> bool { - std::matches!( - candidate_is_valid_inner(candidate_descriptor, pov, sender).await, - Ok(true) - ) -} - -// find out whether a candidate is valid or not, with a worse interface -// the external interface is worse, but the internal implementation is easier -async fn candidate_is_valid_inner( - candidate_descriptor: CandidateDescriptor, - pov: Arc, - mut sender: mpsc::Sender, -) -> Result { - let (tx, rx) = oneshot::channel(); - sender - .send(FromJob::Validation( - CandidateValidationMessage::ValidateFromChainState(candidate_descriptor, pov, tx), - )) - .await?; - Ok(std::matches!( - rx.await, - Ok(Ok(ValidationResult::Valid(_, _))) - )) -} - async fn second_candidate( relay_parent: Hash, candidate_receipt: CandidateReceipt, pov: PoV, - sender: &mut mpsc::Sender, + sender: &mut mpsc::Sender, metrics: &Metrics, ) -> Result<(), Error> { match sender - .send(FromJob::Backing(CandidateBackingMessage::Second( + .send(AllMessages::from(CandidateBackingMessage::Second( relay_parent, candidate_receipt, pov, - ))) + )).into()) .await { Err(err) => { - log::warn!(target: TARGET, "failed to send a seconding message"); + tracing::warn!(target: LOG_TARGET, err = ?err, "failed to send a seconding message"); metrics.on_second(Err(())); Err(err.into()) } @@ -378,12 +334,12 @@ async fn second_candidate( async fn forward_invalidity_note( received_from: &CollatorId, - sender: &mut mpsc::Sender, + sender: &mut mpsc::Sender, ) -> 
Result<(), Error> { sender - .send(FromJob::Collator(CollatorProtocolMessage::ReportCollator( + .send(AllMessages::from(CollatorProtocolMessage::ReportCollator( received_from.clone(), - ))) + )).into()) .await .map_err(Into::into) } @@ -392,6 +348,8 @@ async fn forward_invalidity_note( struct MetricsInner { seconds: prometheus::CounterVec, invalid_selections: prometheus::CounterVec, + handle_collation: prometheus::Histogram, + handle_invalid: prometheus::Histogram, } /// Candidate selection metrics. @@ -412,6 +370,16 @@ impl Metrics { metrics.invalid_selections.with_label_values(&[label]).inc(); } } + + /// Provide a timer for `handle_collation` which observes on drop. + fn time_handle_collation(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.handle_collation.start_timer()) + } + + /// Provide a timer for `handle_invalid` which observes on drop. + fn time_handle_invalid(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.handle_invalid.start_timer()) + } } impl metrics::Metrics for Metrics { @@ -437,19 +405,38 @@ impl metrics::Metrics for Metrics { )?, registry, )?, + handle_collation: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_candidate_selection_handle_collation", + "Time spent within `candidate_selection::handle_collation`", + ) + )?, + registry, + )?, + handle_invalid: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_candidate_selection:handle_invalid", + "Time spent within `candidate_selection::handle_invalid`", + ) + )?, + registry, + )?, }; Ok(Metrics(Some(metrics))) } } -delegated_subsystem!(CandidateSelectionJob((), Metrics) <- ToJob as CandidateSelectionSubsystem); +delegated_subsystem!(CandidateSelectionJob(SyncCryptoStorePtr, Metrics) <- CandidateSelectionMessage as CandidateSelectionSubsystem); #[cfg(test)] mod tests { use super::*; use futures::lock::Mutex; - use polkadot_primitives::v1::{BlockData, HeadData, 
PersistedValidationData, ValidationOutputs}; + use polkadot_primitives::v1::BlockData; use sp_core::crypto::Public; + use std::sync::Arc; fn test_harness( preconditions: Preconditions, @@ -457,13 +444,14 @@ mod tests { postconditions: Postconditions, ) where Preconditions: FnOnce(&mut CandidateSelectionJob), - TestBuilder: FnOnce(mpsc::Sender, mpsc::Receiver) -> Test, + TestBuilder: FnOnce(mpsc::Sender, mpsc::Receiver) -> Test, Test: Future, Postconditions: FnOnce(CandidateSelectionJob, Result<(), Error>), { let (to_job_tx, to_job_rx) = mpsc::channel(0); let (from_job_tx, from_job_rx) = mpsc::channel(0); let mut job = CandidateSelectionJob { + assignment: 123.into(), sender: from_job_tx, receiver: to_job_rx, metrics: Default::default(), @@ -474,36 +462,12 @@ mod tests { let (_, job_result) = futures::executor::block_on(future::join( test(to_job_tx, from_job_rx), - job.run_loop_borrowed(), + job.run_loop(), )); postconditions(job, job_result); } - fn default_validation_outputs_and_data() -> (ValidationOutputs, polkadot_primitives::v1::PersistedValidationData) { - let head_data: Vec = (0..32).rev().cycle().take(256).collect(); - let parent_head_data = head_data - .iter() - .copied() - .map(|x| x.saturating_sub(1)) - .collect(); - - ( - ValidationOutputs { - head_data: HeadData(head_data), - upward_messages: Vec::new(), - new_validation_code: None, - processed_downward_messages: 0, - }, - PersistedValidationData { - parent_head: HeadData(parent_head_data), - block_number: 123, - hrmp_mqc_heads: Vec::new(), - dmq_mqc_head: Default::default(), - }, - ) - } - /// when nothing is seconded so far, the collation is fetched and seconded #[test] fn fetches_and_seconds_a_collation() { @@ -524,12 +488,10 @@ mod tests { |_job| {}, |mut to_job, mut from_job| async move { to_job - .send(ToJob::CandidateSelection( - CandidateSelectionMessage::Collation( - relay_parent, - para_id, - collator_id_clone.clone(), - ), + .send(CandidateSelectionMessage::Collation( + relay_parent, + 
para_id, + collator_id_clone.clone(), )) .await .unwrap(); @@ -537,12 +499,12 @@ mod tests { while let Some(msg) = from_job.next().await { match msg { - FromJob::Collator(CollatorProtocolMessage::FetchCollation( + FromJobCommand::SendMessage(AllMessages::CollatorProtocol(CollatorProtocolMessage::FetchCollation( got_relay_parent, collator_id, got_para_id, return_sender, - )) => { + ))) => { assert_eq!(got_relay_parent, relay_parent); assert_eq!(got_para_id, para_id); assert_eq!(collator_id, collator_id_clone); @@ -551,26 +513,11 @@ mod tests { .send((candidate_receipt.clone(), pov.clone())) .unwrap(); } - FromJob::Validation( - CandidateValidationMessage::ValidateFromChainState( - got_candidate_descriptor, - got_pov, - return_sender, - ), - ) => { - assert_eq!(got_candidate_descriptor, candidate_receipt.descriptor); - assert_eq!(got_pov.as_ref(), &pov); - - let (outputs, data) = default_validation_outputs_and_data(); - return_sender - .send(Ok(ValidationResult::Valid(outputs, data))) - .unwrap(); - } - FromJob::Backing(CandidateBackingMessage::Second( + FromJobCommand::SendMessage(AllMessages::CandidateBacking(CandidateBackingMessage::Second( got_relay_parent, got_candidate_receipt, got_pov, - )) => { + ))) => { assert_eq!(got_relay_parent, relay_parent); assert_eq!(got_candidate_receipt, candidate_receipt); assert_eq!(got_pov, pov); @@ -606,12 +553,10 @@ mod tests { |job| job.seconded_candidate = Some(prev_collator_id.clone()), |mut to_job, mut from_job| async move { to_job - .send(ToJob::CandidateSelection( - CandidateSelectionMessage::Collation( - relay_parent, - para_id, - collator_id_clone, - ), + .send(CandidateSelectionMessage::Collation( + relay_parent, + para_id, + collator_id_clone, )) .await .unwrap(); @@ -619,11 +564,11 @@ mod tests { while let Some(msg) = from_job.next().await { match msg { - FromJob::Backing(CandidateBackingMessage::Second( + FromJobCommand::SendMessage(AllMessages::CandidateBacking(CandidateBackingMessage::Second( _got_relay_parent, 
_got_candidate_receipt, _got_pov, - )) => { + ))) => { *was_seconded_clone.lock().await = true; } other => panic!("unexpected message from job: {:?}", other), @@ -655,18 +600,16 @@ mod tests { |job| job.seconded_candidate = Some(collator_id.clone()), |mut to_job, mut from_job| async move { to_job - .send(ToJob::CandidateSelection( - CandidateSelectionMessage::Invalid(relay_parent, candidate_receipt), - )) + .send(CandidateSelectionMessage::Invalid(relay_parent, candidate_receipt)) .await .unwrap(); std::mem::drop(to_job); while let Some(msg) = from_job.next().await { match msg { - FromJob::Collator(CollatorProtocolMessage::ReportCollator( + FromJobCommand::SendMessage(AllMessages::CollatorProtocol(CollatorProtocolMessage::ReportCollator( got_collator_id, - )) => { + ))) => { assert_eq!(got_collator_id, collator_id_clone); *sent_report_clone.lock().await = true; diff --git a/node/core/candidate-validation/Cargo.toml b/node/core/candidate-validation/Cargo.toml index 87cb639a360171927456d64137b6fdcfe6d11f15..41703d2293681f2f91a8d2efc12ca92d455cb4c2 100644 --- a/node/core/candidate-validation/Cargo.toml +++ b/node/core/candidate-validation/Cargo.toml @@ -5,11 +5,12 @@ authors = ["Parity Technologies "] edition = "2018" [dependencies] -futures = "0.3.5" -log = "0.4.11" +futures = "0.3.8" +tracing = "0.1.22" +tracing-futures = "0.2.4" -sp-core = { package = "sp-core", git = "https://github.com/paritytech/substrate", branch = "master" } -parity-scale-codec = { version = "1.3.0", default-features = false, features = ["bit-vec", "derive"] } +sp-core = { package = "sp-core", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +parity-scale-codec = { version = "1.3.5", default-features = false, features = ["bit-vec", "derive"] } polkadot-primitives = { path = "../../../primitives" } polkadot-parachain = { path = "../../../parachain" } @@ -18,7 +19,7 @@ polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsys 
polkadot-node-subsystem-util = { path = "../../subsystem-util" } [dev-dependencies] -sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } -futures = { version = "0.3.5", features = ["thread-pool"] } -assert_matches = "1.3.0" +sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +futures = { version = "0.3.8", features = ["thread-pool"] } +assert_matches = "1.4.0" polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } diff --git a/node/core/candidate-validation/src/lib.rs b/node/core/candidate-validation/src/lib.rs index 360257ae32b1af3338f4f0ca9e4fcd0f3ce56ddd..3c64f962c2afd5d294f67dab7e28bcb76d142132 100644 --- a/node/core/candidate-validation/src/lib.rs +++ b/node/core/candidate-validation/src/lib.rs @@ -31,18 +31,15 @@ use polkadot_subsystem::{ ValidationFailed, RuntimeApiRequest, }, }; -use polkadot_node_subsystem_util::{ - metrics::{self, prometheus}, -}; +use polkadot_node_subsystem_util::metrics::{self, prometheus}; use polkadot_subsystem::errors::RuntimeApiError; use polkadot_node_primitives::{ValidationResult, InvalidCandidate}; use polkadot_primitives::v1::{ ValidationCode, PoV, CandidateDescriptor, PersistedValidationData, - OccupiedCoreAssumption, Hash, ValidationOutputs, + OccupiedCoreAssumption, Hash, CandidateCommitments, }; use polkadot_parachain::wasm_executor::{ - self, ValidationPool, ExecutionMode, ValidationError, - InvalidCandidate as WasmInvalidCandidate, + self, IsolationStrategy, ValidationError, InvalidCandidate as WasmInvalidCandidate }; use polkadot_parachain::primitives::{ValidationResult as WasmValidationResult, ValidationParams}; @@ -60,57 +57,16 @@ const LOG_TARGET: &'static str = "candidate_validation"; pub struct CandidateValidationSubsystem { spawn: S, metrics: Metrics, -} - -#[derive(Clone)] -struct MetricsInner { - validation_requests: prometheus::CounterVec, -} - -/// Candidate validation metrics. 
-#[derive(Default, Clone)] -pub struct Metrics(Option); - -impl Metrics { - fn on_validation_event(&self, event: &Result) { - if let Some(metrics) = &self.0 { - match event { - Ok(ValidationResult::Valid(_, _)) => { - metrics.validation_requests.with_label_values(&["valid"]).inc(); - }, - Ok(ValidationResult::Invalid(_)) => { - metrics.validation_requests.with_label_values(&["invalid"]).inc(); - }, - Err(_) => { - metrics.validation_requests.with_label_values(&["validation failure"]).inc(); - }, - } - } - } -} - -impl metrics::Metrics for Metrics { - fn try_register(registry: &prometheus::Registry) -> Result { - let metrics = MetricsInner { - validation_requests: prometheus::register( - prometheus::CounterVec::new( - prometheus::Opts::new( - "parachain_validation_requests_total", - "Number of validation requests served.", - ), - &["validity"], - )?, - registry, - )?, - }; - Ok(Metrics(Some(metrics))) - } + isolation_strategy: IsolationStrategy, } impl CandidateValidationSubsystem { - /// Create a new `CandidateValidationSubsystem` with the given task spawner. - pub fn new(spawn: S, metrics: Metrics) -> Self { - CandidateValidationSubsystem { spawn, metrics } + /// Create a new `CandidateValidationSubsystem` with the given task spawner and isolation + /// strategy. + /// + /// Check out [`IsolationStrategy`] to get more details. 
+ pub fn new(spawn: S, metrics: Metrics, isolation_strategy: IsolationStrategy) -> Self { + CandidateValidationSubsystem { spawn, metrics, isolation_strategy } } } @@ -119,9 +75,8 @@ impl Subsystem for CandidateValidationSubsystem where S: SpawnNamed + Clone + 'static, { fn start(self, ctx: C) -> SpawnedSubsystem { - let future = run(ctx, self.spawn, self.metrics) + let future = run(ctx, self.spawn, self.metrics, self.isolation_strategy) .map_err(|e| SubsystemError::with_origin("candidate-validation", e)) - .map(|_| ()) .boxed(); SpawnedSubsystem { name: "candidate-validation-subsystem", @@ -130,15 +85,13 @@ impl Subsystem for CandidateValidationSubsystem where } } +#[tracing::instrument(skip(ctx, spawn, metrics), fields(subsystem = LOG_TARGET))] async fn run( mut ctx: impl SubsystemContext, spawn: impl SpawnNamed + Clone + 'static, metrics: Metrics, -) - -> SubsystemResult<()> -{ - let execution_mode = ExecutionMode::ExternalProcessSelfHost(ValidationPool::new()); - + isolation_strategy: IsolationStrategy, +) -> SubsystemResult<()> { loop { match ctx.recv().await? 
{ FromOverseer::Signal(OverseerSignal::ActiveLeaves(_)) => {} @@ -150,12 +103,15 @@ async fn run( pov, response_sender, ) => { + let _timer = metrics.time_validate_from_chain_state(); + let res = spawn_validate_from_chain_state( &mut ctx, - execution_mode.clone(), + isolation_strategy.clone(), descriptor, pov, spawn.clone(), + &metrics, ).await; match res { @@ -173,21 +129,24 @@ async fn run( pov, response_sender, ) => { + let _timer = metrics.time_validate_from_exhaustive(); + let res = spawn_validate_exhaustive( &mut ctx, - execution_mode.clone(), + isolation_strategy.clone(), persisted_validation_data, validation_code, descriptor, pov, spawn.clone(), + &metrics, ).await; match res { Ok(x) => { metrics.on_validation_event(&x); if let Err(_e) = response_sender.send(x) { - log::warn!( + tracing::warn!( target: LOG_TARGET, "Requester of candidate validation dropped", ) @@ -212,7 +171,7 @@ async fn runtime_api_request( relay_parent, request, )) - ).await?; + ).await; receiver.await.map_err(Into::into) } @@ -224,6 +183,7 @@ enum AssumptionCheckOutcome { BadRequest, } +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn check_assumption_validation_data( ctx: &mut impl SubsystemContext, descriptor: &CandidateDescriptor, @@ -259,7 +219,7 @@ async fn check_assumption_validation_data( descriptor.relay_parent, RuntimeApiRequest::ValidationCode( descriptor.para_id, - OccupiedCoreAssumption::Included, + assumption, code_tx, ), code_rx, @@ -274,6 +234,7 @@ async fn check_assumption_validation_data( }) } +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn find_assumed_validation_data( ctx: &mut impl SubsystemContext, descriptor: &CandidateDescriptor, @@ -286,31 +247,33 @@ async fn find_assumed_validation_data( const ASSUMPTIONS: &[OccupiedCoreAssumption] = &[ OccupiedCoreAssumption::Included, OccupiedCoreAssumption::TimedOut, - // TODO: Why don't we check `Free`? 
The guide assumes there are only two possible assumptions. - // - // Source that info and leave a comment here. + // `TimedOut` and `Free` both don't perform any speculation and therefore should be the same + // for our purposes here. In other words, if `TimedOut` matched then the `Free` must be + // matched as well. ]; // Consider running these checks in parallel to reduce validation latency. for assumption in ASSUMPTIONS { let outcome = check_assumption_validation_data(ctx, descriptor, *assumption).await?; - let () = match outcome { + match outcome { AssumptionCheckOutcome::Matches(_, _) => return Ok(outcome), AssumptionCheckOutcome::BadRequest => return Ok(outcome), AssumptionCheckOutcome::DoesNotMatch => continue, - }; + } } Ok(AssumptionCheckOutcome::DoesNotMatch) } +#[tracing::instrument(level = "trace", skip(ctx, pov, spawn, metrics), fields(subsystem = LOG_TARGET))] async fn spawn_validate_from_chain_state( ctx: &mut impl SubsystemContext, - execution_mode: ExecutionMode, + isolation_strategy: IsolationStrategy, descriptor: CandidateDescriptor, pov: Arc, spawn: impl SpawnNamed + 'static, + metrics: &Metrics, ) -> SubsystemResult> { let (validation_data, validation_code) = match find_assumed_validation_data(ctx, &descriptor).await? 
{ @@ -330,12 +293,13 @@ async fn spawn_validate_from_chain_state( let validation_result = spawn_validate_exhaustive( ctx, - execution_mode, + isolation_strategy, validation_data, validation_code, descriptor.clone(), pov, spawn, + metrics, ) .await; @@ -364,24 +328,28 @@ async fn spawn_validate_from_chain_state( validation_result } +#[tracing::instrument(level = "trace", skip(ctx, validation_code, pov, spawn, metrics), fields(subsystem = LOG_TARGET))] async fn spawn_validate_exhaustive( ctx: &mut impl SubsystemContext, - execution_mode: ExecutionMode, + isolation_strategy: IsolationStrategy, persisted_validation_data: PersistedValidationData, validation_code: ValidationCode, descriptor: CandidateDescriptor, pov: Arc, spawn: impl SpawnNamed + 'static, + metrics: &Metrics, ) -> SubsystemResult> { let (tx, rx) = oneshot::channel(); + let metrics = metrics.clone(); let fut = async move { let res = validate_candidate_exhaustive::( - execution_mode, + isolation_strategy, persisted_validation_data, validation_code, descriptor, pov, spawn, + &metrics, ); let _ = tx.send(res); @@ -393,18 +361,17 @@ async fn spawn_validate_exhaustive( /// Does basic checks of a candidate. Provide the encoded PoV-block. Returns `Ok` if basic checks /// are passed, `Err` otherwise. 
+#[tracing::instrument(level = "trace", skip(pov), fields(subsystem = LOG_TARGET))] fn perform_basic_checks( candidate: &CandidateDescriptor, - max_block_data_size: Option, + max_pov_size: u32, pov: &PoV, ) -> Result<(), InvalidCandidate> { let encoded_pov = pov.encode(); let hash = pov.hash(); - if let Some(max_size) = max_block_data_size { - if encoded_pov.len() as u64 > max_size { - return Err(InvalidCandidate::ParamsTooLarge(encoded_pov.len() as u64)); - } + if encoded_pov.len() > max_pov_size as usize { + return Err(InvalidCandidate::ParamsTooLarge(encoded_pov.len() as u64)); } if hash != candidate.pov_hash { @@ -432,10 +399,10 @@ trait ValidationBackend { struct RealValidationBackend; impl ValidationBackend for RealValidationBackend { - type Arg = ExecutionMode; + type Arg = IsolationStrategy; fn validate( - execution_mode: ExecutionMode, + isolation_strategy: IsolationStrategy, validation_code: &ValidationCode, params: ValidationParams, spawn: S, @@ -443,7 +410,7 @@ impl ValidationBackend for RealValidationBackend { wasm_executor::validate_candidate( &validation_code.0, params, - &execution_mode, + &isolation_strategy, spawn, ) } @@ -452,6 +419,7 @@ impl ValidationBackend for RealValidationBackend { /// Validates the candidate from exhaustive parameters. /// /// Sends the result of validation on the channel once complete. 
+#[tracing::instrument(level = "trace", skip(backend_arg, validation_code, pov, spawn, metrics), fields(subsystem = LOG_TARGET))] fn validate_candidate_exhaustive( backend_arg: B::Arg, persisted_validation_data: PersistedValidationData, @@ -459,8 +427,11 @@ fn validate_candidate_exhaustive( descriptor: CandidateDescriptor, pov: Arc, spawn: S, + metrics: &Metrics, ) -> Result { - if let Err(e) = perform_basic_checks(&descriptor, None, &*pov) { + let _timer = metrics.time_validate_candidate_exhaustive(); + + if let Err(e) = perform_basic_checks(&descriptor, persisted_validation_data.max_pov_size, &*pov) { return Ok(ValidationResult::Invalid(e)) } @@ -487,22 +458,114 @@ fn validate_candidate_exhaustive( Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(e.to_string()))), Err(ValidationError::Internal(e)) => Err(ValidationFailed(e.to_string())), Ok(res) => { - let outputs = ValidationOutputs { + let outputs = CandidateCommitments { head_data: res.head_data, upward_messages: res.upward_messages, + horizontal_messages: res.horizontal_messages, new_validation_code: res.new_validation_code, processed_downward_messages: res.processed_downward_messages, + hrmp_watermark: res.hrmp_watermark, }; Ok(ValidationResult::Valid(outputs, persisted_validation_data)) } } } +#[derive(Clone)] +struct MetricsInner { + validation_requests: prometheus::CounterVec, + validate_from_chain_state: prometheus::Histogram, + validate_from_exhaustive: prometheus::Histogram, + validate_candidate_exhaustive: prometheus::Histogram, +} + +/// Candidate validation metrics. 
+#[derive(Default, Clone)] +pub struct Metrics(Option); + +impl Metrics { + fn on_validation_event(&self, event: &Result) { + if let Some(metrics) = &self.0 { + match event { + Ok(ValidationResult::Valid(_, _)) => { + metrics.validation_requests.with_label_values(&["valid"]).inc(); + }, + Ok(ValidationResult::Invalid(_)) => { + metrics.validation_requests.with_label_values(&["invalid"]).inc(); + }, + Err(_) => { + metrics.validation_requests.with_label_values(&["validation failure"]).inc(); + }, + } + } + } + + /// Provide a timer for `validate_from_chain_state` which observes on drop. + fn time_validate_from_chain_state(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.validate_from_chain_state.start_timer()) + } + + /// Provide a timer for `validate_from_exhaustive` which observes on drop. + fn time_validate_from_exhaustive(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.validate_from_exhaustive.start_timer()) + } + + /// Provide a timer for `validate_candidate_exhaustive` which observes on drop. 
+ fn time_validate_candidate_exhaustive(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.validate_candidate_exhaustive.start_timer()) + } +} + +impl metrics::Metrics for Metrics { + fn try_register(registry: &prometheus::Registry) -> Result { + let metrics = MetricsInner { + validation_requests: prometheus::register( + prometheus::CounterVec::new( + prometheus::Opts::new( + "parachain_validation_requests_total", + "Number of validation requests served.", + ), + &["validity"], + )?, + registry, + )?, + validate_from_chain_state: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_candidate_validation_validate_from_chain_state", + "Time spent within `candidate_validation::validate_from_chain_state`", + ) + )?, + registry, + )?, + validate_from_exhaustive: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_candidate_validation_validate_from_exhaustive", + "Time spent within `candidate_validation::validate_from_exhaustive`", + ) + )?, + registry, + )?, + validate_candidate_exhaustive: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_candidate_validation_validate_candidate_exhaustive", + "Time spent within `candidate_validation::validate_candidate_exhaustive`", + ) + )?, + registry, + )?, + }; + Ok(Metrics(Some(metrics))) + } +} + #[cfg(test)] mod tests { use super::*; use polkadot_node_subsystem_test_helpers as test_helpers; - use polkadot_primitives::v1::{HeadData, BlockData}; + use polkadot_primitives::v1::{HeadData, BlockData, UpwardMessage}; use sp_core::testing::TaskExecutor; use futures::executor; use assert_matches::assert_matches; @@ -649,7 +712,7 @@ mod tests { ctx_handle.recv().await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( rp, - RuntimeApiRequest::ValidationCode(p, OccupiedCoreAssumption::Included, tx) + RuntimeApiRequest::ValidationCode(p, OccupiedCoreAssumption::TimedOut, 
tx) )) => { assert_eq!(rp, relay_parent); assert_eq!(p, para_id); @@ -757,7 +820,7 @@ mod tests { ctx_handle.recv().await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( rp, - RuntimeApiRequest::ValidationCode(p, OccupiedCoreAssumption::Included, tx) + RuntimeApiRequest::ValidationCode(p, OccupiedCoreAssumption::TimedOut, tx) )) => { assert_eq!(rp, relay_parent); assert_eq!(p, para_id); @@ -820,7 +883,7 @@ mod tests { #[test] fn candidate_validation_ok_is_ok() { - let validation_data: PersistedValidationData = Default::default(); + let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() }; let pov = PoV { block_data: BlockData(vec![1; 32]) }; @@ -828,13 +891,15 @@ mod tests { descriptor.pov_hash = pov.hash(); collator_sign(&mut descriptor, Sr25519Keyring::Alice); - assert!(perform_basic_checks(&descriptor, Some(1024), &pov).is_ok()); + assert!(perform_basic_checks(&descriptor, validation_data.max_pov_size, &pov).is_ok()); let validation_result = WasmValidationResult { head_data: HeadData(vec![1, 1, 1]), new_validation_code: Some(vec![2, 2, 2].into()), upward_messages: Vec::new(), + horizontal_messages: Vec::new(), processed_downward_messages: 0, + hrmp_watermark: 0, }; let v = validate_candidate_exhaustive::( @@ -844,19 +909,22 @@ mod tests { descriptor, Arc::new(pov), TaskExecutor::new(), + &Default::default(), ).unwrap(); assert_matches!(v, ValidationResult::Valid(outputs, used_validation_data) => { assert_eq!(outputs.head_data, HeadData(vec![1, 1, 1])); - assert_eq!(outputs.upward_messages, Vec::new()); + assert_eq!(outputs.upward_messages, Vec::::new()); + assert_eq!(outputs.horizontal_messages, Vec::new()); assert_eq!(outputs.new_validation_code, Some(vec![2, 2, 2].into())); + assert_eq!(outputs.hrmp_watermark, 0); assert_eq!(used_validation_data, validation_data); }); } #[test] fn candidate_validation_bad_return_is_invalid() { - let validation_data: PersistedValidationData = Default::default(); + let validation_data = 
PersistedValidationData { max_pov_size: 1024, ..Default::default() }; let pov = PoV { block_data: BlockData(vec![1; 32]) }; @@ -864,7 +932,7 @@ mod tests { descriptor.pov_hash = pov.hash(); collator_sign(&mut descriptor, Sr25519Keyring::Alice); - assert!(perform_basic_checks(&descriptor, Some(1024), &pov).is_ok()); + assert!(perform_basic_checks(&descriptor, validation_data.max_pov_size, &pov).is_ok()); let v = validate_candidate_exhaustive::( MockValidationArg { @@ -877,6 +945,7 @@ mod tests { descriptor, Arc::new(pov), TaskExecutor::new(), + &Default::default(), ).unwrap(); assert_matches!(v, ValidationResult::Invalid(InvalidCandidate::BadReturn)); @@ -884,7 +953,7 @@ mod tests { #[test] fn candidate_validation_timeout_is_internal_error() { - let validation_data: PersistedValidationData = Default::default(); + let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() }; let pov = PoV { block_data: BlockData(vec![1; 32]) }; @@ -892,7 +961,7 @@ mod tests { descriptor.pov_hash = pov.hash(); collator_sign(&mut descriptor, Sr25519Keyring::Alice); - assert!(perform_basic_checks(&descriptor, Some(1024), &pov).is_ok()); + assert!(perform_basic_checks(&descriptor, validation_data.max_pov_size, &pov).is_ok()); let v = validate_candidate_exhaustive::( MockValidationArg { @@ -905,6 +974,7 @@ mod tests { descriptor, Arc::new(pov), TaskExecutor::new(), + &Default::default(), ); assert_matches!(v, Ok(ValidationResult::Invalid(InvalidCandidate::Timeout))); diff --git a/node/core/chain-api/Cargo.toml b/node/core/chain-api/Cargo.toml index 23f6ed4dede4d185738e279faeeb31ca623af5fa..ecd985c05f054c2ceff1983f9e46e3daa5e15b55 100644 --- a/node/core/chain-api/Cargo.toml +++ b/node/core/chain-api/Cargo.toml @@ -5,14 +5,16 @@ authors = ["Parity Technologies "] edition = "2018" [dependencies] -futures = { version = "0.3.5" } -sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" } +futures = "0.3.8" +tracing = "0.1.22" 
+tracing-futures = "0.2.4" +sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } polkadot-primitives = { path = "../../../primitives" } polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } [dev-dependencies] -futures = { version = "0.3.5", features = ["thread-pool"] } +futures = { version = "0.3.8", features = ["thread-pool"] } maplit = "1.0.2" polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } diff --git a/node/core/chain-api/src/lib.rs b/node/core/chain-api/src/lib.rs index 82be6d846778d4dc9515d51c17425cac81a9d56e..aa8b8ae6e951c56d1a56f3e18b84391365d8337e 100644 --- a/node/core/chain-api/src/lib.rs +++ b/node/core/chain-api/src/lib.rs @@ -44,6 +44,8 @@ use std::sync::Arc; use futures::prelude::*; +const LOG_TARGET: &str = "chain_api"; + /// The Chain API Subsystem implementation. 
pub struct ChainApiSubsystem { client: Arc, @@ -67,7 +69,6 @@ impl Subsystem for ChainApiSubsystem where fn start(self, ctx: Context) -> SpawnedSubsystem { let future = run(ctx, self) .map_err(|e| SubsystemError::with_origin("chain-api", e)) - .map(|_| ()) .boxed(); SpawnedSubsystem { future, @@ -76,6 +77,7 @@ impl Subsystem for ChainApiSubsystem where } } +#[tracing::instrument(skip(ctx, subsystem), fields(subsystem = LOG_TARGET))] async fn run( mut ctx: impl SubsystemContext, subsystem: ChainApiSubsystem, @@ -90,11 +92,13 @@ where FromOverseer::Signal(OverseerSignal::BlockFinalized(_)) => {}, FromOverseer::Communication { msg } => match msg { ChainApiMessage::BlockNumber(hash, response_channel) => { + let _timer = subsystem.metrics.time_block_number(); let result = subsystem.client.number(hash).map_err(|e| e.to_string().into()); subsystem.metrics.on_request(result.is_ok()); let _ = response_channel.send(result); }, ChainApiMessage::BlockHeader(hash, response_channel) => { + let _timer = subsystem.metrics.time_block_header(); let result = subsystem.client .header(BlockId::Hash(hash)) .map_err(|e| e.to_string().into()); @@ -102,18 +106,23 @@ where let _ = response_channel.send(result); }, ChainApiMessage::FinalizedBlockHash(number, response_channel) => { + let _timer = subsystem.metrics.time_finalized_block_hash(); // Note: we don't verify it's finalized let result = subsystem.client.hash(number).map_err(|e| e.to_string().into()); subsystem.metrics.on_request(result.is_ok()); let _ = response_channel.send(result); }, ChainApiMessage::FinalizedBlockNumber(response_channel) => { + let _timer = subsystem.metrics.time_finalized_block_number(); let result = subsystem.client.info().finalized_number; // always succeeds subsystem.metrics.on_request(true); let _ = response_channel.send(Ok(result)); }, ChainApiMessage::Ancestors { hash, k, response_channel } => { + let _timer = subsystem.metrics.time_ancestors(); + tracing::span!(tracing::Level::TRACE, 
"ChainApiMessage::Ancestors", subsystem=LOG_TARGET, hash=%hash, k=k); + let mut hash = hash; let next_parent = core::iter::from_fn(|| { @@ -150,6 +159,11 @@ where #[derive(Clone)] struct MetricsInner { chain_api_requests: prometheus::CounterVec, + block_number: prometheus::Histogram, + block_header: prometheus::Histogram, + finalized_block_hash: prometheus::Histogram, + finalized_block_number: prometheus::Histogram, + ancestors: prometheus::Histogram, } /// Chain API metrics. @@ -166,6 +180,31 @@ impl Metrics { } } } + + /// Provide a timer for `block_number` which observes on drop. + fn time_block_number(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.block_number.start_timer()) + } + + /// Provide a timer for `block_header` which observes on drop. + fn time_block_header(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.block_header.start_timer()) + } + + /// Provide a timer for `finalized_block_hash` which observes on drop. + fn time_finalized_block_hash(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.finalized_block_hash.start_timer()) + } + + /// Provide a timer for `finalized_block_number` which observes on drop. + fn time_finalized_block_number(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.finalized_block_number.start_timer()) + } + + /// Provide a timer for `ancestors` which observes on drop. 
+ fn time_ancestors(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.ancestors.start_timer()) + } } impl metrics::Metrics for Metrics { @@ -181,6 +220,51 @@ impl metrics::Metrics for Metrics { )?, registry, )?, + block_number: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_chain_api_block_number", + "Time spent within `chain_api::block_number`", + ) + )?, + registry, + )?, + block_header: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_chain_api_block_headers", + "Time spent within `chain_api::block_headers`", + ) + )?, + registry, + )?, + finalized_block_hash: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_chain_api_finalized_block_hash", + "Time spent within `chain_api::finalized_block_hash`", + ) + )?, + registry, + )?, + finalized_block_number: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_chain_api_finalized_block_number", + "Time spent within `chain_api::finalized_block_number`", + ) + )?, + registry, + )?, + ancestors: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_chain_api_ancestors", + "Time spent within `chain_api::ancestors`", + ) + )?, + registry, + )?, }; Ok(Metrics(Some(metrics))) } diff --git a/node/core/proposer/Cargo.toml b/node/core/proposer/Cargo.toml index b565f7c8a689d5b97f5efd0cf24c2fe1ce75a97b..0f7600ffe4c068abf9012d1fd43b346846336411 100644 --- a/node/core/proposer/Cargo.toml +++ b/node/core/proposer/Cargo.toml @@ -5,20 +5,20 @@ authors = ["Parity Technologies "] edition = "2018" [dependencies] -futures = "0.3.4" -log = "0.4.8" +futures = "0.3.8" +futures-timer = "3.0.2" +tracing = "0.1.22" polkadot-node-subsystem = { path = "../../subsystem" } polkadot-overseer = { path = "../../overseer" } polkadot-primitives = { path = "../../../primitives" } 
-sc-basic-authorship = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", git = "https://github.com/paritytech/substrate", branch = "master" } -wasm-timer = "0.2.4" +sc-basic-authorship = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sc-block-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } 
+sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } diff --git a/node/core/proposer/src/lib.rs b/node/core/proposer/src/lib.rs index e32c44ec4b0e713cd7efe9e5f7157567f3e20390..e5b4c61341792901955da218ef71465ae35f297e 100644 --- a/node/core/proposer/src/lib.rs +++ b/node/core/proposer/src/lib.rs @@ -37,7 +37,7 @@ use prometheus_endpoint::Registry as PrometheusRegistry; use std::{fmt, pin::Pin, sync::Arc, time}; /// How long proposal can take before we give up and err out -const PROPOSE_TIMEOUT: core::time::Duration = core::time::Duration::from_secs(2); +const PROPOSE_TIMEOUT: core::time::Duration = core::time::Duration::from_millis(2500); /// Custom Proposer factory for Polkadot pub struct ProposerFactory { @@ -136,38 +136,29 @@ where /// Get provisioner inherent data /// /// This function has a constant timeout: `PROPOSE_TIMEOUT`. - fn get_provisioner_data(&self) -> impl Future> { + async fn get_provisioner_data(&self) -> Result { // clone this (lightweight) data because we're going to move it into the future let mut overseer = self.overseer.clone(); let parent_header_hash = self.parent_header_hash.clone(); - let mut provisioner_inherent_data = async move { + let pid = async { let (sender, receiver) = futures::channel::oneshot::channel(); - - overseer.wait_for_activation(parent_header_hash, sender).await?; + overseer.wait_for_activation(parent_header_hash, sender).await; receiver.await.map_err(|_| Error::ClosedChannelAwaitingActivation)??; let (sender, receiver) = futures::channel::oneshot::channel(); - // strictly speaking, we don't _have_ to .await this send_msg before opening the - // receiver; it's possible that the response there would be ready slightly before - // this call completes. 
IMO it's not worth the hassle or overhead of spawning a - // distinct task for that kind of miniscule efficiency improvement. overseer.send_msg(AllMessages::Provisioner( ProvisionerMessage::RequestInherentData(parent_header_hash, sender), - )).await?; + )).await; receiver.await.map_err(|_| Error::ClosedChannelAwaitingInherentData) - } - .boxed() - .fuse(); + }; - let mut timeout = wasm_timer::Delay::new(PROPOSE_TIMEOUT).fuse(); + let mut timeout = futures_timer::Delay::new(PROPOSE_TIMEOUT).fuse(); - async move { - select! { - pid = provisioner_inherent_data => pid, - _ = timeout => Err(Error::Timeout), - } + select! { + pid = pid.fuse() => pid, + _ = timeout => Err(Error::Timeout), } } } @@ -201,13 +192,11 @@ where max_duration: time::Duration, record_proof: RecordProof, ) -> Self::Proposal { - let provisioner_data = self.get_provisioner_data(); - async move { - let provisioner_data = match provisioner_data.await { + let provisioner_data = match self.get_provisioner_data().await { Ok(pd) => pd, Err(err) => { - log::warn!("could not get provisioner inherent data; injecting default data: {}", err); + tracing::warn!(err = ?err, "could not get provisioner inherent data; injecting default data"); Default::default() } }; diff --git a/node/core/provisioner/Cargo.toml b/node/core/provisioner/Cargo.toml index a44fae43f52af95d306fb18bc082a455be88c643..7339e4b60aac8d8d068893710c6f22f07c51fdcc 100644 --- a/node/core/provisioner/Cargo.toml +++ b/node/core/provisioner/Cargo.toml @@ -6,16 +6,15 @@ edition = "2018" [dependencies] bitvec = { version = "0.17.4", default-features = false, features = ["alloc"] } -futures = "0.3.5" -log = "0.4.11" -thiserror = "1.0.21" +futures = "0.3.8" +tracing = "0.1.22" +tracing-futures = "0.2.4" +thiserror = "1.0.22" polkadot-primitives = { path = "../../../primitives" } polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } +futures-timer = "3.0.2" [dev-dependencies] 
-sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } -futures-timer = "3.0.2" -tempfile = "3.1.0" +sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } diff --git a/node/core/provisioner/src/lib.rs b/node/core/provisioner/src/lib.rs index 3fe479768aaf2af10f40cee10df693dad32e2d6c..4e985a262208478f4ca9f40ad54679226025562f 100644 --- a/node/core/provisioner/src/lib.rs +++ b/node/core/provisioner/src/lib.rs @@ -17,7 +17,7 @@ //! The provisioner is responsible for assembling a relay chain block //! from a set of available parachain candidates of its choice. -#![deny(missing_docs, unused_crate_dependencies, unused_results)] +#![deny(missing_docs, unused_crate_dependencies)] use bitvec::vec::BitVec; use futures::{ @@ -27,93 +27,69 @@ use futures::{ use polkadot_node_subsystem::{ errors::{ChainApiError, RuntimeApiError}, messages::{ - AllMessages, ChainApiMessage, ProvisionableData, ProvisionerInherentData, - ProvisionerMessage, RuntimeApiMessage, + AllMessages, CandidateBackingMessage, ChainApiMessage, ProvisionableData, ProvisionerInherentData, + ProvisionerMessage, }, }; use polkadot_node_subsystem_util::{ - self as util, - delegated_subsystem, - request_availability_cores, request_persisted_validation_data, JobTrait, ToJobTrait, - metrics::{self, prometheus}, + self as util, delegated_subsystem, FromJobCommand, + request_availability_cores, request_persisted_validation_data, JobTrait, metrics::{self, prometheus}, }; use polkadot_primitives::v1::{ - BackedCandidate, BlockNumber, CoreState, Hash, OccupiedCoreAssumption, - SignedAvailabilityBitfield, + BackedCandidate, BlockNumber, CandidateReceipt, 
CoreState, Hash, OccupiedCoreAssumption, + SignedAvailabilityBitfield, ValidatorIndex, }; -use std::{collections::HashMap, convert::TryFrom, pin::Pin}; +use std::{pin::Pin, collections::BTreeMap}; use thiserror::Error; +use futures_timer::Delay; -struct ProvisioningJob { - relay_parent: Hash, - sender: mpsc::Sender, - receiver: mpsc::Receiver, - provisionable_data_channels: Vec>, - backed_candidates: Vec, - signed_bitfields: Vec, - metrics: Metrics, -} +/// How long to wait before proposing. +const PRE_PROPOSE_TIMEOUT: std::time::Duration = core::time::Duration::from_millis(2000); -/// This enum defines the messages that the provisioner is prepared to receive. -pub enum ToJob { - /// The provisioner message is the main input to the provisioner. - Provisioner(ProvisionerMessage), - /// This message indicates that the provisioner should shut itself down. - Stop, -} - -impl ToJobTrait for ToJob { - const STOP: Self = Self::Stop; +const LOG_TARGET: &str = "provisioner"; - fn relay_parent(&self) -> Option { - match self { - Self::Provisioner(pm) => pm.relay_parent(), - Self::Stop => None, - } - } +enum InherentAfter { + Ready, + Wait(Delay), } -impl TryFrom for ToJob { - type Error = (); - - fn try_from(msg: AllMessages) -> Result { - match msg { - AllMessages::Provisioner(pm) => Ok(Self::Provisioner(pm)), - _ => Err(()), - } +impl InherentAfter { + fn new_from_now() -> Self { + InherentAfter::Wait(Delay::new(PRE_PROPOSE_TIMEOUT)) } -} -impl From for ToJob { - fn from(pm: ProvisionerMessage) -> Self { - Self::Provisioner(pm) + fn is_ready(&self) -> bool { + match *self { + InherentAfter::Ready => true, + InherentAfter::Wait(_) => false, + } } -} - -enum FromJob { - ChainApi(ChainApiMessage), - Runtime(RuntimeApiMessage), -} -impl From for AllMessages { - fn from(from_job: FromJob) -> AllMessages { - match from_job { - FromJob::ChainApi(cam) => AllMessages::ChainApi(cam), - FromJob::Runtime(ram) => AllMessages::RuntimeApi(ram), + async fn ready(&mut self) { + match *self 
{ + InherentAfter::Ready => { + // Make sure we never end the returned future. + // This is required because the `select!` that calls this future will end in a busy loop. + futures::pending!() + }, + InherentAfter::Wait(ref mut d) => { + d.await; + *self = InherentAfter::Ready; + }, } } } -impl TryFrom for FromJob { - type Error = (); - - fn try_from(msg: AllMessages) -> Result { - match msg { - AllMessages::ChainApi(chain) => Ok(FromJob::ChainApi(chain)), - AllMessages::RuntimeApi(runtime) => Ok(FromJob::Runtime(runtime)), - _ => Err(()), - } - } +struct ProvisioningJob { + relay_parent: Hash, + sender: mpsc::Sender, + receiver: mpsc::Receiver, + provisionable_data_channels: Vec>, + backed_candidates: Vec, + signed_bitfields: Vec, + metrics: Metrics, + inherent_after: InherentAfter, + awaiting_inherent: Vec> } #[derive(Debug, Error)] @@ -121,8 +97,17 @@ enum Error { #[error(transparent)] Util(#[from] util::Error), - #[error(transparent)] - OneshotRecv(#[from] oneshot::Canceled), + #[error("failed to get availability cores")] + CanceledAvailabilityCores(#[source] oneshot::Canceled), + + #[error("failed to get persisted validation data")] + CanceledPersistedValidationData(#[source] oneshot::Canceled), + + #[error("failed to get block number")] + CanceledBlockNumber(#[source] oneshot::Canceled), + + #[error("failed to get backed candidates")] + CanceledBackedCandidates(#[source] oneshot::Canceled), #[error(transparent)] ChainApi(#[from] ChainApiError), @@ -130,16 +115,21 @@ enum Error { #[error(transparent)] Runtime(#[from] RuntimeApiError), - #[error("Failed to send message to ChainAPI")] + #[error("failed to send message to ChainAPI")] ChainApiMessageSend(#[source] mpsc::SendError), - #[error("Failed to send return message with Inherents")] + #[error("failed to send message to CandidateBacking to get backed candidates")] + GetBackedCandidatesSend(#[source] mpsc::SendError), + + #[error("failed to send return message with Inherents")] InherentDataReturnChannel, + + 
#[error("backed candidate does not correspond to selected candidate; check logic in provisioner")] + BackedCandidateOrderingProblem, } impl JobTrait for ProvisioningJob { - type ToJob = ToJob; - type FromJob = FromJob; + type ToJob = ProvisionerMessage; type Error = Error; type RunArgs = (); type Metrics = Metrics; @@ -149,15 +139,21 @@ impl JobTrait for ProvisioningJob { /// Run a job for the parent block indicated // // this function is in charge of creating and executing the job's main loop + #[tracing::instrument(skip(_run_args, metrics, receiver, sender), fields(subsystem = LOG_TARGET))] fn run( relay_parent: Hash, _run_args: Self::RunArgs, metrics: Self::Metrics, - receiver: mpsc::Receiver, - sender: mpsc::Sender, + receiver: mpsc::Receiver, + sender: mpsc::Sender, ) -> Pin> + Send>> { async move { - let job = ProvisioningJob::new(relay_parent, metrics, sender, receiver); + let job = ProvisioningJob::new( + relay_parent, + metrics, + sender, + receiver, + ); // it isn't necessary to break run_loop into its own function, // but it's convenient to separate the concerns in this way @@ -171,8 +167,8 @@ impl ProvisioningJob { pub fn new( relay_parent: Hash, metrics: Metrics, - sender: mpsc::Sender, - receiver: mpsc::Receiver, + sender: mpsc::Sender, + receiver: mpsc::Receiver, ) -> Self { Self { relay_parent, @@ -182,75 +178,101 @@ impl ProvisioningJob { backed_candidates: Vec::new(), signed_bitfields: Vec::new(), metrics, + inherent_after: InherentAfter::new_from_now(), + awaiting_inherent: Vec::new(), } } async fn run_loop(mut self) -> Result<(), Error> { - while let Some(msg) = self.receiver.next().await { - use ProvisionerMessage::{ - ProvisionableData, RequestBlockAuthorshipData, RequestInherentData, - }; - - match msg { - ToJob::Provisioner(RequestInherentData(_, return_sender)) => { - if let Err(err) = send_inherent_data( - self.relay_parent, - &self.signed_bitfields, - &self.backed_candidates, - return_sender, - self.sender.clone(), - ) - .await - { - 
log::warn!(target: "provisioner", "failed to assemble or send inherent data: {:?}", err); - self.metrics.on_inherent_data_request(Err(())); - } else { - self.metrics.on_inherent_data_request(Ok(())); - } - } - ToJob::Provisioner(RequestBlockAuthorshipData(_, sender)) => { - self.provisionable_data_channels.push(sender) - } - ToJob::Provisioner(ProvisionableData(data)) => { - let mut bad_indices = Vec::new(); - for (idx, channel) in self.provisionable_data_channels.iter_mut().enumerate() { - match channel.send(data.clone()).await { - Ok(_) => {} - Err(_) => bad_indices.push(idx), + use ProvisionerMessage::{ + ProvisionableData, RequestBlockAuthorshipData, RequestInherentData, + }; + + loop { + futures::select! { + msg = self.receiver.next().fuse() => match msg { + Some(RequestInherentData(_, return_sender)) => { + let _timer = self.metrics.time_request_inherent_data(); + + if self.inherent_after.is_ready() { + self.send_inherent_data(vec![return_sender]).await; + } else { + self.awaiting_inherent.push(return_sender); } } - self.note_provisionable_data(data); - - // clean up our list of channels by removing the bad indices - // start by reversing it for efficient pop - bad_indices.reverse(); - // Vec::retain would be nicer here, but it doesn't provide - // an easy API for retaining by index, so we re-collect instead. 
- self.provisionable_data_channels = self - .provisionable_data_channels - .into_iter() - .enumerate() - .filter(|(idx, _)| { - if bad_indices.is_empty() { - return true; - } - let tail = bad_indices[bad_indices.len() - 1]; - let retain = *idx != tail; - if *idx >= tail { - let _ = bad_indices.pop(); + Some(RequestBlockAuthorshipData(_, sender)) => { + self.provisionable_data_channels.push(sender) + } + Some(ProvisionableData(_, data)) => { + let _timer = self.metrics.time_provisionable_data(); + + let mut bad_indices = Vec::new(); + for (idx, channel) in self.provisionable_data_channels.iter_mut().enumerate() { + match channel.send(data.clone()).await { + Ok(_) => {} + Err(_) => bad_indices.push(idx), } - retain - }) - .map(|(_, item)| item) - .collect(); + } + self.note_provisionable_data(data); + + // clean up our list of channels by removing the bad indices + // start by reversing it for efficient pop + bad_indices.reverse(); + // Vec::retain would be nicer here, but it doesn't provide + // an easy API for retaining by index, so we re-collect instead. 
+ self.provisionable_data_channels = self + .provisionable_data_channels + .into_iter() + .enumerate() + .filter(|(idx, _)| { + if bad_indices.is_empty() { + return true; + } + let tail = bad_indices[bad_indices.len() - 1]; + let retain = *idx != tail; + if *idx >= tail { + let _ = bad_indices.pop(); + } + retain + }) + .map(|(_, item)| item) + .collect(); + } + None => break, + }, + _ = self.inherent_after.ready().fuse() => { + let return_senders = std::mem::take(&mut self.awaiting_inherent); + if !return_senders.is_empty() { + self.send_inherent_data(return_senders).await; + } } - ToJob::Stop => break, } } Ok(()) } + async fn send_inherent_data( + &mut self, + return_senders: Vec>, + ) { + if let Err(err) = send_inherent_data( + self.relay_parent, + &self.signed_bitfields, + &self.backed_candidates, + return_senders, + &mut self.sender, + ) + .await + { + tracing::warn!(target: LOG_TARGET, err = ?err, "failed to assemble or send inherent data"); + self.metrics.on_inherent_data_request(Err(())); + } else { + self.metrics.on_inherent_data_request(Ok(())); + } + } + + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn note_provisionable_data(&mut self, provisionable_data: ProvisionableData) { match provisionable_data { ProvisionableData::Bitfield(_, signed_bitfield) => { @@ -266,33 +288,34 @@ impl ProvisioningJob { type CoreAvailability = BitVec; -// The provisioner is the subsystem best suited to choosing which specific -// backed candidates and availability bitfields should be assembled into the -// block. To engage this functionality, a -// `ProvisionerMessage::RequestInherentData` is sent; the response is a set of -// non-conflicting candidates and the appropriate bitfields. Non-conflicting -// means that there are never two distinct parachain candidates included for -// the same parachain and that new parachain candidates cannot be included -// until the previous one either gets declared available or expired. 
-// -// The main complication here is going to be around handling -// occupied-core-assumptions. We might have candidates that are only -// includable when some bitfields are included. And we might have candidates -// that are not includable when certain bitfields are included. -// -// When we're choosing bitfields to include, the rule should be simple: -// maximize availability. So basically, include all bitfields. And then -// choose a coherent set of candidates along with that. +/// The provisioner is the subsystem best suited to choosing which specific +/// backed candidates and availability bitfields should be assembled into the +/// block. To engage this functionality, a +/// `ProvisionerMessage::RequestInherentData` is sent; the response is a set of +/// non-conflicting candidates and the appropriate bitfields. Non-conflicting +/// means that there are never two distinct parachain candidates included for +/// the same parachain and that new parachain candidates cannot be included +/// until the previous one either gets declared available or expired. +/// +/// The main complication here is going to be around handling +/// occupied-core-assumptions. We might have candidates that are only +/// includable when some bitfields are included. And we might have candidates +/// that are not includable when certain bitfields are included. +/// +/// When we're choosing bitfields to include, the rule should be simple: +/// maximize availability. So basically, include all bitfields. And then +/// choose a coherent set of candidates along with that. 
+#[tracing::instrument(level = "trace", skip(return_senders, from_job), fields(subsystem = LOG_TARGET))] async fn send_inherent_data( relay_parent: Hash, bitfields: &[SignedAvailabilityBitfield], - candidates: &[BackedCandidate], - return_sender: oneshot::Sender, - mut from_job: mpsc::Sender, + candidates: &[CandidateReceipt], + return_senders: Vec>, + from_job: &mut mpsc::Sender, ) -> Result<(), Error> { - let availability_cores = request_availability_cores(relay_parent, &mut from_job) + let availability_cores = request_availability_cores(relay_parent, from_job) .await? - .await??; + .await.map_err(|err| Error::CanceledAvailabilityCores(err))??; let bitfields = select_availability_bitfields(&availability_cores, bitfields); let candidates = select_candidates( @@ -300,64 +323,67 @@ async fn send_inherent_data( &bitfields, candidates, relay_parent, - &mut from_job, + from_job, ) .await?; - return_sender - .send((bitfields, candidates)) - .map_err(|_data| Error::InherentDataReturnChannel)?; + let res = (bitfields, candidates); + for return_sender in return_senders { + return_sender.send(res.clone()).map_err(|_data| Error::InherentDataReturnChannel)?; + } + Ok(()) } -// in general, we want to pick all the bitfields. However, we have the following constraints: -// -// - not more than one per validator -// - each must correspond to an occupied core -// -// If we have too many, an arbitrary selection policy is fine. For purposes of maximizing availability, -// we pick the one with the greatest number of 1 bits. -// -// note: this does not enforce any sorting precondition on the output; the ordering there will be unrelated -// to the sorting of the input. +/// In general, we want to pick all the bitfields. However, we have the following constraints: +/// +/// - not more than one per validator +/// - each 1 bit must correspond to an occupied core +/// +/// If we have too many, an arbitrary selection policy is fine. 
For purposes of maximizing availability, +/// we pick the one with the greatest number of 1 bits. +/// +/// Note: This does not enforce any sorting precondition on the output; the ordering there will be unrelated +/// to the sorting of the input. +#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))] fn select_availability_bitfields( cores: &[CoreState], bitfields: &[SignedAvailabilityBitfield], ) -> Vec { - let mut fields_by_core: HashMap<_, Vec<_>> = HashMap::new(); - for bitfield in bitfields.iter() { - let core_idx = bitfield.validator_index() as usize; - if let CoreState::Occupied(_) = cores[core_idx] { - fields_by_core - .entry(core_idx) - // there cannot be a value list in field_by_core with len < 1 - .or_default() - .push(bitfield.clone()); + let mut selected: BTreeMap = BTreeMap::new(); + + 'a: + for bitfield in bitfields.iter().cloned() { + if bitfield.payload().0.len() != cores.len() { + continue } - } - let mut out = Vec::with_capacity(fields_by_core.len()); - for (_, core_bitfields) in fields_by_core.iter_mut() { - core_bitfields.sort_by_key(|bitfield| bitfield.payload().0.count_ones()); - out.push( - core_bitfields - .pop() - .expect("every core bitfield has at least 1 member; qed"), - ); + let is_better = selected.get(&bitfield.validator_index()) + .map_or(true, |b| b.payload().0.count_ones() < bitfield.payload().0.count_ones()); + + if !is_better { continue } + + for (idx, _) in cores.iter().enumerate().filter(|v| !v.1.is_occupied()) { + // Bit is set for an unoccupied core - invalid + if *bitfield.payload().0.get(idx).unwrap_or(&false) { + continue 'a + } + } + + let _ = selected.insert(bitfield.validator_index(), bitfield); } - out + selected.into_iter().map(|(_, b)| b).collect() } -// determine which cores are free, and then to the degree possible, pick a candidate appropriate to each free core. 
-// -// follow the candidate selection algorithm from the guide +/// Determine which cores are free, and then to the degree possible, pick a candidate appropriate to each free core. +#[tracing::instrument(level = "trace", skip(sender), fields(subsystem = LOG_TARGET))] async fn select_candidates( availability_cores: &[CoreState], bitfields: &[SignedAvailabilityBitfield], - candidates: &[BackedCandidate], + candidates: &[CandidateReceipt], relay_parent: Hash, - sender: &mut mpsc::Sender, + sender: &mut mpsc::Sender, ) -> Result, Error> { let block_number = get_block_number_under_construction(relay_parent, sender).await?; @@ -368,8 +394,7 @@ async fn select_candidates( let (scheduled_core, assumption) = match core { CoreState::Scheduled(scheduled_core) => (scheduled_core, OccupiedCoreAssumption::Free), CoreState::Occupied(occupied_core) => { - if bitfields_indicate_availability(core_idx, bitfields, &occupied_core.availability) - { + if bitfields_indicate_availability(core_idx, bitfields, &occupied_core.availability) { if let Some(ref scheduled_core) = occupied_core.next_up_on_available { (scheduled_core, OccupiedCoreAssumption::Included) } else { @@ -396,7 +421,7 @@ async fn select_candidates( sender, ) .await? - .await?? + .await.map_err(|err| Error::CanceledPersistedValidationData(err))?? 
{ Some(v) => v, None => continue, @@ -406,51 +431,77 @@ async fn select_candidates( // we arbitrarily pick the first of the backed candidates which match the appropriate selection criteria if let Some(candidate) = candidates.iter().find(|backed_candidate| { - let descriptor = &backed_candidate.candidate.descriptor; + let descriptor = &backed_candidate.descriptor; descriptor.para_id == scheduled_core.para_id && descriptor.persisted_validation_data_hash == computed_validation_data_hash }) { - selected_candidates.push(candidate.clone()); + selected_candidates.push(candidate.hash()); + } + } + + // now get the backed candidates corresponding to these candidate receipts + let (tx, rx) = oneshot::channel(); + sender.send(AllMessages::CandidateBacking(CandidateBackingMessage::GetBackedCandidates( + relay_parent, + selected_candidates.clone(), + tx, + )).into()).await.map_err(|err| Error::GetBackedCandidatesSend(err))?; + let candidates = rx.await.map_err(|err| Error::CanceledBackedCandidates(err))?; + + // `selected_candidates` is generated in ascending order by core index, and `GetBackedCandidates` + // _should_ preserve that property, but let's just make sure. + // + // We can't easily map from `BackedCandidate` to `core_idx`, but we know that every selected candidate + // maps to either 0 or 1 backed candidate, and the hashes correspond. Therefore, by checking them + // in order, we can ensure that the backed candidates are also in order. 
+ let mut backed_idx = 0; + for selected in selected_candidates { + if selected == candidates.get(backed_idx).ok_or(Error::BackedCandidateOrderingProblem)?.hash() { + backed_idx += 1; } } + if candidates.len() != backed_idx { + Err(Error::BackedCandidateOrderingProblem)?; + } - Ok(selected_candidates) + Ok(candidates) } -// produces a block number 1 higher than that of the relay parent -// in the event of an invalid `relay_parent`, returns `Ok(0)` +/// Produces a block number 1 higher than that of the relay parent +/// in the event of an invalid `relay_parent`, returns `Ok(0)` +#[tracing::instrument(level = "trace", skip(sender), fields(subsystem = LOG_TARGET))] async fn get_block_number_under_construction( relay_parent: Hash, - sender: &mut mpsc::Sender, + sender: &mut mpsc::Sender, ) -> Result { let (tx, rx) = oneshot::channel(); sender - .send(FromJob::ChainApi(ChainApiMessage::BlockNumber( + .send(AllMessages::from(ChainApiMessage::BlockNumber( relay_parent, tx, - ))) + )).into()) .await .map_err(|e| Error::ChainApiMessageSend(e))?; - match rx.await? { + match rx.await.map_err(|err| Error::CanceledBlockNumber(err))? { Ok(Some(n)) => Ok(n + 1), Ok(None) => Ok(0), Err(err) => Err(err.into()), } } -// the availability bitfield for a given core is the transpose -// of a set of signed availability bitfields. It goes like this: -// -// - construct a transverse slice along `core_idx` -// - bitwise-or it with the availability slice -// - count the 1 bits, compare to the total length; true on 2/3+ +/// The availability bitfield for a given core is the transpose +/// of a set of signed availability bitfields. 
It goes like this: +/// +/// - construct a transverse slice along `core_idx` +/// - bitwise-or it with the availability slice +/// - count the 1 bits, compare to the total length; true on 2/3+ +#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))] fn bitfields_indicate_availability( core_idx: usize, bitfields: &[SignedAvailabilityBitfield], availability: &CoreAvailability, ) -> bool { let mut availability = availability.clone(); - // we need to pre-compute this to avoid a borrow-immutable-while-borrowing-mutable error in the error message let availability_len = availability.len(); for bitfield in bitfields { @@ -460,18 +511,29 @@ fn bitfields_indicate_availability( // in principle, this function might return a `Result` so that we can more clearly express this error condition // however, in practice, that would just push off an error-handling routine which would look a whole lot like this one. // simpler to just handle the error internally here. - log::warn!(target: "provisioner", "attempted to set a transverse bit at idx {} which is greater than bitfield size {}", validator_idx, availability_len); + tracing::warn!( + target: LOG_TARGET, + validator_idx = %validator_idx, + availability_len = %availability_len, + "attempted to set a transverse bit at idx {} which is greater than bitfield size {}", + validator_idx, + availability_len, + ); + return false; } Some(mut bit_mut) => *bit_mut |= bitfield.payload().0[core_idx], } } + 3 * availability.count_ones() >= 2 * availability.len() } #[derive(Clone)] struct MetricsInner { inherent_data_requests: prometheus::CounterVec, + request_inherent_data: prometheus::Histogram, + provisionable_data: prometheus::Histogram, } /// Provisioner metrics. @@ -487,6 +549,16 @@ impl Metrics { } } } + + /// Provide a timer for `request_inherent_data` which observes on drop. 
+ fn time_request_inherent_data(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.request_inherent_data.start_timer()) + } + + /// Provide a timer for `provisionable_data` which observes on drop. + fn time_provisionable_data(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.provisionable_data.start_timer()) + } } impl metrics::Metrics for Metrics { @@ -502,13 +574,31 @@ impl metrics::Metrics for Metrics { )?, registry, )?, + request_inherent_data: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_provisioner_request_inherent_data", + "Time spent within `provisioner::request_inherent_data`", + ) + )?, + registry, + )?, + provisionable_data: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_provisioner_provisionable_data", + "Time spent within `provisioner::provisionable_data`", + ) + )?, + registry, + )?, }; Ok(Metrics(Some(metrics))) } } -delegated_subsystem!(ProvisioningJob((), Metrics) <- ToJob as ProvisioningSubsystem); +delegated_subsystem!(ProvisioningJob((), Metrics) <- ProvisionerMessage as ProvisioningSubsystem); #[cfg(test)] -mod tests; \ No newline at end of file +mod tests; diff --git a/node/core/provisioner/src/tests.rs b/node/core/provisioner/src/tests.rs index d0df7f5329f00f31a7135ead17dd13efd0df600c..cc3750591ee7af13ae0edbbe7c117d1d6060660e 100644 --- a/node/core/provisioner/src/tests.rs +++ b/node/core/provisioner/src/tests.rs @@ -10,7 +10,7 @@ pub fn occupied_core(para_id: u32) -> CoreState { occupied_since: 100_u32, time_out_at: 200_u32, next_up_on_time_out: None, - availability: default_bitvec(), + availability: bitvec![bitvec::order::Lsb0, u8; 0; 32], }) } @@ -28,8 +28,8 @@ where CoreState::Occupied(core) } -pub fn default_bitvec() -> CoreAvailability { - bitvec![bitvec::order::Lsb0, u8; 0; 32] +pub fn default_bitvec(n_cores: usize) -> CoreAvailability { + bitvec![bitvec::order::Lsb0, u8; 0; n_cores] } pub fn 
scheduled_core(id: u32) -> ScheduledCore { @@ -46,8 +46,7 @@ mod select_availability_bitfields { use std::sync::Arc; use polkadot_primitives::v1::{SigningContext, ValidatorIndex, ValidatorId}; use sp_application_crypto::AppKey; - use sp_keystore::{CryptoStore, SyncCryptoStorePtr}; - use sc_keystore::LocalKeystore; + use sp_keystore::{CryptoStore, SyncCryptoStorePtr, testing::KeyStore}; async fn signed_bitfield( keystore: &SyncCryptoStorePtr, @@ -68,12 +67,10 @@ mod select_availability_bitfields { #[test] fn not_more_than_one_per_validator() { - // Configure filesystem-based keystore as generating keys without seed - // would trigger the key to be generated on the filesystem. - let keystore_path = tempfile::tempdir().expect("Creates keystore path"); - let keystore : SyncCryptoStorePtr = Arc::new(LocalKeystore::open(keystore_path.path(), None) - .expect("Creates keystore")); - let bitvec = default_bitvec(); + let keystore: SyncCryptoStorePtr = Arc::new(KeyStore::new()); + let mut bitvec = default_bitvec(2); + bitvec.set(0, true); + bitvec.set(1, true); let cores = vec![occupied_core(0), occupied_core(1)]; @@ -96,71 +93,113 @@ mod select_availability_bitfields { #[test] fn each_corresponds_to_an_occupied_core() { - // Configure filesystem-based keystore as generating keys without seed - // would trigger the key to be generated on the filesystem. 
- let keystore_path = tempfile::tempdir().expect("Creates keystore path"); - let keystore : SyncCryptoStorePtr = Arc::new(LocalKeystore::open(keystore_path.path(), None) - .expect("Creates keystore")); - let bitvec = default_bitvec(); + let keystore: SyncCryptoStorePtr = Arc::new(KeyStore::new()); + let bitvec = default_bitvec(3); - let cores = vec![CoreState::Free, CoreState::Scheduled(Default::default())]; + // invalid: bit on free core + let mut bitvec0 = bitvec.clone(); + bitvec0.set(0, true); + + // invalid: bit on scheduled core + let mut bitvec1 = bitvec.clone(); + bitvec1.set(1, true); + + // valid: bit on occupied core. + let mut bitvec2 = bitvec.clone(); + bitvec2.set(2, true); + + let cores = vec![ + CoreState::Free, + CoreState::Scheduled(Default::default()), + occupied_core(2), + ]; let bitfields = vec![ - block_on(signed_bitfield(&keystore, bitvec.clone(), 0)), - block_on(signed_bitfield(&keystore, bitvec.clone(), 1)), - block_on(signed_bitfield(&keystore, bitvec, 1)), + block_on(signed_bitfield(&keystore, bitvec0, 0)), + block_on(signed_bitfield(&keystore, bitvec1, 1)), + block_on(signed_bitfield(&keystore, bitvec2.clone(), 2)), ]; - let mut selected_bitfields = select_availability_bitfields(&cores, &bitfields); - selected_bitfields.sort_by_key(|bitfield| bitfield.validator_index()); + let selected_bitfields = select_availability_bitfields(&cores, &bitfields); - // bitfields not corresponding to occupied cores are not selected - assert!(selected_bitfields.is_empty()); + // selects only the valid bitfield + assert_eq!(selected_bitfields.len(), 1); + assert_eq!(selected_bitfields[0].payload().0, bitvec2); } #[test] fn more_set_bits_win_conflicts() { - // Configure filesystem-based keystore as generating keys without seed - // would trigger the key to be generated on the filesystem. 
- let keystore_path = tempfile::tempdir().expect("Creates keystore path"); - let keystore : SyncCryptoStorePtr = Arc::new(LocalKeystore::open(keystore_path.path(), None) - .expect("Creates keystore")); - let bitvec_zero = default_bitvec(); - let bitvec_one = { - let mut bitvec = bitvec_zero.clone(); - bitvec.set(0, true); - bitvec - }; + let keystore: SyncCryptoStorePtr = Arc::new(KeyStore::new()); + let mut bitvec = default_bitvec(2); + bitvec.set(0, true); - let cores = vec![occupied_core(0)]; + let mut bitvec1 = bitvec.clone(); + bitvec1.set(1, true); + + let cores = vec![occupied_core(0), occupied_core(1)]; let bitfields = vec![ - block_on(signed_bitfield(&keystore, bitvec_zero, 0)), - block_on(signed_bitfield(&keystore, bitvec_one.clone(), 0)), + block_on(signed_bitfield(&keystore, bitvec, 1)), + block_on(signed_bitfield(&keystore, bitvec1.clone(), 1)), ]; - // this test is probablistic: chances are excellent that it does what it claims to. - // it cannot fail unless things are broken. - // however, there is a (very small) chance that it passes when things are broken. 
- for _ in 0..64 { - let selected_bitfields = select_availability_bitfields(&cores, &bitfields); - assert_eq!(selected_bitfields.len(), 1); - assert_eq!(selected_bitfields[0].payload().0, bitvec_one); - } + let selected_bitfields = select_availability_bitfields(&cores, &bitfields); + assert_eq!(selected_bitfields.len(), 1); + assert_eq!(selected_bitfields[0].payload().0, bitvec1.clone()); + } + + #[test] + fn more_complex_bitfields() { + let keystore: SyncCryptoStorePtr = Arc::new(KeyStore::new()); + + let cores = vec![occupied_core(0), occupied_core(1), occupied_core(2), occupied_core(3)]; + + let mut bitvec0 = default_bitvec(4); + bitvec0.set(0, true); + bitvec0.set(2, true); + + let mut bitvec1 = default_bitvec(4); + bitvec1.set(1, true); + + let mut bitvec2 = default_bitvec(4); + bitvec2.set(2, true); + + let mut bitvec3 = default_bitvec(4); + bitvec3.set(0, true); + bitvec3.set(1, true); + bitvec3.set(2, true); + bitvec3.set(3, true); + + // these are out of order but will be selected in order. The better + // bitfield for 3 will be selected. 
+ let bitfields = vec![ + block_on(signed_bitfield(&keystore, bitvec2.clone(), 3)), + block_on(signed_bitfield(&keystore, bitvec3.clone(), 3)), + block_on(signed_bitfield(&keystore, bitvec0.clone(), 0)), + block_on(signed_bitfield(&keystore, bitvec2.clone(), 2)), + block_on(signed_bitfield(&keystore, bitvec1.clone(), 1)), + ]; + + let selected_bitfields = select_availability_bitfields(&cores, &bitfields); + assert_eq!(selected_bitfields.len(), 4); + assert_eq!(selected_bitfields[0].payload().0, bitvec0); + assert_eq!(selected_bitfields[1].payload().0, bitvec1); + assert_eq!(selected_bitfields[2].payload().0, bitvec2); + assert_eq!(selected_bitfields[3].payload().0, bitvec3); } } mod select_candidates { use futures_timer::Delay; use super::super::*; - use super::{build_occupied_core, default_bitvec, occupied_core, scheduled_core}; - use polkadot_node_subsystem::messages::RuntimeApiRequest::{ - AvailabilityCores, PersistedValidationData as PersistedValidationDataReq, + use super::{build_occupied_core, occupied_core, scheduled_core, default_bitvec}; + use polkadot_node_subsystem::messages::{ + AllMessages, RuntimeApiMessage, + RuntimeApiRequest::{AvailabilityCores, PersistedValidationData as PersistedValidationDataReq}, }; use polkadot_primitives::v1::{ - BlockNumber, CandidateDescriptor, CommittedCandidateReceipt, PersistedValidationData, + BlockNumber, CandidateDescriptor, PersistedValidationData, CommittedCandidateReceipt, CandidateCommitments, }; - use FromJob::{ChainApi, Runtime}; const BLOCK_UNDER_PRODUCTION: BlockNumber = 128; @@ -168,9 +207,9 @@ mod select_candidates { overseer_factory: OverseerFactory, test_factory: TestFactory, ) where - OverseerFactory: FnOnce(mpsc::Receiver) -> Overseer, + OverseerFactory: FnOnce(mpsc::Receiver) -> Overseer, Overseer: Future, - TestFactory: FnOnce(mpsc::Sender) -> Test, + TestFactory: FnOnce(mpsc::Sender) -> Test, Test: Future, { let (tx, rx) = mpsc::channel(64); @@ -258,38 +297,42 @@ mod select_candidates { ] } - async fn 
mock_overseer(mut receiver: mpsc::Receiver) { + async fn mock_overseer(mut receiver: mpsc::Receiver, expected: Vec) { use ChainApiMessage::BlockNumber; use RuntimeApiMessage::Request; while let Some(from_job) = receiver.next().await { match from_job { - ChainApi(BlockNumber(_relay_parent, tx)) => { + FromJobCommand::SendMessage(AllMessages::ChainApi(BlockNumber(_relay_parent, tx))) => { tx.send(Ok(Some(BLOCK_UNDER_PRODUCTION - 1))).unwrap() } - Runtime(Request( + FromJobCommand::SendMessage(AllMessages::RuntimeApi(Request( _parent_hash, PersistedValidationDataReq(_para_id, _assumption, tx), - )) => tx.send(Ok(Some(Default::default()))).unwrap(), - Runtime(Request(_parent_hash, AvailabilityCores(tx))) => { + ))) => tx.send(Ok(Some(Default::default()))).unwrap(), + FromJobCommand::SendMessage(AllMessages::RuntimeApi(Request(_parent_hash, AvailabilityCores(tx)))) => { tx.send(Ok(mock_availability_cores())).unwrap() } - // non-exhaustive matches are fine for testing - _ => unimplemented!(), + FromJobCommand::SendMessage( + AllMessages::CandidateBacking(CandidateBackingMessage::GetBackedCandidates(_, _, sender)) + ) => { + let _ = sender.send(expected.clone()); + } + _ => panic!("Unexpected message: {:?}", from_job), } } } #[test] fn handles_overseer_failure() { - let overseer = |rx: mpsc::Receiver| async move { + let overseer = |rx: mpsc::Receiver| async move { // drop the receiver so it closes and the sender can't send, then just sleep long enough that // this is almost certainly not the first of the two futures to complete std::mem::drop(rx); Delay::new(std::time::Duration::from_secs(1)).await; }; - let test = |mut tx: mpsc::Sender| async move { + let test = |mut tx: mpsc::Sender| async move { // wait so that the overseer can drop the rx before we attempt to send Delay::new(std::time::Duration::from_millis(50)).await; let result = select_candidates(&[], &[], &[], Default::default(), &mut tx).await; @@ -302,10 +345,8 @@ mod select_candidates { #[test] fn can_succeed() 
{ - test_harness(mock_overseer, |mut tx: mpsc::Sender| async move { - let result = select_candidates(&[], &[], &[], Default::default(), &mut tx).await; - println!("{:?}", result); - assert!(result.is_ok()); + test_harness(|r| mock_overseer(r, Vec::new()), |mut tx: mpsc::Sender| async move { + select_candidates(&[], &[], &[], Default::default(), &mut tx).await.unwrap(); }) } @@ -315,26 +356,23 @@ mod select_candidates { #[test] fn selects_correct_candidates() { let mock_cores = mock_availability_cores(); + let n_cores = mock_cores.len(); let empty_hash = PersistedValidationData::::default().hash(); - let candidate_template = BackedCandidate { - candidate: CommittedCandidateReceipt { - descriptor: CandidateDescriptor { - persisted_validation_data_hash: empty_hash, - ..Default::default() - }, + let candidate_template = CandidateReceipt { + descriptor: CandidateDescriptor { + persisted_validation_data_hash: empty_hash, ..Default::default() }, - validity_votes: Vec::new(), - validator_indices: default_bitvec(), + commitments_hash: CandidateCommitments::default().hash(), }; let candidates: Vec<_> = std::iter::repeat(candidate_template) .take(mock_cores.len()) .enumerate() .map(|(idx, mut candidate)| { - candidate.candidate.descriptor.para_id = idx.into(); + candidate.descriptor.para_id = idx.into(); candidate }) .cycle() @@ -346,12 +384,12 @@ mod select_candidates { candidate } else if idx < mock_cores.len() * 2 { // for the second repetition of the candidates, give them the wrong hash - candidate.candidate.descriptor.persisted_validation_data_hash + candidate.descriptor.persisted_validation_data_hash = Default::default(); candidate } else { // third go-around: right hash, wrong para_id - candidate.candidate.descriptor.para_id = idx.into(); + candidate.descriptor.para_id = idx.into(); candidate } }) @@ -363,15 +401,28 @@ mod select_candidates { .map(|&idx| candidates[idx].clone()) .collect(); - test_harness(mock_overseer, |mut tx: mpsc::Sender| async move { + let 
expected_backed = expected_candidates + .iter() + .map(|c| BackedCandidate { + candidate: CommittedCandidateReceipt { descriptor: c.descriptor.clone(), ..Default::default() }, + validity_votes: Vec::new(), + validator_indices: default_bitvec(n_cores), + }) + .collect(); + + test_harness(|r| mock_overseer(r, expected_backed), |mut tx: mpsc::Sender| async move { let result = select_candidates(&mock_cores, &[], &candidates, Default::default(), &mut tx) - .await; - - if result.is_err() { - println!("{:?}", result); - } - assert_eq!(result.unwrap(), expected_candidates); + .await.unwrap(); + + result.into_iter() + .for_each(|c| + assert!( + expected_candidates.iter().any(|c2| c.candidate.corresponds_to(c2)), + "Failed to find candidate: {:?}", + c, + ) + ); }) } } diff --git a/node/core/runtime-api/Cargo.toml b/node/core/runtime-api/Cargo.toml index 81cf95410976f16f6f36011fed581fd712410dc7..844b0763c70f62bddb7b6350f5ac952536b61b60 100644 --- a/node/core/runtime-api/Cargo.toml +++ b/node/core/runtime-api/Cargo.toml @@ -5,14 +5,17 @@ authors = ["Parity Technologies "] edition = "2018" [dependencies] -futures = "0.3.5" -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" } +futures = "0.3.8" +tracing = "0.1.22" +tracing-futures = "0.2.4" +sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } polkadot-primitives = { path = "../../../primitives" } polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } [dev-dependencies] -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -futures = { version = "0.3.5", features = ["thread-pool"] } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +futures = { version = "0.3.8", features = 
["thread-pool"] } polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } diff --git a/node/core/runtime-api/src/lib.rs b/node/core/runtime-api/src/lib.rs index 5a38730335b5361d2a2c46a8882895ac2c071902..b3b809296647eba692eb93a1b7a937e2ea183854 100644 --- a/node/core/runtime-api/src/lib.rs +++ b/node/core/runtime-api/src/lib.rs @@ -30,26 +30,44 @@ use polkadot_subsystem::{ }, errors::RuntimeApiError, }; -use polkadot_node_subsystem_util::{ - metrics::{self, prometheus}, -}; +use polkadot_node_subsystem_util::metrics::{self, prometheus}; use polkadot_primitives::v1::{Block, BlockId, Hash, ParachainHost}; -use std::sync::Arc; -use sp_api::{ProvideRuntimeApi}; +use sp_api::ProvideRuntimeApi; +use sp_core::traits::SpawnNamed; + +use futures::{prelude::*, stream::FuturesUnordered, channel::oneshot, select}; +use std::{sync::Arc, collections::VecDeque, pin::Pin}; + +const LOG_TARGET: &str = "runtime_api"; -use futures::prelude::*; +/// The number of maximum runtime api requests can be executed in parallel. Further requests will be buffered. +const MAX_PARALLEL_REQUESTS: usize = 4; + +/// The name of the blocking task that executes a runtime api request. +const API_REQUEST_TASK_NAME: &str = "polkadot-runtime-api-request"; /// The `RuntimeApiSubsystem`. See module docs for more details. pub struct RuntimeApiSubsystem { client: Arc, metrics: Metrics, + spawn_handle: Box, + /// If there are [`MAX_PARALLEL_REQUESTS`] requests being executed, we buffer them in here until they can be executed. + waiting_requests: VecDeque<(Pin + Send>>, oneshot::Receiver<()>)>, + /// All the active runtime api requests that are currently being executed. + active_requests: FuturesUnordered>, } impl RuntimeApiSubsystem { /// Create a new Runtime API subsystem wrapping the given client and metrics. 
- pub fn new(client: Arc, metrics: Metrics) -> Self { - RuntimeApiSubsystem { client, metrics } + pub fn new(client: Arc, metrics: Metrics, spawn_handle: impl SpawnNamed + 'static) -> Self { + RuntimeApiSubsystem { + client, + metrics, + spawn_handle: Box::new(spawn_handle), + waiting_requests: Default::default(), + active_requests: Default::default(), + } } } @@ -60,45 +78,104 @@ impl Subsystem for RuntimeApiSubsystem where { fn start(self, ctx: Context) -> SpawnedSubsystem { SpawnedSubsystem { - future: run(ctx, self).map(|_| ()).boxed(), + future: run(ctx, self).boxed(), name: "runtime-api-subsystem", } } } +impl RuntimeApiSubsystem where + Client: ProvideRuntimeApi + Send + 'static + Sync, + Client::Api: ParachainHost, +{ + /// Spawn a runtime api request. + /// + /// If there are already [`MAX_PARALLEL_REQUESTS`] requests being executed, the request will be buffered. + fn spawn_request(&mut self, relay_parent: Hash, request: Request) { + let client = self.client.clone(); + let metrics = self.metrics.clone(); + let (sender, receiver) = oneshot::channel(); + + let request = async move { + make_runtime_api_request( + client, + metrics, + relay_parent, + request, + ); + let _ = sender.send(()); + }.boxed(); + + if self.active_requests.len() >= MAX_PARALLEL_REQUESTS { + self.waiting_requests.push_back((request, receiver)); + + if self.waiting_requests.len() > MAX_PARALLEL_REQUESTS * 10 { + tracing::warn!( + target: LOG_TARGET, + "{} runtime api requests waiting to be executed.", + self.waiting_requests.len(), + ) + } + } else { + self.spawn_handle.spawn_blocking(API_REQUEST_TASK_NAME, request); + self.active_requests.push(receiver); + } + } + + /// Poll the active runtime api requests. + async fn poll_requests(&mut self) { + // If there are no active requests, this future should be pending forever. 
+ if self.active_requests.len() == 0 { + return futures::pending!() + } + + // If there are active requests, this will always resolve to `Some(_)` when a request is finished. + let _ = self.active_requests.next().await; + + if let Some((req, recv)) = self.waiting_requests.pop_front() { + self.spawn_handle.spawn_blocking(API_REQUEST_TASK_NAME, req); + self.active_requests.push(recv); + } + } +} + +#[tracing::instrument(skip(ctx, subsystem), fields(subsystem = LOG_TARGET))] async fn run( mut ctx: impl SubsystemContext, - subsystem: RuntimeApiSubsystem, + mut subsystem: RuntimeApiSubsystem, ) -> SubsystemResult<()> where - Client: ProvideRuntimeApi, + Client: ProvideRuntimeApi + Send + Sync + 'static, Client::Api: ParachainHost, { loop { - match ctx.recv().await? { - FromOverseer::Signal(OverseerSignal::Conclude) => return Ok(()), - FromOverseer::Signal(OverseerSignal::ActiveLeaves(_)) => {}, - FromOverseer::Signal(OverseerSignal::BlockFinalized(_)) => {}, - FromOverseer::Communication { msg } => match msg { - RuntimeApiMessage::Request(relay_parent, request) => make_runtime_api_request( - &*subsystem.client, - &subsystem.metrics, - relay_parent, - request, - ), - } + select! { + req = ctx.recv().fuse() => match req? 
{ + FromOverseer::Signal(OverseerSignal::Conclude) => return Ok(()), + FromOverseer::Signal(OverseerSignal::ActiveLeaves(_)) => {}, + FromOverseer::Signal(OverseerSignal::BlockFinalized(_)) => {}, + FromOverseer::Communication { msg } => match msg { + RuntimeApiMessage::Request(relay_parent, request) => { + subsystem.spawn_request(relay_parent, request); + }, + } + }, + _ = subsystem.poll_requests().fuse() => {}, } } } +#[tracing::instrument(level = "trace", skip(client, metrics), fields(subsystem = LOG_TARGET))] fn make_runtime_api_request( - client: &Client, - metrics: &Metrics, + client: Arc, + metrics: Metrics, relay_parent: Hash, request: Request, ) where Client: ProvideRuntimeApi, Client::Api: ParachainHost, { + let _timer = metrics.time_make_runtime_api_request(); + macro_rules! query { ($api_name:ident ($($param:expr),*), $sender:expr) => {{ let sender = $sender; @@ -123,17 +200,21 @@ fn make_runtime_api_request( Request::SessionIndexForChild(sender) => query!(session_index_for_child(), sender), Request::ValidationCode(para, assumption, sender) => query!(validation_code(para, assumption), sender), + Request::HistoricalValidationCode(para, at, sender) => + query!(historical_validation_code(para, at), sender), Request::CandidatePendingAvailability(para, sender) => query!(candidate_pending_availability(para), sender), Request::CandidateEvents(sender) => query!(candidate_events(), sender), - Request::ValidatorDiscovery(ids, sender) => query!(validator_discovery(ids), sender), + Request::SessionInfo(index, sender) => query!(session_info(index), sender), Request::DmqContents(id, sender) => query!(dmq_contents(id), sender), + Request::InboundHrmpChannelsContents(id, sender) => query!(inbound_hrmp_channels_contents(id), sender), } } #[derive(Clone)] struct MetricsInner { chain_api_requests: prometheus::CounterVec, + make_runtime_api_request: prometheus::Histogram, } /// Runtime API metrics. 
@@ -150,6 +231,11 @@ impl Metrics { } } } + + /// Provide a timer for `make_runtime_api_request` which observes on drop. + fn time_make_runtime_api_request(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.make_runtime_api_request.start_timer()) + } } impl metrics::Metrics for Metrics { @@ -165,6 +251,15 @@ impl metrics::Metrics for Metrics { )?, registry, )?, + make_runtime_api_request: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_runtime_api_make_runtime_api_request", + "Time spent within `runtime_api::make_runtime_api_request`", + ) + )?, + registry, + )?, }; Ok(Metrics(Some(metrics))) } @@ -177,12 +272,12 @@ mod tests { use polkadot_primitives::v1::{ ValidatorId, ValidatorIndex, GroupRotationInfo, CoreState, PersistedValidationData, Id as ParaId, OccupiedCoreAssumption, ValidationData, SessionIndex, ValidationCode, - CommittedCandidateReceipt, CandidateEvent, AuthorityDiscoveryId, InboundDownwardMessage, + CommittedCandidateReceipt, CandidateEvent, InboundDownwardMessage, + BlockNumber, InboundHrmpMessage, SessionInfo, }; use polkadot_node_subsystem_test_helpers as test_helpers; use sp_core::testing::TaskExecutor; - - use std::collections::HashMap; + use std::{collections::{HashMap, BTreeMap}, sync::{Arc, Mutex}}; use futures::channel::oneshot; #[derive(Default, Clone)] @@ -190,13 +285,17 @@ mod tests { validators: Vec, validator_groups: Vec>, availability_cores: Vec, + availability_cores_wait: Arc>, validation_data: HashMap, session_index_for_child: SessionIndex, + session_info: HashMap, validation_code: HashMap, + historical_validation_code: HashMap>, validation_outputs_results: HashMap, candidate_pending_availability: HashMap, candidate_events: Vec, dmq_contents: HashMap>, + hrmp_channels: HashMap>>, } impl ProvideRuntimeApi for MockRuntimeApi { @@ -209,7 +308,7 @@ mod tests { sp_api::mock_impl_runtime_apis! 
{ impl ParachainHost for MockRuntimeApi { - type Error = String; + type Error = sp_api::ApiError; fn validators(&self) -> Vec { self.validators.clone() @@ -227,6 +326,7 @@ mod tests { } fn availability_cores(&self) -> Vec { + let _ = self.availability_cores_wait.lock().unwrap(); self.availability_cores.clone() } @@ -249,7 +349,7 @@ mod tests { fn check_validation_outputs( &self, para_id: ParaId, - _commitments: polkadot_primitives::v1::ValidationOutputs, + _commitments: polkadot_primitives::v1::CandidateCommitments, ) -> bool { self.validation_outputs_results .get(¶_id) @@ -263,6 +363,10 @@ mod tests { self.session_index_for_child.clone() } + fn session_info(&self, index: SessionIndex) -> Option { + self.session_info.get(&index).cloned() + } + fn validation_code( &self, para: ParaId, @@ -271,6 +375,19 @@ mod tests { self.validation_code.get(¶).map(|c| c.clone()) } + fn historical_validation_code( + &self, + para: ParaId, + at: BlockNumber, + ) -> Option { + self.historical_validation_code.get(¶).and_then(|h_code| { + h_code.iter() + .take_while(|(changed_at, _)| changed_at <= &at) + .last() + .map(|(_, code)| code.clone()) + }) + } + fn candidate_pending_availability( &self, para: ParaId, @@ -282,16 +399,19 @@ mod tests { self.candidate_events.clone() } - fn validator_discovery(ids: Vec) -> Vec> { - vec![None; ids.len()] - } - fn dmq_contents( &self, recipient: ParaId, - ) -> Vec { + ) -> Vec { self.dmq_contents.get(&recipient).map(|q| q.clone()).unwrap_or_default() } + + fn inbound_hrmp_channels_contents( + &self, + recipient: ParaId + ) -> BTreeMap> { + self.hrmp_channels.get(&recipient).map(|q| q.clone()).unwrap_or_default() + } } } @@ -300,8 +420,9 @@ mod tests { let (ctx, mut ctx_handle) = test_helpers::make_subsystem_context(TaskExecutor::new()); let runtime_api = Arc::new(MockRuntimeApi::default()); let relay_parent = [1; 32].into(); + let spawner = sp_core::testing::TaskExecutor::new(); - let subsystem = RuntimeApiSubsystem::new(runtime_api.clone(), 
Metrics(None)); + let subsystem = RuntimeApiSubsystem::new(runtime_api.clone(), Metrics(None), spawner); let subsystem_task = run(ctx, subsystem).map(|x| x.unwrap()); let test_task = async move { let (tx, rx) = oneshot::channel(); @@ -323,8 +444,9 @@ mod tests { let (ctx, mut ctx_handle) = test_helpers::make_subsystem_context(TaskExecutor::new()); let runtime_api = Arc::new(MockRuntimeApi::default()); let relay_parent = [1; 32].into(); + let spawner = sp_core::testing::TaskExecutor::new(); - let subsystem = RuntimeApiSubsystem::new(runtime_api.clone(), Metrics(None)); + let subsystem = RuntimeApiSubsystem::new(runtime_api.clone(), Metrics(None), spawner); let subsystem_task = run(ctx, subsystem).map(|x| x.unwrap()); let test_task = async move { let (tx, rx) = oneshot::channel(); @@ -346,8 +468,9 @@ mod tests { let (ctx, mut ctx_handle) = test_helpers::make_subsystem_context(TaskExecutor::new()); let runtime_api = Arc::new(MockRuntimeApi::default()); let relay_parent = [1; 32].into(); + let spawner = sp_core::testing::TaskExecutor::new(); - let subsystem = RuntimeApiSubsystem::new(runtime_api.clone(), Metrics(None)); + let subsystem = RuntimeApiSubsystem::new(runtime_api.clone(), Metrics(None), spawner); let subsystem_task = run(ctx, subsystem).map(|x| x.unwrap()); let test_task = async move { let (tx, rx) = oneshot::channel(); @@ -367,14 +490,16 @@ mod tests { #[test] fn requests_persisted_validation_data() { let (ctx, mut ctx_handle) = test_helpers::make_subsystem_context(TaskExecutor::new()); - let mut runtime_api = Arc::new(MockRuntimeApi::default()); let relay_parent = [1; 32].into(); let para_a = 5.into(); let para_b = 6.into(); + let spawner = sp_core::testing::TaskExecutor::new(); - Arc::get_mut(&mut runtime_api).unwrap().validation_data.insert(para_a, Default::default()); + let mut runtime_api = MockRuntimeApi::default(); + runtime_api.validation_data.insert(para_a, Default::default()); + let runtime_api = Arc::new(runtime_api); - let subsystem = 
RuntimeApiSubsystem::new(runtime_api.clone(), Metrics(None)); + let subsystem = RuntimeApiSubsystem::new(runtime_api.clone(), Metrics(None), spawner); let subsystem_task = run(ctx, subsystem).map(|x| x.unwrap()); let test_task = async move { let (tx, rx) = oneshot::channel(); @@ -407,14 +532,16 @@ mod tests { #[test] fn requests_full_validation_data() { let (ctx, mut ctx_handle) = test_helpers::make_subsystem_context(TaskExecutor::new()); - let mut runtime_api = Arc::new(MockRuntimeApi::default()); let relay_parent = [1; 32].into(); let para_a = 5.into(); let para_b = 6.into(); + let spawner = sp_core::testing::TaskExecutor::new(); - Arc::get_mut(&mut runtime_api).unwrap().validation_data.insert(para_a, Default::default()); + let mut runtime_api = MockRuntimeApi::default(); + runtime_api.validation_data.insert(para_a, Default::default()); + let runtime_api = Arc::new(runtime_api); - let subsystem = RuntimeApiSubsystem::new(runtime_api.clone(), Metrics(None)); + let subsystem = RuntimeApiSubsystem::new(runtime_api.clone(), Metrics(None), spawner); let subsystem_task = run(ctx, subsystem).map(|x| x.unwrap()); let test_task = async move { let (tx, rx) = oneshot::channel(); @@ -451,14 +578,15 @@ mod tests { let relay_parent = [1; 32].into(); let para_a = 5.into(); let para_b = 6.into(); - let commitments = polkadot_primitives::v1::ValidationOutputs::default(); + let commitments = polkadot_primitives::v1::CandidateCommitments::default(); + let spawner = sp_core::testing::TaskExecutor::new(); runtime_api.validation_outputs_results.insert(para_a, false); runtime_api.validation_outputs_results.insert(para_b, true); let runtime_api = Arc::new(runtime_api); - let subsystem = RuntimeApiSubsystem::new(runtime_api.clone(), Metrics(None)); + let subsystem = RuntimeApiSubsystem::new(runtime_api.clone(), Metrics(None), spawner); let subsystem_task = run(ctx, subsystem).map(|x| x.unwrap()); let test_task = async move { let (tx, rx) = oneshot::channel(); @@ -505,8 +633,9 @@ mod 
tests { let (ctx, mut ctx_handle) = test_helpers::make_subsystem_context(TaskExecutor::new()); let runtime_api = Arc::new(MockRuntimeApi::default()); let relay_parent = [1; 32].into(); + let spawner = sp_core::testing::TaskExecutor::new(); - let subsystem = RuntimeApiSubsystem::new(runtime_api.clone(), Metrics(None)); + let subsystem = RuntimeApiSubsystem::new(runtime_api.clone(), Metrics(None), spawner); let subsystem_task = run(ctx, subsystem).map(|x| x.unwrap()); let test_task = async move { let (tx, rx) = oneshot::channel(); @@ -523,17 +652,48 @@ mod tests { futures::executor::block_on(future::join(subsystem_task, test_task)); } + #[test] + fn requests_session_info() { + let (ctx, mut ctx_handle) = test_helpers::make_subsystem_context(TaskExecutor::new()); + let mut runtime_api = MockRuntimeApi::default(); + let session_index = 1; + runtime_api.session_info.insert(session_index, Default::default()); + let runtime_api = Arc::new(runtime_api); + let spawner = sp_core::testing::TaskExecutor::new(); + + let relay_parent = [1; 32].into(); + + let subsystem = RuntimeApiSubsystem::new(runtime_api.clone(), Metrics(None), spawner); + let subsystem_task = run(ctx, subsystem).map(|x| x.unwrap()); + let test_task = async move { + let (tx, rx) = oneshot::channel(); + + ctx_handle.send(FromOverseer::Communication { + msg: RuntimeApiMessage::Request(relay_parent, Request::SessionInfo(session_index, tx)) + }).await; + + assert_eq!(rx.await.unwrap().unwrap(), Some(Default::default())); + + ctx_handle.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; + }; + + futures::executor::block_on(future::join(subsystem_task, test_task)); + } + #[test] fn requests_validation_code() { let (ctx, mut ctx_handle) = test_helpers::make_subsystem_context(TaskExecutor::new()); - let mut runtime_api = Arc::new(MockRuntimeApi::default()); + let relay_parent = [1; 32].into(); let para_a = 5.into(); let para_b = 6.into(); + let spawner = sp_core::testing::TaskExecutor::new(); - 
Arc::get_mut(&mut runtime_api).unwrap().validation_code.insert(para_a, Default::default()); + let mut runtime_api = MockRuntimeApi::default(); + runtime_api.validation_code.insert(para_a, Default::default()); + let runtime_api = Arc::new(runtime_api); - let subsystem = RuntimeApiSubsystem::new(runtime_api.clone(), Metrics(None)); + let subsystem = RuntimeApiSubsystem::new(runtime_api.clone(), Metrics(None), spawner); let subsystem_task = run(ctx, subsystem).map(|x| x.unwrap()); let test_task = async move { let (tx, rx) = oneshot::channel(); @@ -566,16 +726,16 @@ mod tests { #[test] fn requests_candidate_pending_availability() { let (ctx, mut ctx_handle) = test_helpers::make_subsystem_context(TaskExecutor::new()); - let mut runtime_api = MockRuntimeApi::default(); let relay_parent = [1; 32].into(); let para_a = 5.into(); let para_b = 6.into(); + let spawner = sp_core::testing::TaskExecutor::new(); + let mut runtime_api = MockRuntimeApi::default(); runtime_api.candidate_pending_availability.insert(para_a, Default::default()); - let runtime_api = Arc::new(runtime_api); - let subsystem = RuntimeApiSubsystem::new(runtime_api.clone(), Metrics(None)); + let subsystem = RuntimeApiSubsystem::new(runtime_api.clone(), Metrics(None), spawner); let subsystem_task = run(ctx, subsystem).map(|x| x.unwrap()); let test_task = async move { let (tx, rx) = oneshot::channel(); @@ -611,8 +771,9 @@ mod tests { let (ctx, mut ctx_handle) = test_helpers::make_subsystem_context(TaskExecutor::new()); let runtime_api = Arc::new(MockRuntimeApi::default()); let relay_parent = [1; 32].into(); + let spawner = sp_core::testing::TaskExecutor::new(); - let subsystem = RuntimeApiSubsystem::new(runtime_api.clone(), Metrics(None)); + let subsystem = RuntimeApiSubsystem::new(runtime_api.clone(), Metrics(None), spawner); let subsystem_task = run(ctx, subsystem).map(|x| x.unwrap()); let test_task = async move { let (tx, rx) = oneshot::channel(); @@ -636,6 +797,7 @@ mod tests { let relay_parent = [1; 
32].into(); let para_a = 5.into(); let para_b = 6.into(); + let spawner = sp_core::testing::TaskExecutor::new(); let runtime_api = Arc::new({ let mut runtime_api = MockRuntimeApi::default(); @@ -652,7 +814,7 @@ mod tests { runtime_api }); - let subsystem = RuntimeApiSubsystem::new(runtime_api.clone(), Metrics(None)); + let subsystem = RuntimeApiSubsystem::new(runtime_api.clone(), Metrics(None), spawner); let subsystem_task = run(ctx, subsystem).map(|x| x.unwrap()); let test_task = async move { let (tx, rx) = oneshot::channel(); @@ -684,4 +846,180 @@ mod tests { futures::executor::block_on(future::join(subsystem_task, test_task)); } + #[test] + fn requests_inbound_hrmp_channels_contents() { + let (ctx, mut ctx_handle) = test_helpers::make_subsystem_context(TaskExecutor::new()); + + let relay_parent = [1; 32].into(); + let para_a = 99.into(); + let para_b = 66.into(); + let para_c = 33.into(); + let spawner = sp_core::testing::TaskExecutor::new(); + + let para_b_inbound_channels = [ + (para_a, vec![]), + ( + para_c, + vec![InboundHrmpMessage { + sent_at: 1, + data: "𝙀=𝙈𝘾²".as_bytes().to_owned(), + }], + ), + ] + .iter() + .cloned() + .collect::>(); + + let runtime_api = Arc::new({ + let mut runtime_api = MockRuntimeApi::default(); + + runtime_api.hrmp_channels.insert(para_a, BTreeMap::new()); + runtime_api + .hrmp_channels + .insert(para_b, para_b_inbound_channels.clone()); + + runtime_api + }); + + let subsystem = RuntimeApiSubsystem::new(runtime_api.clone(), Metrics(None), spawner); + let subsystem_task = run(ctx, subsystem).map(|x| x.unwrap()); + let test_task = async move { + let (tx, rx) = oneshot::channel(); + ctx_handle + .send(FromOverseer::Communication { + msg: RuntimeApiMessage::Request( + relay_parent, + Request::InboundHrmpChannelsContents(para_a, tx), + ), + }) + .await; + assert_eq!(rx.await.unwrap().unwrap(), BTreeMap::new()); + + let (tx, rx) = oneshot::channel(); + ctx_handle + .send(FromOverseer::Communication { + msg: RuntimeApiMessage::Request( + 
relay_parent, + Request::InboundHrmpChannelsContents(para_b, tx), + ), + }) + .await; + assert_eq!(rx.await.unwrap().unwrap(), para_b_inbound_channels,); + + ctx_handle + .send(FromOverseer::Signal(OverseerSignal::Conclude)) + .await; + }; + futures::executor::block_on(future::join(subsystem_task, test_task)); + } + + #[test] + fn requests_historical_code() { + let (ctx, mut ctx_handle) = test_helpers::make_subsystem_context(TaskExecutor::new()); + + let para_a = 5.into(); + let para_b = 6.into(); + let spawner = sp_core::testing::TaskExecutor::new(); + + let runtime_api = Arc::new({ + let mut runtime_api = MockRuntimeApi::default(); + + runtime_api.historical_validation_code.insert( + para_a, + vec![(1, vec![1, 2, 3].into()), (10, vec![4, 5, 6].into())], + ); + + runtime_api.historical_validation_code.insert( + para_b, + vec![(5, vec![7, 8, 9].into())], + ); + + runtime_api + }); + let relay_parent = [1; 32].into(); + + let subsystem = RuntimeApiSubsystem::new(runtime_api, Metrics(None), spawner); + let subsystem_task = run(ctx, subsystem).map(|x| x.unwrap()); + let test_task = async move { + { + let (tx, rx) = oneshot::channel(); + ctx_handle.send(FromOverseer::Communication { + msg: RuntimeApiMessage::Request( + relay_parent, + Request::HistoricalValidationCode(para_a, 5, tx), + ) + }).await; + + assert_eq!(rx.await.unwrap().unwrap(), Some(ValidationCode::from(vec![1, 2, 3]))); + } + + { + let (tx, rx) = oneshot::channel(); + ctx_handle.send(FromOverseer::Communication { + msg: RuntimeApiMessage::Request( + relay_parent, + Request::HistoricalValidationCode(para_a, 10, tx), + ) + }).await; + + assert_eq!(rx.await.unwrap().unwrap(), Some(ValidationCode::from(vec![4, 5, 6]))); + } + + { + let (tx, rx) = oneshot::channel(); + ctx_handle.send(FromOverseer::Communication { + msg: RuntimeApiMessage::Request( + relay_parent, + Request::HistoricalValidationCode(para_b, 1, tx), + ) + }).await; + + assert!(rx.await.unwrap().unwrap().is_none()); + } + + 
ctx_handle.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; + }; + + futures::executor::block_on(future::join(subsystem_task, test_task)); + } + + #[test] + fn multiple_requests_in_parallel_are_working() { + let (ctx, mut ctx_handle) = test_helpers::make_subsystem_context(TaskExecutor::new()); + let runtime_api = Arc::new(MockRuntimeApi::default()); + let relay_parent = [1; 32].into(); + let spawner = sp_core::testing::TaskExecutor::new(); + let mutex = runtime_api.availability_cores_wait.clone(); + + let subsystem = RuntimeApiSubsystem::new(runtime_api.clone(), Metrics(None), spawner); + let subsystem_task = run(ctx, subsystem).map(|x| x.unwrap()); + let test_task = async move { + // Make all requests block until we release this mutex. + let lock = mutex.lock().unwrap(); + + let mut receivers = Vec::new(); + + for _ in 0..MAX_PARALLEL_REQUESTS * 10 { + let (tx, rx) = oneshot::channel(); + + ctx_handle.send(FromOverseer::Communication { + msg: RuntimeApiMessage::Request(relay_parent, Request::AvailabilityCores(tx)) + }).await; + + receivers.push(rx); + } + + let join = future::join_all(receivers); + + drop(lock); + + join.await + .into_iter() + .for_each(|r| assert_eq!(r.unwrap().unwrap(), runtime_api.availability_cores)); + + ctx_handle.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; + }; + + futures::executor::block_on(future::join(subsystem_task, test_task)); + } } diff --git a/node/network/availability-distribution/Cargo.toml b/node/network/availability-distribution/Cargo.toml index 1f032b7f054b36e09b87ee5717af5bbe3306b598..0aff69baa739a6152f6d99fb57aa55375db8e4c8 100644 --- a/node/network/availability-distribution/Cargo.toml +++ b/node/network/availability-distribution/Cargo.toml @@ -5,25 +5,27 @@ authors = ["Parity Technologies "] edition = "2018" [dependencies] -futures = "0.3.5" -log = "0.4.11" -codec = { package="parity-scale-codec", version = "1.3.4", features = ["std"] } +futures = "0.3.8" +tracing = "0.1.22" +tracing-futures = 
"0.2.4" +parity-scale-codec = { version = "1.3.5", features = ["std"] } polkadot-primitives = { path = "../../../primitives" } polkadot-erasure-coding = { path = "../../../erasure-coding" } polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" } polkadot-node-network-protocol = { path = "../../network/protocol" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", features = ["std"] } -sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } -thiserror = "1.0.21" +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", features = ["std"] } +sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +thiserror = "1.0.22" [dev-dependencies] polkadot-subsystem-testhelpers = { package = "polkadot-node-subsystem-test-helpers", path = "../../subsystem-test-helpers" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", features = ["std"] } -sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", features = ["std"] } +sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } futures-timer = "3.0.2" -env_logger = "0.7.1" -assert_matches = "1.3.0" -smallvec = "1" +env_logger = "0.8.2" +assert_matches = "1.4.0" +smallvec = "1.5.1" +log = "0.4.11" diff 
--git a/node/network/availability-distribution/src/lib.rs b/node/network/availability-distribution/src/lib.rs index 066908f9f4b7473e4bb32971c73abf2cd428a629..4ca045a7bcdf8474303e1c3f155bcdd01bfa2b7a 100644 --- a/node/network/availability-distribution/src/lib.rs +++ b/node/network/availability-distribution/src/lib.rs @@ -24,13 +24,12 @@ #![deny(unused_crate_dependencies, unused_qualifications)] -use codec::{Decode, Encode}; +use parity_scale_codec::{Decode, Encode}; use futures::{channel::oneshot, FutureExt, TryFutureExt}; use sp_core::crypto::Public; use sp_keystore::{CryptoStore, SyncCryptoStorePtr}; -use log::{trace, warn}; use polkadot_erasure_coding::branch_hash; use polkadot_node_network_protocol::{ v1 as protocol_v1, NetworkBridgeEvent, PeerId, ReputationChange as Rep, View, @@ -38,7 +37,7 @@ use polkadot_node_network_protocol::{ use polkadot_node_subsystem_util::metrics::{self, prometheus}; use polkadot_primitives::v1::{ BlakeTwo256, CommittedCandidateReceipt, CoreState, ErasureChunk, Hash, HashT, Id as ParaId, - SessionIndex, ValidatorId, ValidatorIndex, PARACHAIN_KEY_TYPE_ID, + SessionIndex, ValidatorId, ValidatorIndex, PARACHAIN_KEY_TYPE_ID, CandidateHash, }; use polkadot_subsystem::messages::{ AllMessages, AvailabilityDistributionMessage, AvailabilityStoreMessage, ChainApiMessage, @@ -53,66 +52,47 @@ use std::collections::{HashMap, HashSet}; use std::iter; use thiserror::Error; -const TARGET: &'static str = "avad"; +#[cfg(test)] +mod tests; + +const LOG_TARGET: &'static str = "availability_distribution"; #[derive(Debug, Error)] enum Error { - #[error("Sending PendingAvailability query failed")] - QueryPendingAvailabilitySendQuery(#[source] SubsystemError), #[error("Response channel to obtain PendingAvailability failed")] QueryPendingAvailabilityResponseChannel(#[source] oneshot::Canceled), #[error("RuntimeAPI to obtain PendingAvailability failed")] QueryPendingAvailability(#[source] RuntimeApiError), - #[error("Sending StoreChunk query failed")] - 
StoreChunkSendQuery(#[source] SubsystemError), #[error("Response channel to obtain StoreChunk failed")] StoreChunkResponseChannel(#[source] oneshot::Canceled), - #[error("Sending QueryChunk query failed")] - QueryChunkSendQuery(#[source] SubsystemError), #[error("Response channel to obtain QueryChunk failed")] QueryChunkResponseChannel(#[source] oneshot::Canceled), - #[error("Sending QueryAncestors query failed")] - QueryAncestorsSendQuery(#[source] SubsystemError), #[error("Response channel to obtain QueryAncestors failed")] QueryAncestorsResponseChannel(#[source] oneshot::Canceled), #[error("RuntimeAPI to obtain QueryAncestors failed")] QueryAncestors(#[source] ChainApiError), - #[error("Sending QuerySession query failed")] - QuerySessionSendQuery(#[source] SubsystemError), #[error("Response channel to obtain QuerySession failed")] QuerySessionResponseChannel(#[source] oneshot::Canceled), #[error("RuntimeAPI to obtain QuerySession failed")] QuerySession(#[source] RuntimeApiError), - #[error("Sending QueryValidators query failed")] - QueryValidatorsSendQuery(#[source] SubsystemError), #[error("Response channel to obtain QueryValidators failed")] QueryValidatorsResponseChannel(#[source] oneshot::Canceled), #[error("RuntimeAPI to obtain QueryValidators failed")] QueryValidators(#[source] RuntimeApiError), - #[error("Sending AvailabilityCores query failed")] - AvailabilityCoresSendQuery(#[source] SubsystemError), #[error("Response channel to obtain AvailabilityCores failed")] AvailabilityCoresResponseChannel(#[source] oneshot::Canceled), #[error("RuntimeAPI to obtain AvailabilityCores failed")] AvailabilityCores(#[source] RuntimeApiError), - #[error("Sending AvailabilityCores query failed")] - QueryAvailabilitySendQuery(#[source] SubsystemError), #[error("Response channel to obtain AvailabilityCores failed")] QueryAvailabilityResponseChannel(#[source] oneshot::Canceled), - #[error("Sending out a peer report message")] - ReportPeerMessageSend(#[source] 
SubsystemError), - - #[error("Sending a gossip message")] - TrackedGossipMessage(#[source] SubsystemError), - #[error("Receive channel closed")] IncomingMessageChannel(#[source] SubsystemError), } @@ -130,7 +110,7 @@ const BENEFIT_VALID_MESSAGE: Rep = Rep::new(10, "Valid message"); #[derive(Encode, Decode, Debug, Clone, PartialEq, Eq, Hash)] pub struct AvailabilityGossipMessage { /// Anchor hash of the candidate the `ErasureChunk` is associated to. - pub candidate_hash: Hash, + pub candidate_hash: CandidateHash, /// The erasure chunk, a encoded information part of `AvailabilityData`. pub erasure_chunk: ErasureChunk, } @@ -149,13 +129,13 @@ struct ProtocolState { /// Caches a mapping of relay parents or ancestor to live candidate receipts. /// Allows fast intersection of live candidates with views and consecutive unioning. /// Maps relay parent / ancestor -> live candidate receipts + its hash. - receipts: HashMap>, + receipts: HashMap>, /// Allow reverse caching of view checks. /// Maps candidate hash -> relay parent for extracting meta information from `PerRelayParent`. /// Note that the presence of this is not sufficient to determine if deletion is OK, i.e. /// two histories could cover this. - reverse: HashMap, + reverse: HashMap, /// Keeps track of which candidate receipts are required due to ancestors of which relay parents /// of our view. @@ -166,7 +146,7 @@ struct ProtocolState { per_relay_parent: HashMap, /// Track data that is specific to a candidate. - per_candidate: HashMap, + per_candidate: HashMap, } #[derive(Debug, Clone, Default)] @@ -176,11 +156,11 @@ struct PerCandidate { /// candidate hash + erasure chunk index -> gossip message message_vault: HashMap, - /// Track received candidate hashes and chunk indices from peers. - received_messages: HashMap>, + /// Track received candidate hashes and validator indices from peers. + received_messages: HashMap>, /// Track already sent candidate hashes and the erasure chunk index to the peers. 
- sent_messages: HashMap>, + sent_messages: HashMap>, /// The set of validators. validators: Vec, @@ -189,6 +169,14 @@ struct PerCandidate { validator_index: Option, } +impl PerCandidate { + /// Returns `true` iff the given `message` is required by the given `peer`. + fn message_required_by_peer(&self, peer: &PeerId, message: &(CandidateHash, ValidatorIndex)) -> bool { + self.received_messages.get(peer).map(|v| !v.contains(message)).unwrap_or(true) + && self.sent_messages.get(peer).map(|v| !v.contains(message)).unwrap_or(true) + } +} + #[derive(Debug, Clone, Default)] struct PerRelayParent { /// Set of `K` ancestors for this relay parent. @@ -197,6 +185,7 @@ struct PerRelayParent { impl ProtocolState { /// Collects the relay_parents ancestors including the relay parents themselfes. + #[tracing::instrument(level = "trace", skip(relay_parents), fields(subsystem = LOG_TARGET))] fn extend_with_ancestors<'a>( &'a self, relay_parents: impl IntoIterator + 'a, @@ -218,10 +207,11 @@ impl ProtocolState { /// Unionize all cached entries for the given relay parents and its ancestors. /// Ignores all non existent relay parents, so this can be used directly with a peers view. 
/// Returns a map from candidate hash -> receipt + #[tracing::instrument(level = "trace", skip(relay_parents), fields(subsystem = LOG_TARGET))] fn cached_live_candidates_unioned<'a>( &'a self, relay_parents: impl IntoIterator + 'a, - ) -> HashMap { + ) -> HashMap { let relay_parents_and_ancestors = self.extend_with_ancestors(relay_parents); relay_parents_and_ancestors .into_iter() @@ -229,9 +219,10 @@ impl ProtocolState { .map(|receipt_set| receipt_set.into_iter()) .flatten() .map(|(receipt_hash, receipt)| (receipt_hash.clone(), receipt.clone())) - .collect::>() + .collect() } + #[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn add_relay_parent( &mut self, ctx: &mut Context, @@ -287,7 +278,8 @@ impl ProtocolState { Ok(()) } - fn remove_relay_parent(&mut self, relay_parent: &Hash) -> Result<()> { + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] + fn remove_relay_parent(&mut self, relay_parent: &Hash) { // we might be ancestor of some other relay_parent if let Some(ref mut descendants) = self.ancestry.get_mut(relay_parent) { // if we were the last user, and it is @@ -296,8 +288,9 @@ impl ProtocolState { // remove from the ancestry index self.ancestry.remove(relay_parent); // and also remove the actual receipt - self.receipts.remove(relay_parent); - self.per_candidate.remove(relay_parent); + if let Some(candidates) = self.receipts.remove(relay_parent) { + candidates.into_iter().for_each(|c| { self.per_candidate.remove(&c.0); }); + } } } if let Some(per_relay_parent) = self.per_relay_parent.remove(relay_parent) { @@ -313,18 +306,19 @@ impl ProtocolState { // remove from the ancestry index self.ancestry.remove(&ancestor); // and also remove the actual receipt - self.receipts.remove(&ancestor); - self.per_candidate.remove(&ancestor); + if let Some(candidates) = self.receipts.remove(&ancestor) { + candidates.into_iter().for_each(|c| { self.per_candidate.remove(&c.0); }); + } } } } } - Ok(()) } 
} /// Deal with network bridge updates and track what needs to be tracked /// which depends on the message type received. +#[tracing::instrument(level = "trace", skip(ctx, keystore, metrics), fields(subsystem = LOG_TARGET))] async fn handle_network_msg( ctx: &mut Context, keystore: &SyncCryptoStorePtr, @@ -345,7 +339,7 @@ where state.peer_views.remove(&peerid); } NetworkBridgeEvent::PeerViewChange(peerid, view) => { - handle_peer_view_change(ctx, state, peerid, view, metrics).await?; + handle_peer_view_change(ctx, state, peerid, view, metrics).await; } NetworkBridgeEvent::OurViewChange(view) => { handle_our_view_change(ctx, keystore, state, view, metrics).await?; @@ -368,6 +362,7 @@ where } /// Handle the changes necessary when our view changes. +#[tracing::instrument(level = "trace", skip(ctx, keystore, metrics), fields(subsystem = LOG_TARGET))] async fn handle_our_view_change( ctx: &mut Context, keystore: &SyncCryptoStorePtr, @@ -378,24 +373,24 @@ async fn handle_our_view_change( where Context: SubsystemContext, { - let old_view = std::mem::replace(&mut (state.view), view); + let _timer = metrics.time_handle_our_view_change(); + + let old_view = std::mem::replace(&mut state.view, view); // needed due to borrow rules let view = state.view.clone(); - let added = view.difference(&old_view).collect::>(); // add all the relay parents and fill the cache - for added in added.iter() { - let added = **added; - let validators = query_validators(ctx, added).await?; + for added in view.difference(&old_view) { + let validators = query_validators(ctx, *added).await?; let validator_index = obtain_our_validator_index(&validators, keystore.clone()).await; state - .add_relay_parent(ctx, added, validators, validator_index) + .add_relay_parent(ctx, *added, validators, validator_index) .await?; } // handle all candidates - for (candidate_hash, _receipt) in state.cached_live_candidates_unioned(added) { + for (candidate_hash, _receipt) in 
state.cached_live_candidates_unioned(view.difference(&old_view)) { let per_candidate = state.per_candidate.entry(candidate_hash).or_default(); // assure the node has the validator role @@ -432,12 +427,9 @@ where // obtain the chunks from the cache, if not fallback // and query the availability store let message_id = (candidate_hash, chunk_index); - let erasure_chunk = if let Some(message) = per_candidate.message_vault.get(&chunk_index) - { + let erasure_chunk = if let Some(message) = per_candidate.message_vault.get(&chunk_index) { message.erasure_chunk.clone() - } else if let Some(erasure_chunk) = - query_chunk(ctx, candidate_hash, chunk_index as ValidatorIndex).await? - { + } else if let Some(erasure_chunk) = query_chunk(ctx, candidate_hash, chunk_index as ValidatorIndex).await? { erasure_chunk } else { continue; @@ -447,30 +439,22 @@ where let peers = peers .iter() - .filter(|peer| { - // only pick those which were not sent before - !per_candidate - .sent_messages - .get(*peer) - .filter(|set| set.contains(&message_id)) - .is_some() - }) - .map(|peer| peer.clone()) + .filter(|peer| per_candidate.message_required_by_peer(peer, &message_id)) + .cloned() .collect::>(); let message = AvailabilityGossipMessage { candidate_hash, erasure_chunk, }; - send_tracked_gossip_message_to_peers(ctx, per_candidate, metrics, peers, message) - .await?; + send_tracked_gossip_message_to_peers(ctx, per_candidate, metrics, peers, message).await; } } // cleanup the removed relay parents and their states let removed = old_view.difference(&view).collect::>(); for removed in removed { - state.remove_relay_parent(&removed)?; + state.remove_relay_parent(&removed); } Ok(()) } @@ -482,12 +466,11 @@ async fn send_tracked_gossip_message_to_peers( metrics: &Metrics, peers: Vec, message: AvailabilityGossipMessage, -) -> Result<()> +) where Context: SubsystemContext, { - send_tracked_gossip_messages_to_peers(ctx, per_candidate, metrics, peers, iter::once(message)) - .await + 
send_tracked_gossip_messages_to_peers(ctx, per_candidate, metrics, peers, iter::once(message)).await } #[inline(always)] @@ -497,26 +480,26 @@ async fn send_tracked_gossip_messages_to_peer( metrics: &Metrics, peer: PeerId, message_iter: impl IntoIterator, -) -> Result<()> +) where Context: SubsystemContext, { - send_tracked_gossip_messages_to_peers(ctx, per_candidate, metrics, vec![peer], message_iter) - .await + send_tracked_gossip_messages_to_peers(ctx, per_candidate, metrics, vec![peer], message_iter).await } +#[tracing::instrument(level = "trace", skip(ctx, metrics, message_iter), fields(subsystem = LOG_TARGET))] async fn send_tracked_gossip_messages_to_peers( ctx: &mut Context, per_candidate: &mut PerCandidate, metrics: &Metrics, peers: Vec, message_iter: impl IntoIterator, -) -> Result<()> +) where Context: SubsystemContext, { if peers.is_empty() { - return Ok(()); + return; } for message in message_iter { for peer in peers.iter() { @@ -543,24 +526,22 @@ where protocol_v1::ValidationProtocol::AvailabilityDistribution(wire_message), ), )) - .await - .map_err(|e| Error::TrackedGossipMessage(e))?; + .await; metrics.on_chunk_distributed(); } - - Ok(()) } // Send the difference between two views which were not sent // to that particular peer. 
+#[tracing::instrument(level = "trace", skip(ctx, metrics), fields(subsystem = LOG_TARGET))] async fn handle_peer_view_change( ctx: &mut Context, state: &mut ProtocolState, origin: PeerId, view: View, metrics: &Metrics, -) -> Result<()> +) where Context: SubsystemContext, { @@ -591,23 +572,13 @@ where per_candidate .message_vault .get(&erasure_chunk_index) - .filter(|_| { - // check if that erasure chunk was already sent before - if let Some(sent_set) = per_candidate.sent_messages.get(&origin) { - if sent_set.contains(&message_id) { - return false; - } - } - true - }) + .filter(|_| per_candidate.message_required_by_peer(&origin, &message_id)) }) .cloned() .collect::>(); - send_tracked_gossip_messages_to_peer(ctx, per_candidate, metrics, origin.clone(), messages) - .await?; + send_tracked_gossip_messages_to_peer(ctx, per_candidate, metrics, origin.clone(), messages).await; } - Ok(()) } /// Obtain the first key which has a signing key. @@ -631,6 +602,7 @@ async fn obtain_our_validator_index( } /// Handle an incoming message from a peer. 
+#[tracing::instrument(level = "trace", skip(ctx, metrics), fields(subsystem = LOG_TARGET))] async fn process_incoming_peer_message( ctx: &mut Context, state: &mut ProtocolState, @@ -641,19 +613,21 @@ async fn process_incoming_peer_message( where Context: SubsystemContext, { + let _timer = metrics.time_process_incoming_peer_message(); + // obtain the set of candidates we are interested in based on our current view let live_candidates = state.cached_live_candidates_unioned(state.view.0.iter()); // check if the candidate is of interest - let live_candidate = if let Some(live_candidate) = live_candidates.get(&message.candidate_hash) - { + let live_candidate = if let Some(live_candidate) = live_candidates.get(&message.candidate_hash) { live_candidate } else { - return modify_reputation(ctx, origin, COST_NOT_A_LIVE_CANDIDATE).await; + modify_reputation(ctx, origin, COST_NOT_A_LIVE_CANDIDATE).await; + return Ok(()); }; // check the merkle proof - let root = &live_candidate.commitments.erasure_root; + let root = &live_candidate.descriptor.erasure_root; let anticipated_hash = if let Ok(hash) = branch_hash( root, &message.erasure_chunk.proof, @@ -661,12 +635,14 @@ where ) { hash } else { - return modify_reputation(ctx, origin, COST_MERKLE_PROOF_INVALID).await; + modify_reputation(ctx, origin, COST_MERKLE_PROOF_INVALID).await; + return Ok(()); }; let erasure_chunk_hash = BlakeTwo256::hash(&message.erasure_chunk.chunk); if anticipated_hash != erasure_chunk_hash { - return modify_reputation(ctx, origin, COST_MERKLE_PROOF_INVALID).await; + modify_reputation(ctx, origin, COST_MERKLE_PROOF_INVALID).await; + return Ok(()); } // an internal unique identifier of this message @@ -682,7 +658,8 @@ where .entry(origin.clone()) .or_default(); if received_set.contains(&message_id) { - return modify_reputation(ctx, origin, COST_PEER_DUPLICATE_MESSAGE).await; + modify_reputation(ctx, origin, COST_PEER_DUPLICATE_MESSAGE).await; + return Ok(()); } else { 
received_set.insert(message_id.clone()); } @@ -694,9 +671,9 @@ where .insert(message_id.1, message.clone()) .is_some() { - modify_reputation(ctx, origin, BENEFIT_VALID_MESSAGE).await?; + modify_reputation(ctx, origin, BENEFIT_VALID_MESSAGE).await; } else { - modify_reputation(ctx, origin, BENEFIT_VALID_MESSAGE_FIRST).await?; + modify_reputation(ctx, origin, BENEFIT_VALID_MESSAGE_FIRST).await; // save the chunk for our index if let Some(validator_index) = per_candidate.validator_index { @@ -710,8 +687,8 @@ where ) .await? { - warn!( - target: TARGET, + tracing::warn!( + target: LOG_TARGET, "Failed to store erasure chunk to availability store" ); } @@ -737,19 +714,12 @@ where let peers = peers .into_iter() - .filter(|peer| { - let peer: PeerId = peer.clone(); - // avoid sending duplicate messages - per_candidate - .sent_messages - .entry(peer) - .or_default() - .contains(&message_id) - }) + .filter(|peer| per_candidate.message_required_by_peer(peer, &message_id)) .collect::>(); // gossip that message to interested peers - send_tracked_gossip_message_to_peers(ctx, per_candidate, metrics, peers, message).await + send_tracked_gossip_message_to_peers(ctx, per_candidate, metrics, peers, message).await; + Ok(()) } /// The bitfield distribution subsystem. @@ -770,6 +740,7 @@ impl AvailabilityDistributionSubsystem { } /// Start processing work as passed on from the Overseer. 
+ #[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))] async fn run(self, mut ctx: Context) -> Result<()> where Context: SubsystemContext, @@ -794,9 +765,10 @@ impl AvailabilityDistributionSubsystem { ) .await { - warn!( - target: TARGET, - "Failed to handle incoming network messages: {:?}", e + tracing::warn!( + target: LOG_TARGET, + err = ?e, + "Failed to handle incoming network messages", ); } } @@ -823,7 +795,6 @@ where let future = self .run(ctx) .map_err(|e| SubsystemError::with_origin("availability-distribution", e)) - .map(|_| ()) .boxed(); SpawnedSubsystem { @@ -834,6 +805,7 @@ where } /// Obtain all live candidates based on an iterator of relay heads. +#[tracing::instrument(level = "trace", skip(ctx, relay_parents), fields(subsystem = LOG_TARGET))] async fn query_live_candidates_without_ancestors( ctx: &mut Context, relay_parents: impl IntoIterator, @@ -859,11 +831,12 @@ where /// Obtain all live candidates based on an iterator or relay heads including `k` ancestors. /// /// Relay parent. +#[tracing::instrument(level = "trace", skip(ctx, relay_parents), fields(subsystem = LOG_TARGET))] async fn query_live_candidates( ctx: &mut Context, state: &mut ProtocolState, relay_parents: impl IntoIterator, -) -> Result> +) -> Result> where Context: SubsystemContext, { @@ -872,7 +845,7 @@ where let capacity = hint.1.unwrap_or(hint.0) * (1 + AvailabilityDistributionSubsystem::K); let mut live_candidates = - HashMap::::with_capacity(capacity); + HashMap::::with_capacity(capacity); for relay_parent in iter { // register one of relay parents (not the ancestors) @@ -921,6 +894,7 @@ where } /// Query all para IDs. 
+#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn query_para_ids(ctx: &mut Context, relay_parent: Hash) -> Result> where Context: SubsystemContext, @@ -930,8 +904,7 @@ where relay_parent, RuntimeApiRequest::AvailabilityCores(tx), ))) - .await - .map_err(|e| Error::AvailabilityCoresSendQuery(e))?; + .await; let all_para_ids: Vec<_> = rx .await @@ -952,41 +925,41 @@ where } /// Modify the reputation of a peer based on its behavior. -async fn modify_reputation(ctx: &mut Context, peer: PeerId, rep: Rep) -> Result<()> +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] +async fn modify_reputation(ctx: &mut Context, peer: PeerId, rep: Rep) where Context: SubsystemContext, { - trace!( - target: TARGET, - "Reputation change of {:?} for peer {:?}", - rep, - peer + tracing::trace!( + target: LOG_TARGET, + rep = ?rep, + peer_id = ?peer, + "Reputation change for peer", ); ctx.send_message(AllMessages::NetworkBridge( NetworkBridgeMessage::ReportPeer(peer, rep), - )) - .await - .map_err(|e| Error::ReportPeerMessageSend(e)) + )).await; } /// Query the proof of validity for a particular candidate hash. 
-async fn query_data_availability(ctx: &mut Context, candidate_hash: Hash) -> Result +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] +async fn query_data_availability(ctx: &mut Context, candidate_hash: CandidateHash) -> Result where Context: SubsystemContext, { let (tx, rx) = oneshot::channel(); ctx.send_message(AllMessages::AvailabilityStore( AvailabilityStoreMessage::QueryDataAvailability(candidate_hash, tx), - )) - .await - .map_err(|e| Error::QueryAvailabilitySendQuery(e))?; + )).await; + rx.await .map_err(|e| Error::QueryAvailabilityResponseChannel(e)) } +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn query_chunk( ctx: &mut Context, - candidate_hash: Hash, + candidate_hash: CandidateHash, validator_index: ValidatorIndex, ) -> Result> where @@ -995,15 +968,15 @@ where let (tx, rx) = oneshot::channel(); ctx.send_message(AllMessages::AvailabilityStore( AvailabilityStoreMessage::QueryChunk(candidate_hash, validator_index, tx), - )) - .await - .map_err(|e| Error::QueryChunkSendQuery(e))?; + )).await; + rx.await.map_err(|e| Error::QueryChunkResponseChannel(e)) } +#[tracing::instrument(level = "trace", skip(ctx, erasure_chunk), fields(subsystem = LOG_TARGET))] async fn store_chunk( ctx: &mut Context, - candidate_hash: Hash, + candidate_hash: CandidateHash, relay_parent: Hash, validator_index: ValidatorIndex, erasure_chunk: ErasureChunk, @@ -1012,22 +985,21 @@ where Context: SubsystemContext, { let (tx, rx) = oneshot::channel(); - ctx.send_message( - AllMessages::AvailabilityStore( - AvailabilityStoreMessage::StoreChunk { - candidate_hash, - relay_parent, - validator_index, - chunk: erasure_chunk, - tx, - } - )).await - .map_err(|e| Error::StoreChunkSendQuery(e))?; - - rx.await.map_err(|e| Error::StoreChunkResponseChannel(e)) + ctx.send_message(AllMessages::AvailabilityStore( + AvailabilityStoreMessage::StoreChunk { + candidate_hash, + relay_parent, + validator_index, + chunk: 
erasure_chunk, + tx, + } + )).await; + + rx.await.map_err(|e| Error::StoreChunkResponseChannel(e)) } /// Request the head data for a particular para. +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn query_pending_availability( ctx: &mut Context, relay_parent: Hash, @@ -1040,9 +1012,7 @@ where ctx.send_message(AllMessages::RuntimeApi(RuntimeApiMessage::Request( relay_parent, RuntimeApiRequest::CandidatePendingAvailability(para, tx), - ))) - .await - .map_err(|e| Error::QueryPendingAvailabilitySendQuery(e))?; + ))).await; rx.await .map_err(|e| Error::QueryPendingAvailabilityResponseChannel(e))? @@ -1050,6 +1020,7 @@ where } /// Query the validator set. +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn query_validators( ctx: &mut Context, relay_parent: Hash, @@ -1064,14 +1035,14 @@ where )); ctx.send_message(query_validators) - .await - .map_err(|e| Error::QueryValidatorsSendQuery(e))?; + .await; rx.await .map_err(|e| Error::QueryValidatorsResponseChannel(e))? .map_err(|e| Error::QueryValidators(e)) } /// Query the hash of the `K` ancestors +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn query_k_ancestors( ctx: &mut Context, relay_parent: Hash, @@ -1088,14 +1059,14 @@ where }); ctx.send_message(query_ancestors) - .await - .map_err(|e| Error::QueryAncestorsSendQuery(e))?; + .await; rx.await .map_err(|e| Error::QueryAncestorsResponseChannel(e))? .map_err(|e| Error::QueryAncestors(e)) } /// Query the session index of a relay parent +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn query_session_index_for_child( ctx: &mut Context, relay_parent: Hash, @@ -1110,14 +1081,14 @@ where )); ctx.send_message(query_session_idx_for_child) - .await - .map_err(|e| Error::QuerySessionSendQuery(e))?; + .await; rx.await .map_err(|e| Error::QuerySessionResponseChannel(e))? 
.map_err(|e| Error::QuerySession(e)) } /// Queries up to k ancestors with the constraints of equiv session +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn query_up_to_k_ancestors_in_same_session( ctx: &mut Context, relay_parent: Hash, @@ -1158,6 +1129,8 @@ where #[derive(Clone)] struct MetricsInner { gossipped_availability_chunks: prometheus::Counter, + handle_our_view_change: prometheus::Histogram, + process_incoming_peer_message: prometheus::Histogram, } /// Availability Distribution metrics. @@ -1170,6 +1143,16 @@ impl Metrics { metrics.gossipped_availability_chunks.inc(); } } + + /// Provide a timer for `handle_our_view_change` which observes on drop. + fn time_handle_our_view_change(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.handle_our_view_change.start_timer()) + } + + /// Provide a timer for `process_incoming_peer_message` which observes on drop. + fn time_process_incoming_peer_message(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.process_incoming_peer_message.start_timer()) + } } impl metrics::Metrics for Metrics { @@ -1184,10 +1167,25 @@ impl metrics::Metrics for Metrics { )?, registry, )?, + handle_our_view_change: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_availability_distribution_handle_our_view_change", + "Time spent within `availability_distribution::handle_our_view_change`", + ) + )?, + registry, + )?, + process_incoming_peer_message: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_availability_distribution_process_incoming_peer_message", + "Time spent within `availability_distribution::process_incoming_peer_message`", + ) + )?, + registry, + )?, }; Ok(Metrics(Some(metrics))) } } - -#[cfg(test)] -mod tests; diff --git a/node/network/availability-distribution/src/tests.rs b/node/network/availability-distribution/src/tests.rs index 
8012da546b09e34dc08e7edd36c332fd4db894fc..b55c7a2241deaead238c3380d60c7e4e887609ed 100644 --- a/node/network/availability-distribution/src/tests.rs +++ b/node/network/availability-distribution/src/tests.rs @@ -23,7 +23,7 @@ use polkadot_primitives::v1::{ AvailableData, BlockData, CandidateCommitments, CandidateDescriptor, GroupIndex, GroupRotationInfo, HeadData, OccupiedCore, PersistedValidationData, PoV, ScheduledCore, }; -use polkadot_subsystem_testhelpers::{self as test_helpers}; +use polkadot_subsystem_testhelpers as test_helpers; use futures::{executor, future, Future}; use futures_timer::Delay; @@ -103,7 +103,7 @@ async fn overseer_send( overseer: &mut test_helpers::TestSubsystemContextHandle, msg: AvailabilityDistributionMessage, ) { - log::trace!("Sending message:\n{:?}", &msg); + tracing::trace!(msg = ?msg, "sending message"); overseer .send(FromOverseer::Communication { msg }) .timeout(TIMEOUT) @@ -114,13 +114,13 @@ async fn overseer_send( async fn overseer_recv( overseer: &mut test_helpers::TestSubsystemContextHandle, ) -> AllMessages { - log::trace!("Waiting for message ..."); + tracing::trace!("waiting for message ..."); let msg = overseer .recv() .timeout(TIMEOUT) .await .expect("TIMEOUT is enough to recv."); - log::trace!("Received message:\n{:?}", &msg); + tracing::trace!(msg = ?msg, "received message"); msg } @@ -218,6 +218,7 @@ impl Default for TestState { block_number: Default::default(), hrmp_mqc_heads: Vec::new(), dmq_mqc_head: Default::default(), + max_pov_size: 1024, }; let validator_index = Some((validators.len() - 1) as ValidatorIndex); @@ -241,7 +242,7 @@ impl Default for TestState { fn make_available_data(test: &TestState, pov: PoV) -> AvailableData { AvailableData { validation_data: test.persisted_validation_data.clone(), - pov, + pov: Arc::new(pov), } } @@ -254,7 +255,7 @@ fn make_erasure_root(test: &TestState, pov: PoV) -> Hash { fn make_valid_availability_gossip( test: &TestState, - candidate_hash: Hash, + candidate_hash: 
CandidateHash, erasure_chunk_index: u32, pov: PoV, ) -> AvailabilityGossipMessage { @@ -289,11 +290,11 @@ impl TestCandidateBuilder { para_id: self.para_id, pov_hash: self.pov_hash, relay_parent: self.relay_parent, + erasure_root: self.erasure_root, ..Default::default() }, commitments: CandidateCommitments { head_data: self.head_data, - erasure_root: self.erasure_root, ..Default::default() }, } @@ -313,16 +314,16 @@ fn helper_integrity() { let candidate = TestCandidateBuilder { para_id: test_state.chain_ids[0], relay_parent: test_state.relay_parent, - pov_hash: pov_hash, + pov_hash, erasure_root: make_erasure_root(&test_state, pov_block.clone()), ..Default::default() } .build(); let message = - make_valid_availability_gossip(&test_state, dbg!(candidate.hash()), 2, pov_block.clone()); + make_valid_availability_gossip(&test_state, candidate.hash(), 2, pov_block.clone()); - let root = dbg!(&candidate.commitments.erasure_root); + let root = dbg!(&candidate.descriptor.erasure_root); let anticipated_hash = branch_hash( root, @@ -394,7 +395,7 @@ fn reputation_verification() { } .build(), TestCandidateBuilder { - para_id: test_state.chain_ids[0], + para_id: test_state.chain_ids[1], relay_parent: test_state.relay_parent, pov_hash: pov_hash_b, erasure_root: make_erasure_root(&test_state, pov_block_b.clone()), @@ -438,11 +439,11 @@ fn reputation_verification() { let peer_b = PeerId::random(); assert_ne!(&peer_a, &peer_b); - log::trace!("peer A: {:?}", peer_a); - log::trace!("peer B: {:?}", peer_b); + tracing::trace!("peer A: {:?}", peer_a); + tracing::trace!("peer B: {:?}", peer_b); - log::trace!("candidate A: {:?}", candidates[0].hash()); - log::trace!("candidate B: {:?}", candidates[1].hash()); + tracing::trace!("candidate A: {:?}", candidates[0].hash()); + tracing::trace!("candidate B: {:?}", candidates[1].hash()); overseer_signal( &mut virtual_overseer, @@ -626,7 +627,7 @@ fn reputation_verification() { let mut candidates2 = candidates.clone(); // check if the 
availability store can provide the desired erasure chunks for i in 0usize..2 { - log::trace!("0000"); + tracing::trace!("0000"); let avail_data = make_available_data(&test_state, pov_block_a.clone()); let chunks = derive_erasure_chunks_with_proofs(test_state.validators.len(), &avail_data); @@ -642,19 +643,17 @@ fn reputation_verification() { ) ) => { let index = candidates2.iter().enumerate().find(|x| { x.1.hash() == candidate_hash }).map(|x| x.0).unwrap(); - expected = dbg!(candidates2.swap_remove(index).hash()); - tx.send( - i == 0 - ).unwrap(); + expected = candidates2.swap_remove(index).hash(); + tx.send(i == 0).unwrap(); } ); assert_eq!(chunks.len(), test_state.validators.len()); - log::trace!("xxxx"); + tracing::trace!("xxxx"); // retrieve a stored chunk for (j, chunk) in chunks.into_iter().enumerate() { - log::trace!("yyyy i={}, j={}", i, j); + tracing::trace!("yyyy i={}, j={}", i, j); if i != 0 { // not a validator, so this never happens break; @@ -761,6 +760,23 @@ fn reputation_verification() { ) .await; + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::SendValidationMessage( + peers, + protocol_v1::ValidationProtocol::AvailabilityDistribution( + protocol_v1::AvailabilityDistributionMessage::Chunk(hash, chunk), + ), + ) + ) => { + assert_eq!(1, peers.len()); + assert_eq!(peers[0], peer_a); + assert_eq!(candidates[0].hash(), hash); + assert_eq!(valid.erasure_chunk, chunk); + } + ); + assert_matches!( overseer_recv(&mut virtual_overseer).await, AllMessages::NetworkBridge( @@ -860,7 +876,7 @@ fn reputation_verification() { { // send another message - let valid2: AvailabilityGossipMessage = make_valid_availability_gossip( + let valid2 = make_valid_availability_gossip( &test_state, candidates[2].hash(), 1, @@ -889,6 +905,102 @@ fn reputation_verification() { } ); } + + { + // send another message + let valid = make_valid_availability_gossip( + &test_state, + candidates[1].hash(), + 2, + 
pov_block_b.clone(), + ); + + // Make peer a and b listen on `current` + overseer_send( + &mut virtual_overseer, + AvailabilityDistributionMessage::NetworkBridgeUpdateV1( + NetworkBridgeEvent::PeerViewChange(peer_a.clone(), view![current]), + ), + ) + .await; + + overseer_send( + &mut virtual_overseer, + AvailabilityDistributionMessage::NetworkBridgeUpdateV1( + NetworkBridgeEvent::PeerViewChange(peer_b.clone(), view![current]), + ), + ) + .await; + + overseer_send( + &mut virtual_overseer, + AvailabilityDistributionMessage::NetworkBridgeUpdateV1( + NetworkBridgeEvent::PeerMessage( + peer_a.clone(), + chunk_protocol_message(valid.clone()), + ), + ), + ) + .await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::ReportPeer( + peer, + rep + ) + ) => { + assert_eq!(peer, peer_a); + assert_eq!(rep, BENEFIT_VALID_MESSAGE_FIRST); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::SendValidationMessage( + peers, + protocol_v1::ValidationProtocol::AvailabilityDistribution( + protocol_v1::AvailabilityDistributionMessage::Chunk(hash, chunk), + ), + ) + ) => { + assert_eq!(1, peers.len()); + assert_eq!(peers[0], peer_b); + assert_eq!(candidates[1].hash(), hash); + assert_eq!(valid.erasure_chunk, chunk); + } + ); + + // Let B send the same message + overseer_send( + &mut virtual_overseer, + AvailabilityDistributionMessage::NetworkBridgeUpdateV1( + NetworkBridgeEvent::PeerMessage( + peer_b.clone(), + chunk_protocol_message(valid.clone()), + ), + ), + ) + .await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::ReportPeer( + peer, + rep + ) + ) => { + assert_eq!(peer, peer_b); + assert_eq!(rep, BENEFIT_VALID_MESSAGE); + } + ); + + // There shouldn't be any other message. 
+ assert!(virtual_overseer.recv().timeout(TIMEOUT).await.is_none()); + } }); } diff --git a/node/network/bitfield-distribution/Cargo.toml b/node/network/bitfield-distribution/Cargo.toml index 41e34cb2f7cf605a50ca37f1d78eabe2950105f2..93bde9c58f746bead3beb945124f4f06cded77de 100644 --- a/node/network/bitfield-distribution/Cargo.toml +++ b/node/network/bitfield-distribution/Cargo.toml @@ -5,9 +5,10 @@ authors = ["Parity Technologies "] edition = "2018" [dependencies] -futures = "0.3.5" -log = "0.4.11" -codec = { package="parity-scale-codec", version = "1.3.4" } +futures = "0.3.8" +tracing = "0.1.22" +tracing-futures = "0.2.4" +parity-scale-codec = { version = "1.3.5", default-features = false, features = ["derive"] } polkadot-primitives = { path = "../../../primitives" } polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } @@ -16,11 +17,10 @@ polkadot-node-network-protocol = { path = "../../network/protocol" } [dev-dependencies] polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } bitvec = { version = "0.17.4", default-features = false, features = ["alloc"] } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } maplit = "1.0.2" -env_logger = "0.7.1" -assert_matches = "1.3.0" -tempfile = "3.1.0" +log = "0.4.11" +env_logger = "0.8.2" 
+assert_matches = "1.4.0" diff --git a/node/network/bitfield-distribution/src/lib.rs b/node/network/bitfield-distribution/src/lib.rs index 7a0235e8e62c39a1f52931976e36051b1cd58152..2d1313c58e5692e91d94cec3c825187ec75e1a87 100644 --- a/node/network/bitfield-distribution/src/lib.rs +++ b/node/network/bitfield-distribution/src/lib.rs @@ -22,20 +22,16 @@ #![deny(unused_crate_dependencies)] -use codec::{Decode, Encode}; -use futures::{channel::oneshot, FutureExt, TryFutureExt}; +use parity_scale_codec::{Decode, Encode}; +use futures::{channel::oneshot, FutureExt}; -use log::{trace, warn}; use polkadot_subsystem::messages::*; use polkadot_subsystem::{ ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, Subsystem, SubsystemContext, SubsystemResult, }; -use polkadot_node_subsystem_util::{ - metrics::{self, prometheus}, -}; +use polkadot_node_subsystem_util::metrics::{self, prometheus}; use polkadot_primitives::v1::{Hash, SignedAvailabilityBitfield, SigningContext, ValidatorId}; use polkadot_node_network_protocol::{v1 as protocol_v1, PeerId, NetworkBridgeEvent, View, ReputationChange}; -use polkadot_subsystem::SubsystemError; use std::collections::{HashMap, HashSet}; const COST_SIGNATURE_INVALID: ReputationChange = @@ -82,7 +78,7 @@ impl BitfieldGossipMessage { /// Data used to track information of peers and relay parents the /// overseer ordered us to work on. -#[derive(Default, Clone)] +#[derive(Default, Clone, Debug)] struct ProtocolState { /// track all active peers and their views /// to determine what is relevant to them. 
@@ -125,15 +121,12 @@ impl PerRelayParentData { peer: &PeerId, validator: &ValidatorId, ) -> bool { - if let Some(set) = self.message_sent_to_peer.get(peer) { - !set.contains(validator) - } else { - false - } + self.message_sent_to_peer.get(peer).map(|v| !v.contains(validator)).unwrap_or(true) + && self.message_received_from_peer.get(peer).map(|v| !v.contains(validator)).unwrap_or(true) } } -const TARGET: &'static str = "bitd"; +const LOG_TARGET: &str = "bitfield_distribution"; /// The bitfield distribution subsystem. pub struct BitfieldDistribution { @@ -147,65 +140,81 @@ impl BitfieldDistribution { } /// Start processing work as passed on from the Overseer. - async fn run(self, mut ctx: Context) -> SubsystemResult<()> + #[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))] + async fn run(self, mut ctx: Context) where Context: SubsystemContext, { // work: process incoming messages from the overseer and process accordingly. let mut state = ProtocolState::default(); loop { - let message = ctx.recv().await?; + let message = match ctx.recv().await { + Ok(message) => message, + Err(e) => { + tracing::debug!(target: LOG_TARGET, err = ?e, "Failed to receive a message from Overseer, exiting"); + return; + }, + }; match message { FromOverseer::Communication { msg: BitfieldDistributionMessage::DistributeBitfield(hash, signed_availability), } => { - trace!(target: TARGET, "Processing DistributeBitfield"); - handle_bitfield_distribution(&mut ctx, &mut state, &self.metrics, hash, signed_availability) - .await?; + tracing::trace!(target: LOG_TARGET, "Processing DistributeBitfield"); + handle_bitfield_distribution( + &mut ctx, + &mut state, + &self.metrics, + hash, + signed_availability, + ).await; } FromOverseer::Communication { msg: BitfieldDistributionMessage::NetworkBridgeUpdateV1(event), } => { - trace!(target: TARGET, "Processing NetworkMessage"); + tracing::trace!(target: LOG_TARGET, "Processing NetworkMessage"); // a network message was received - if 
let Err(e) = handle_network_msg(&mut ctx, &mut state, &self.metrics, event).await { - warn!(target: TARGET, "Failed to handle incoming network messages: {:?}", e); - } + handle_network_msg(&mut ctx, &mut state, &self.metrics, event).await; } FromOverseer::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate { activated, deactivated })) => { + let _timer = self.metrics.time_active_leaves_update(); + for relay_parent in activated { - trace!(target: TARGET, "Start {:?}", relay_parent); + tracing::trace!(target: LOG_TARGET, relay_parent = %relay_parent, "activated"); // query basic system parameters once - if let Some((validator_set, signing_context)) = - query_basics(&mut ctx, relay_parent).await? - { - // If our runtime API fails, we don't take down the node, - // but we might alter peers' reputations erroneously as a result - // of not having the correct bookkeeping. If we have lost a race - // with state pruning, it is unlikely that peers will be sending - // us anything to do with this relay-parent anyway. - let _ = state.per_relay_parent.insert( - relay_parent, - PerRelayParentData { - signing_context, - validator_set, - ..Default::default() - }, - ); + match query_basics(&mut ctx, relay_parent).await { + Ok(Some((validator_set, signing_context))) => { + // If our runtime API fails, we don't take down the node, + // but we might alter peers' reputations erroneously as a result + // of not having the correct bookkeeping. If we have lost a race + // with state pruning, it is unlikely that peers will be sending + // us anything to do with this relay-parent anyway. 
+ let _ = state.per_relay_parent.insert( + relay_parent, + PerRelayParentData { + signing_context, + validator_set, + ..Default::default() + }, + ); + } + Err(e) => { + tracing::warn!(target: LOG_TARGET, err = ?e, "query_basics has failed"); + } + _ => {}, } } for relay_parent in deactivated { - trace!(target: TARGET, "Stop {:?}", relay_parent); + tracing::trace!(target: LOG_TARGET, relay_parent = %relay_parent, "deactivated"); // defer the cleanup to the view change } } FromOverseer::Signal(OverseerSignal::BlockFinalized(hash)) => { - trace!(target: TARGET, "Block finalized {:?}", hash); + tracing::trace!(target: LOG_TARGET, hash = %hash, "block finalized"); } FromOverseer::Signal(OverseerSignal::Conclude) => { - trace!(target: TARGET, "Conclude"); - return Ok(()); + tracing::trace!(target: LOG_TARGET, "Conclude"); + return; } } } @@ -213,15 +222,16 @@ impl BitfieldDistribution { } /// Modify the reputation of a peer based on its behaviour. +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn modify_reputation( ctx: &mut Context, peer: PeerId, rep: ReputationChange, -) -> SubsystemResult<()> +) where Context: SubsystemContext, { - trace!(target: TARGET, "Reputation change of {:?} for peer {:?}", rep, peer); + tracing::trace!(target: LOG_TARGET, rep = ?rep, peer_id = %peer, "reputation change"); ctx.send_message(AllMessages::NetworkBridge( NetworkBridgeMessage::ReportPeer(peer, rep), )) @@ -231,41 +241,44 @@ where /// Distribute a given valid and signature checked bitfield message. /// /// For this variant the source is this node. 
+#[tracing::instrument(level = "trace", skip(ctx, metrics), fields(subsystem = LOG_TARGET))] async fn handle_bitfield_distribution( ctx: &mut Context, state: &mut ProtocolState, metrics: &Metrics, relay_parent: Hash, signed_availability: SignedAvailabilityBitfield, -) -> SubsystemResult<()> +) where Context: SubsystemContext, { + let _timer = metrics.time_handle_bitfield_distribution(); + // Ignore anything the overseer did not tell this subsystem to work on let mut job_data = state.per_relay_parent.get_mut(&relay_parent); let job_data: &mut _ = if let Some(ref mut job_data) = job_data { job_data } else { - trace!( - target: TARGET, - "Not supposed to work on relay parent {} related data", - relay_parent + tracing::trace!( + target: LOG_TARGET, + relay_parent = %relay_parent, + "Not supposed to work on relay parent related data", ); - return Ok(()); + return; }; let validator_set = &job_data.validator_set; if validator_set.is_empty() { - trace!(target: TARGET, "Validator set for {:?} is empty", relay_parent); - return Ok(()); + tracing::trace!(target: LOG_TARGET, relay_parent = %relay_parent, "validator set is empty"); + return; } let validator_index = signed_availability.validator_index() as usize; let validator = if let Some(validator) = validator_set.get(validator_index) { validator.clone() } else { - trace!(target: TARGET, "Could not find a validator for index {}", validator_index); - return Ok(()); + tracing::trace!(target: LOG_TARGET, "Could not find a validator for index {}", validator_index); + return; }; let peer_views = &mut state.peer_views; @@ -274,36 +287,36 @@ where signed_availability, }; - relay_message(ctx, job_data, peer_views, validator, msg).await?; + relay_message(ctx, job_data, peer_views, validator, msg).await; metrics.on_own_bitfield_gossipped(); - - Ok(()) } /// Distribute a given valid and signature checked bitfield message. /// /// Can be originated by another subsystem or received via network from another peer. 
+#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn relay_message( ctx: &mut Context, job_data: &mut PerRelayParentData, peer_views: &mut HashMap, validator: ValidatorId, message: BitfieldGossipMessage, -) -> SubsystemResult<()> +) where Context: SubsystemContext, { // notify the overseer about a new and valid signed bitfield ctx.send_message(AllMessages::Provisioner( - ProvisionerMessage::ProvisionableData(ProvisionableData::Bitfield( - message.relay_parent.clone(), - message.signed_availability.clone(), - )), + ProvisionerMessage::ProvisionableData( + message.relay_parent, + ProvisionableData::Bitfield( + message.relay_parent, + message.signed_availability.clone(), + ), + ), )) - .await?; - - let message_sent_to_peer = &mut (job_data.message_sent_to_peer); + .await; // pass on the bitfield distribution to all interested peers let interested_peers = peer_views @@ -311,13 +324,18 @@ where .filter_map(|(peer, view)| { // check interest in the peer in this message's relay parent if view.contains(&message.relay_parent) { + let message_needed = job_data.message_from_validator_needed_by_peer(&peer, &validator); // track the message as sent for this peer - message_sent_to_peer + job_data.message_sent_to_peer .entry(peer.clone()) .or_default() .insert(validator.clone()); - Some(peer.clone()) + if message_needed { + Some(peer.clone()) + } else { + None + } } else { None } @@ -325,10 +343,10 @@ where .collect::>(); if interested_peers.is_empty() { - trace!( - target: TARGET, - "No peers are interested in gossip for relay parent {:?}", - message.relay_parent + tracing::trace!( + target: LOG_TARGET, + relay_parent = %message.relay_parent, + "no peers are interested in gossip for relay parent", ); } else { ctx.send_message(AllMessages::NetworkBridge( @@ -337,25 +355,26 @@ where message.into_validation_protocol(), ), )) - .await?; + .await; } - Ok(()) } /// Handle an incoming message from a peer. 
+#[tracing::instrument(level = "trace", skip(ctx, metrics), fields(subsystem = LOG_TARGET))] async fn process_incoming_peer_message( ctx: &mut Context, state: &mut ProtocolState, metrics: &Metrics, origin: PeerId, message: BitfieldGossipMessage, -) -> SubsystemResult<()> +) where Context: SubsystemContext, { // we don't care about this, not part of our view. if !state.view.contains(&message.relay_parent) { - return modify_reputation(ctx, origin, COST_NOT_IN_VIEW).await; + modify_reputation(ctx, origin, COST_NOT_IN_VIEW).await; + return; } // Ignore anything the overseer did not tell this subsystem to work on. @@ -363,17 +382,19 @@ where let job_data: &mut _ = if let Some(ref mut job_data) = job_data { job_data } else { - return modify_reputation(ctx, origin, COST_NOT_IN_VIEW).await; + modify_reputation(ctx, origin, COST_NOT_IN_VIEW).await; + return; }; let validator_set = &job_data.validator_set; if validator_set.is_empty() { - trace!( - target: TARGET, - "Validator set for relay parent {:?} is empty", - &message.relay_parent + tracing::trace!( + target: LOG_TARGET, + relay_parent = %message.relay_parent, + "Validator set is empty", ); - return modify_reputation(ctx, origin, COST_MISSING_PEER_SESSION_KEY).await; + modify_reputation(ctx, origin, COST_MISSING_PEER_SESSION_KEY).await; + return; } // Use the (untrusted) validator index provided by the signed payload @@ -383,7 +404,8 @@ where let validator = if let Some(validator) = validator_set.get(validator_index) { validator.clone() } else { - return modify_reputation(ctx, origin, COST_VALIDATOR_INDEX_INVALID).await; + modify_reputation(ctx, origin, COST_VALIDATOR_INDEX_INVALID).await; + return; }; // Check if the peer already sent us a message for the validator denoted in the message earlier. 
@@ -397,7 +419,8 @@ where if !received_set.contains(&validator) { received_set.insert(validator.clone()); } else { - return modify_reputation(ctx, origin, COST_PEER_DUPLICATE_MESSAGE).await; + modify_reputation(ctx, origin, COST_PEER_DUPLICATE_MESSAGE).await; + return; }; if message @@ -410,17 +433,17 @@ where // only relay_message a message of a validator once if one_per_validator.get(&validator).is_some() { - trace!( - target: TARGET, - "Already received a message for validator at index {}", - validator_index + tracing::trace!( + target: LOG_TARGET, + validator_index, + "already received a message for validator", ); - modify_reputation(ctx, origin, BENEFIT_VALID_MESSAGE).await?; - return Ok(()); + modify_reputation(ctx, origin, BENEFIT_VALID_MESSAGE).await; + return; } one_per_validator.insert(validator.clone(), message.clone()); - relay_message(ctx, job_data, &mut state.peer_views, validator, message).await?; + relay_message(ctx, job_data, &mut state.peer_views, validator, message).await; modify_reputation(ctx, origin, BENEFIT_VALID_MESSAGE_FIRST).await } else { @@ -430,15 +453,18 @@ where /// Deal with network bridge updates and track what needs to be tracked /// which depends on the message type received. 
+#[tracing::instrument(level = "trace", skip(ctx, metrics), fields(subsystem = LOG_TARGET))] async fn handle_network_msg( ctx: &mut Context, state: &mut ProtocolState, metrics: &Metrics, bridge_message: NetworkBridgeEvent, -) -> SubsystemResult<()> +) where Context: SubsystemContext, { + let _timer = metrics.time_handle_network_msg(); + match bridge_message { NetworkBridgeEvent::PeerConnected(peerid, _role) => { // insert if none already present @@ -449,35 +475,36 @@ where state.peer_views.remove(&peerid); } NetworkBridgeEvent::PeerViewChange(peerid, view) => { - handle_peer_view_change(ctx, state, peerid, view).await?; + handle_peer_view_change(ctx, state, peerid, view).await; } NetworkBridgeEvent::OurViewChange(view) => { - handle_our_view_change(state, view)?; + handle_our_view_change(state, view); } NetworkBridgeEvent::PeerMessage(remote, message) => { match message { protocol_v1::BitfieldDistributionMessage::Bitfield(relay_parent, bitfield) => { - trace!(target: TARGET, "Received bitfield gossip from peer {:?}", &remote); + tracing::trace!(target: LOG_TARGET, peer_id = %remote, "received bitfield gossip from peer"); let gossiped_bitfield = BitfieldGossipMessage { relay_parent, signed_availability: bitfield, }; - process_incoming_peer_message(ctx, state, metrics, remote, gossiped_bitfield).await?; + process_incoming_peer_message(ctx, state, metrics, remote, gossiped_bitfield).await; } } } } - Ok(()) } /// Handle the changes necassary when our view changes. 
-fn handle_our_view_change(state: &mut ProtocolState, view: View) -> SubsystemResult<()> { +#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))] +fn handle_our_view_change(state: &mut ProtocolState, view: View) { let old_view = std::mem::replace(&mut (state.view), view); for added in state.view.difference(&old_view) { if !state.per_relay_parent.contains_key(&added) { - warn!( - target: TARGET, + tracing::warn!( + target: LOG_TARGET, + added = %added, "Our view contains {} but the overseer never told use we should work on this", &added ); @@ -487,26 +514,22 @@ fn handle_our_view_change(state: &mut ProtocolState, view: View) -> SubsystemRes // cleanup relay parents we are not interested in any more let _ = state.per_relay_parent.remove(&removed); } - Ok(()) } // Send the difference between two views which were not sent // to that particular peer. +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn handle_peer_view_change( ctx: &mut Context, state: &mut ProtocolState, origin: PeerId, view: View, -) -> SubsystemResult<()> +) where Context: SubsystemContext, { - let current = state.peer_views.entry(origin.clone()).or_default(); - - let added: Vec = view.difference(&*current).cloned().collect(); - - *current = view; + let added = state.peer_views.entry(origin.clone()).or_default().replace_difference(view).cloned().collect::>(); // Send all messages we've seen before and the peer is now interested // in to that peer. @@ -536,31 +559,29 @@ where .collect(); for (validator, message) in delta_set.into_iter() { - send_tracked_gossip_message(ctx, state, origin.clone(), validator, message).await?; + send_tracked_gossip_message(ctx, state, origin.clone(), validator, message).await; } - - Ok(()) } /// Send a gossip message and track it in the per relay parent data. 
+#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn send_tracked_gossip_message( ctx: &mut Context, state: &mut ProtocolState, dest: PeerId, validator: ValidatorId, message: BitfieldGossipMessage, -) -> SubsystemResult<()> +) where Context: SubsystemContext, { let job_data = if let Some(job_data) = state.per_relay_parent.get_mut(&message.relay_parent) { job_data } else { - return Ok(()); + return; }; - let message_sent_to_peer = &mut (job_data.message_sent_to_peer); - message_sent_to_peer + job_data.message_sent_to_peer .entry(dest.clone()) .or_default() .insert(validator.clone()); @@ -570,10 +591,7 @@ where vec![dest], message.into_validation_protocol(), ), - )) - .await?; - - Ok(()) + )).await; } impl Subsystem for BitfieldDistribution @@ -582,10 +600,8 @@ where { fn start(self, ctx: C) -> SpawnedSubsystem { let future = self.run(ctx) - .map_err(|e| { - SubsystemError::with_origin("bitfield-distribution", e) - }) - .map(|_| ()).boxed(); + .map(|_| Ok(())) + .boxed(); SpawnedSubsystem { name: "bitfield-distribution-subsystem", @@ -595,6 +611,7 @@ where } /// Query our validator set and signing context for a particular relay parent. +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn query_basics( ctx: &mut Context, relay_parent: Hash, @@ -616,7 +633,7 @@ where )); ctx.send_messages(std::iter::once(query_validators).chain(std::iter::once(query_signing))) - .await?; + .await; match (validators_rx.await?, session_rx.await?) 
{ (Ok(v), Ok(s)) => Ok(Some(( @@ -624,7 +641,7 @@ where SigningContext { parent_hash: relay_parent, session_index: s }, ))), (Err(e), _) | (_, Err(e)) => { - warn!(target: TARGET, "Failed to fetch basics from runtime API: {:?}", e); + tracing::warn!(target: LOG_TARGET, err = ?e, "Failed to fetch basics from runtime API"); Ok(None) } } @@ -634,6 +651,9 @@ where struct MetricsInner { gossipped_own_availability_bitfields: prometheus::Counter, received_availability_bitfields: prometheus::Counter, + active_leaves_update: prometheus::Histogram, + handle_bitfield_distribution: prometheus::Histogram, + handle_network_msg: prometheus::Histogram, } /// Bitfield Distribution metrics. @@ -652,6 +672,21 @@ impl Metrics { metrics.received_availability_bitfields.inc(); } } + + /// Provide a timer for `active_leaves_update` which observes on drop. + fn time_active_leaves_update(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.active_leaves_update.start_timer()) + } + + /// Provide a timer for `handle_bitfield_distribution` which observes on drop. + fn time_handle_bitfield_distribution(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.handle_bitfield_distribution.start_timer()) + } + + /// Provide a timer for `handle_network_msg` which observes on drop. 
+ fn time_handle_network_msg(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.handle_network_msg.start_timer()) + } } impl metrics::Metrics for Metrics { @@ -671,6 +706,33 @@ impl metrics::Metrics for Metrics { )?, registry, )?, + active_leaves_update: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_bitfield_distribution_active_leaves_update", + "Time spent within `bitfield_distribution::active_leaves_update`", + ) + )?, + registry, + )?, + handle_bitfield_distribution: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_bitfield_distribution_handle_bitfield_distribution", + "Time spent within `bitfield_distribution::handle_bitfield_distribution`", + ) + )?, + registry, + )?, + handle_network_msg: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_bitfield_distribution_handle_network_msg", + "Time spent within `bitfield_distribution::handle_network_msg`", + ) + )?, + registry, + )?, }; Ok(Metrics(Some(metrics))) } @@ -688,7 +750,7 @@ mod test { use polkadot_node_subsystem_util::TimeoutExt; use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; use sp_application_crypto::AppKey; - use sc_keystore::LocalKeystore; + use sp_keystore::testing::KeyStore; use std::sync::Arc; use std::time::Duration; use assert_matches::assert_matches; @@ -700,19 +762,12 @@ mod test { ]; } - macro_rules! peers { - ( $( $peer:expr ),* $(,)? ) => [ - vec![ $( $peer.clone() ),* ] - ]; - } - macro_rules! 
launch { ($fut:expr) => { $fut .timeout(Duration::from_millis(10)) .await .expect("10ms is more than enough for sending messages.") - .expect("Error values should really never occur.") }; } @@ -750,7 +805,6 @@ mod test { fn state_with_view( view: View, relay_parent: Hash, - keystore_path: &tempfile::TempDir, ) -> (ProtocolState, SigningContext, SyncCryptoStorePtr, ValidatorId) { let mut state = ProtocolState::default(); @@ -759,8 +813,7 @@ mod test { parent_hash: relay_parent.clone(), }; - let keystore : SyncCryptoStorePtr = Arc::new(LocalKeystore::open(keystore_path.path(), None) - .expect("Creates keystore")); + let keystore : SyncCryptoStorePtr = Arc::new(KeyStore::new()); let validator = SyncCryptoStore::sr25519_generate_new(&*keystore, ValidatorId::ID, None) .expect("generating sr25519 key not to fail"); @@ -799,18 +852,20 @@ mod test { }; // another validator not part of the validatorset - let keystore_path = tempfile::tempdir().expect("Creates keystore path"); - let keystore : SyncCryptoStorePtr = Arc::new(LocalKeystore::open(keystore_path.path(), None) - .expect("Creates keystore")); + let keystore : SyncCryptoStorePtr = Arc::new(KeyStore::new()); let malicious = SyncCryptoStore::sr25519_generate_new(&*keystore, ValidatorId::ID, None) .expect("Malicious key created"); let validator = SyncCryptoStore::sr25519_generate_new(&*keystore, ValidatorId::ID, None) .expect("Malicious key created"); let payload = AvailabilityBitfield(bitvec![bitvec::order::Lsb0, u8; 1u8; 32]); - let signed = - executor::block_on(Signed::::sign(&keystore, payload, &signing_context, 0, &malicious.into())) - .expect("should be signed"); + let signed = executor::block_on(Signed::::sign( + &keystore, + payload, + &signing_context, + 0, + &malicious.into(), + )).expect("should be signed"); let msg = BitfieldGossipMessage { relay_parent: hash_a.clone(), @@ -863,17 +918,19 @@ mod test { let peer_b = PeerId::random(); assert_ne!(peer_a, peer_b); - let keystore_path = 
tempfile::tempdir().expect("Creates keystore path"); // validator 0 key pair - let (mut state, signing_context, keystore, validator) = - state_with_view(view![hash_a, hash_b], hash_a.clone(), &keystore_path); + let (mut state, signing_context, keystore, validator) = state_with_view(view![hash_a, hash_b], hash_a.clone()); state.peer_views.insert(peer_b.clone(), view![hash_a]); let payload = AvailabilityBitfield(bitvec![bitvec::order::Lsb0, u8; 1u8; 32]); - let signed = - executor::block_on(Signed::::sign(&keystore, payload, &signing_context, 42, &validator)) - .expect("should be signed"); + let signed = executor::block_on(Signed::::sign( + &keystore, + payload, + &signing_context, + 42, + &validator, + )).expect("should be signed"); let msg = BitfieldGossipMessage { relay_parent: hash_a.clone(), @@ -919,16 +976,18 @@ mod test { let peer_b = PeerId::random(); assert_ne!(peer_a, peer_b); - let keystore_path = tempfile::tempdir().expect("Creates keystore path"); // validator 0 key pair - let (mut state, signing_context, keystore, validator) = - state_with_view(view![hash_a, hash_b], hash_a.clone(), &keystore_path); + let (mut state, signing_context, keystore, validator) = state_with_view(view![hash_a, hash_b], hash_a.clone()); // create a signed message by validator 0 let payload = AvailabilityBitfield(bitvec![bitvec::order::Lsb0, u8; 1u8; 32]); - let signed_bitfield = - executor::block_on(Signed::::sign(&keystore, payload, &signing_context, 0, &validator)) - .expect("should be signed"); + let signed_bitfield = executor::block_on(Signed::::sign( + &keystore, + payload, + &signing_context, + 0, + &validator, + )).expect("should be signed"); let msg = BitfieldGossipMessage { relay_parent: hash_a.clone(), @@ -957,6 +1016,7 @@ mod test { assert_matches!( handle.recv().await, AllMessages::Provisioner(ProvisionerMessage::ProvisionableData( + _, ProvisionableData::Bitfield(hash, signed) )) => { assert_eq!(hash, hash_a); @@ -1018,6 +1078,101 @@ mod test { }); } + #[test] + fn 
do_not_relay_message_twice() { + let _ = env_logger::builder() + .filter(None, log::LevelFilter::Trace) + .is_test(true) + .try_init(); + + let hash = Hash::random(); + + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + assert_ne!(peer_a, peer_b); + + // validator 0 key pair + let (mut state, signing_context, keystore, validator) = state_with_view(view![hash], hash.clone()); + + // create a signed message by validator 0 + let payload = AvailabilityBitfield(bitvec![bitvec::order::Lsb0, u8; 1u8; 32]); + let signed_bitfield = executor::block_on(Signed::::sign( + &keystore, + payload, + &signing_context, + 0, + &validator, + )).expect("should be signed"); + + state.peer_views.insert(peer_b.clone(), view![hash]); + state.peer_views.insert(peer_a.clone(), view![hash]); + + let msg = BitfieldGossipMessage { + relay_parent: hash.clone(), + signed_availability: signed_bitfield.clone(), + }; + + let pool = sp_core::testing::TaskExecutor::new(); + let (mut ctx, mut handle) = + make_subsystem_context::(pool); + + executor::block_on(async move { + relay_message( + &mut ctx, + state.per_relay_parent.get_mut(&hash).unwrap(), + &mut state.peer_views, + validator.clone(), + msg.clone(), + ).await; + + assert_matches!( + handle.recv().await, + AllMessages::Provisioner(ProvisionerMessage::ProvisionableData( + _, + ProvisionableData::Bitfield(h, signed) + )) => { + assert_eq!(h, hash); + assert_eq!(signed, signed_bitfield) + } + ); + + assert_matches!( + handle.recv().await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::SendValidationMessage(peers, send_msg), + ) => { + assert_eq!(2, peers.len()); + assert!(peers.contains(&peer_a)); + assert!(peers.contains(&peer_b)); + assert_eq!(send_msg, msg.clone().into_validation_protocol()); + } + ); + + // Relaying the message a second time shouldn't work. 
+ relay_message( + &mut ctx, + state.per_relay_parent.get_mut(&hash).unwrap(), + &mut state.peer_views, + validator.clone(), + msg.clone(), + ).await; + + assert_matches!( + handle.recv().await, + AllMessages::Provisioner(ProvisionerMessage::ProvisionableData( + _, + ProvisionableData::Bitfield(h, signed) + )) => { + assert_eq!(h, hash); + assert_eq!(signed, signed_bitfield) + } + ); + + // There shouldn't be any other message + assert!(handle.recv().timeout(Duration::from_millis(10)).await.is_none()); + }); + } + #[test] fn changing_view() { let _ = env_logger::builder() @@ -1032,16 +1187,18 @@ mod test { let peer_b = PeerId::random(); assert_ne!(peer_a, peer_b); - let keystore_path = tempfile::tempdir().expect("Creates keystore path"); // validator 0 key pair - let (mut state, signing_context, keystore, validator) = - state_with_view(view![hash_a, hash_b], hash_a.clone(), &keystore_path); + let (mut state, signing_context, keystore, validator) = state_with_view(view![hash_a, hash_b], hash_a.clone()); // create a signed message by validator 0 let payload = AvailabilityBitfield(bitvec![bitvec::order::Lsb0, u8; 1u8; 32]); - let signed_bitfield = - executor::block_on(Signed::::sign(&keystore, payload, &signing_context, 0, &validator)) - .expect("should be signed"); + let signed_bitfield = executor::block_on(Signed::::sign( + &keystore, + payload, + &signing_context, + 0, + &validator, + )).expect("should be signed"); let msg = BitfieldGossipMessage { relay_parent: hash_a.clone(), @@ -1085,6 +1242,7 @@ mod test { assert_matches!( handle.recv().await, AllMessages::Provisioner(ProvisionerMessage::ProvisionableData( + _, ProvisionableData::Bitfield(hash, signed) )) => { assert_eq!(hash, hash_a); @@ -1092,17 +1250,6 @@ mod test { } ); - // gossip to the network - assert_matches!( - handle.recv().await, - AllMessages::NetworkBridge(NetworkBridgeMessage::SendValidationMessage ( - peers, out_msg, - )) => { - assert_eq!(peers, peers![peer_b]); - assert_eq!(out_msg, 
msg.clone().into_validation_protocol()); - } - ); - // reputation change for peer B assert_matches!( handle.recv().await, @@ -1185,4 +1332,88 @@ mod test { }); } + + #[test] + fn do_not_send_message_back_to_origin() { + let _ = env_logger::builder() + .filter(None, log::LevelFilter::Trace) + .is_test(true) + .try_init(); + + let hash: Hash = [0; 32].into(); + + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + assert_ne!(peer_a, peer_b); + + // validator 0 key pair + let (mut state, signing_context, keystore, validator) = state_with_view(view![hash], hash); + + // create a signed message by validator 0 + let payload = AvailabilityBitfield(bitvec![bitvec::order::Lsb0, u8; 1u8; 32]); + let signed_bitfield = executor::block_on(Signed::::sign( + &keystore, + payload, + &signing_context, + 0, + &validator, + )).expect("should be signed"); + + state.peer_views.insert(peer_b.clone(), view![hash]); + state.peer_views.insert(peer_a.clone(), view![hash]); + + let msg = BitfieldGossipMessage { + relay_parent: hash.clone(), + signed_availability: signed_bitfield.clone(), + }; + + let pool = sp_core::testing::TaskExecutor::new(); + let (mut ctx, mut handle) = + make_subsystem_context::(pool); + + executor::block_on(async move { + // send a first message + launch!(handle_network_msg( + &mut ctx, + &mut state, + &Default::default(), + NetworkBridgeEvent::PeerMessage( + peer_b.clone(), + msg.clone().into_network_message(), + ), + )); + + assert_matches!( + handle.recv().await, + AllMessages::Provisioner(ProvisionerMessage::ProvisionableData( + _, + ProvisionableData::Bitfield(hash, signed) + )) => { + assert_eq!(hash, hash); + assert_eq!(signed, signed_bitfield) + } + ); + + assert_matches!( + handle.recv().await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::SendValidationMessage(peers, send_msg), + ) => { + assert_eq!(1, peers.len()); + assert!(peers.contains(&peer_a)); + assert_eq!(send_msg, msg.clone().into_validation_protocol()); + } + ); + + 
assert_matches!( + handle.recv().await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::ReportPeer(peer, rep) + ) => { + assert_eq!(peer, peer_b); + assert_eq!(rep, BENEFIT_VALID_MESSAGE_FIRST) + } + ); + }); + } } diff --git a/node/network/bridge/Cargo.toml b/node/network/bridge/Cargo.toml index 32b41133574c4062d172abd59af2fad9f5c5972e..f7808cd8f2a0599377737bf1914063495d961db1 100644 --- a/node/network/bridge/Cargo.toml +++ b/node/network/bridge/Cargo.toml @@ -5,20 +5,20 @@ authors = ["Parity Technologies "] edition = "2018" [dependencies] -async-trait = "0.1" -futures = "0.3.5" -log = "0.4.11" +async-trait = "0.1.42" +futures = "0.3.8" +tracing = "0.1.22" +tracing-futures = "0.2.4" polkadot-primitives = { path = "../../../primitives" } -parity-scale-codec = "1.3.4" -sc-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } +parity-scale-codec = { version = "1.3.5", default-features = false, features = ["derive"] } +sc-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sc-network = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" } polkadot-node-network-protocol = { path = "../protocol" } [dev-dependencies] -assert_matches = "1.3.0" -parking_lot = "0.10.0" +assert_matches = "1.4.0" +parking_lot = "0.11.1" polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-keyring = { git = 
"https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } diff --git a/node/network/bridge/src/lib.rs b/node/network/bridge/src/lib.rs index 86098b4477e60a194c66ec66abe46f781604f8e3..38cbe369ba41464549a809df88e51005b30506a6 100644 --- a/node/network/bridge/src/lib.rs +++ b/node/network/bridge/src/lib.rs @@ -16,7 +16,7 @@ //! The Network Bridge Subsystem - protocol multiplexer for Polkadot. -#![deny(unused_crate_dependencies, unused_results)] +#![deny(unused_crate_dependencies)] #![warn(missing_docs)] @@ -24,10 +24,9 @@ use parity_scale_codec::{Encode, Decode}; use futures::prelude::*; use futures::future::BoxFuture; use futures::stream::BoxStream; -use futures::channel::{mpsc, oneshot}; +use futures::channel::mpsc; use sc_network::Event as NetworkEvent; -use sp_runtime::ConsensusEngineId; use polkadot_subsystem::{ ActiveLeavesUpdate, FromOverseer, OverseerSignal, Subsystem, SubsystemContext, SpawnedSubsystem, SubsystemError, @@ -56,12 +55,8 @@ mod validator_discovery; /// We use the same limit to compute the view sent to peers locally. const MAX_VIEW_HEADS: usize = 5; -/// The engine ID of the validation protocol. -pub const VALIDATION_PROTOCOL_ID: ConsensusEngineId = *b"pvn1"; /// The protocol name for the validation peer-set. pub const VALIDATION_PROTOCOL_NAME: &'static str = "/polkadot/validation/1"; -/// The engine ID of the collation protocol. -pub const COLLATION_PROTOCOL_ID: ConsensusEngineId = *b"pcn1"; /// The protocol name for the collation peer-set. pub const COLLATION_PROTOCOL_NAME: &'static str = "/polkadot/collation/1"; @@ -73,7 +68,7 @@ const MALFORMED_VIEW_COST: ReputationChange = ReputationChange::new(-500, "Malformed view"); // network bridge log target -const TARGET: &'static str = "network_bridge"; +const LOG_TARGET: &'static str = "network_bridge"; /// Messages received on the network. #[derive(Debug, Encode, Decode, Clone)] @@ -88,10 +83,10 @@ pub enum WireMessage { /// Information about the notifications protocol. 
Should be used during network configuration /// or shortly after startup to register the protocol with the network service. -pub fn notifications_protocol_info() -> Vec<(ConsensusEngineId, std::borrow::Cow<'static, str>)> { +pub fn notifications_protocol_info() -> Vec> { vec![ - (VALIDATION_PROTOCOL_ID, VALIDATION_PROTOCOL_NAME.into()), - (COLLATION_PROTOCOL_ID, COLLATION_PROTOCOL_NAME.into()), + VALIDATION_PROTOCOL_NAME.into(), + COLLATION_PROTOCOL_NAME.into(), ] } @@ -108,8 +103,8 @@ pub enum NetworkAction { pub trait Network: Send + 'static { /// Get a stream of all events occurring on the network. This may include events unrelated /// to the Polkadot protocol - the user of this function should filter only for events related - /// to the [`VALIDATION_PROTOCOL_ID`](VALIDATION_PROTOCOL_ID) - /// or [`COLLATION_PROTOCOL_ID`](COLLATION_PROTOCOL_ID) + /// to the [`VALIDATION_PROTOCOL_NAME`](VALIDATION_PROTOCOL_NAME) + /// or [`COLLATION_PROTOCOL_NAME`](COLLATION_PROTOCOL_NAME) fn event_stream(&mut self) -> BoxStream<'static, NetworkEvent>; /// Get access to an underlying sink for all network actions. 
@@ -141,6 +136,7 @@ impl Network for Arc> { sc_network::NetworkService::event_stream(self, "polkadot-network-bridge").boxed() } + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn action_sink<'a>(&'a mut self) -> Pin + Send + 'a>> { @@ -158,20 +154,23 @@ impl Network for Arc> { fn start_send(self: Pin<&mut Self>, action: NetworkAction) -> SubsystemResult<()> { match action { - NetworkAction::ReputationChange(peer, cost_benefit) => self.0.report_peer( - peer, - cost_benefit, - ), + NetworkAction::ReputationChange(peer, cost_benefit) => { + tracing::debug!(target: LOG_TARGET, "Changing reputation: {:?} for {}", cost_benefit, peer); + self.0.report_peer( + peer, + cost_benefit, + ) + } NetworkAction::WriteNotification(peer, peer_set, message) => { match peer_set { PeerSet::Validation => self.0.write_notification( peer, - VALIDATION_PROTOCOL_ID, + VALIDATION_PROTOCOL_NAME.into(), message, ), PeerSet::Collation => self.0.write_notification( peer, - COLLATION_PROTOCOL_ID, + COLLATION_PROTOCOL_NAME.into(), message, ), } @@ -231,7 +230,6 @@ impl Subsystem for NetworkBridge .map_err(|e| { SubsystemError::with_origin("network-bridge", e) }) - .map(|_| ()) .boxed(); SpawnedSubsystem { name: "network-bridge-subsystem", @@ -252,7 +250,6 @@ enum Action { ConnectToValidators { validator_ids: Vec, connected: mpsc::Sender<(AuthorityDiscoveryId, PeerId)>, - revoke: oneshot::Receiver<()>, }, ReportPeer(PeerId, ReputationChange), @@ -270,6 +267,7 @@ enum Action { Nop, } +#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))] fn action_from_overseer_message( res: polkadot_subsystem::SubsystemResult>, ) -> Action { @@ -283,50 +281,48 @@ fn action_from_overseer_message( => Action::SendValidationMessage(peers, msg), NetworkBridgeMessage::SendCollationMessage(peers, msg) => Action::SendCollationMessage(peers, msg), - NetworkBridgeMessage::ConnectToValidators { - validator_ids, - connected, - revoke, - } => Action::ConnectToValidators { 
validator_ids, connected, revoke }, + NetworkBridgeMessage::ConnectToValidators { validator_ids, connected } + => Action::ConnectToValidators { validator_ids, connected }, }, Ok(FromOverseer::Signal(OverseerSignal::BlockFinalized(_))) => Action::Nop, Err(e) => { - log::warn!(target: TARGET, "Shutting down Network Bridge due to error {:?}", e); + tracing::warn!(target: LOG_TARGET, err = ?e, "Shutting down Network Bridge due to error"); Action::Abort } } } +#[tracing::instrument(level = "trace", fields(subsystem = LOG_TARGET))] fn action_from_network_message(event: Option) -> Action { match event { None => { - log::info!(target: TARGET, "Shutting down Network Bridge: underlying event stream concluded"); + tracing::info!(target: LOG_TARGET, "Shutting down Network Bridge: underlying event stream concluded"); Action::Abort } Some(NetworkEvent::Dht(_)) => Action::Nop, - Some(NetworkEvent::NotificationStreamOpened { remote, engine_id, role }) => { + Some(NetworkEvent::NotificationStreamOpened { remote, protocol, role }) => { let role = role.into(); - match engine_id { - x if x == VALIDATION_PROTOCOL_ID + match protocol { + x if x == VALIDATION_PROTOCOL_NAME => Action::PeerConnected(PeerSet::Validation, remote, role), - x if x == COLLATION_PROTOCOL_ID + x if x == COLLATION_PROTOCOL_NAME => Action::PeerConnected(PeerSet::Collation, remote, role), _ => Action::Nop, } } - Some(NetworkEvent::NotificationStreamClosed { remote, engine_id }) => { - match engine_id { - x if x == VALIDATION_PROTOCOL_ID + Some(NetworkEvent::NotificationStreamClosed { remote, protocol }) => { + match protocol { + x if x == VALIDATION_PROTOCOL_NAME => Action::PeerDisconnected(PeerSet::Validation, remote), - x if x == COLLATION_PROTOCOL_ID + x if x == COLLATION_PROTOCOL_NAME => Action::PeerDisconnected(PeerSet::Collation, remote), _ => Action::Nop, } } Some(NetworkEvent::NotificationsReceived { remote, messages }) => { let v_messages: Result, _> = messages.iter() - .filter(|(engine_id, _)| engine_id == 
&VALIDATION_PROTOCOL_ID) + .filter(|(protocol, _)| protocol == &VALIDATION_PROTOCOL_NAME) .map(|(_, msg_bytes)| WireMessage::decode(&mut msg_bytes.as_ref())) .collect(); @@ -336,7 +332,7 @@ fn action_from_network_message(event: Option) -> Action { }; let c_messages: Result, _> = messages.iter() - .filter(|(engine_id, _)| engine_id == &COLLATION_PROTOCOL_ID) + .filter(|(protocol, _)| protocol == &COLLATION_PROTOCOL_NAME) .map(|(_, msg_bytes)| WireMessage::decode(&mut msg_bytes.as_ref())) .collect(); @@ -356,6 +352,7 @@ fn construct_view(live_heads: &[Hash]) -> View { View(live_heads.iter().rev().take(MAX_VIEW_HEADS).cloned().collect()) } +#[tracing::instrument(level = "trace", skip(net, ctx, validation_peers, collation_peers), fields(subsystem = LOG_TARGET))] async fn update_view( net: &mut impl Network, ctx: &mut impl SubsystemContext, @@ -381,27 +378,22 @@ async fn update_view( WireMessage::ViewUpdate(new_view.clone()), ).await?; - if let Err(e) = dispatch_validation_event_to_all( + dispatch_validation_event_to_all( NetworkBridgeEvent::OurViewChange(new_view.clone()), ctx, - ).await { - log::warn!(target: TARGET, "Aborting - Failure to dispatch messages to overseer"); - return Err(e) - } + ).await; - if let Err(e) = dispatch_collation_event_to_all( + dispatch_collation_event_to_all( NetworkBridgeEvent::OurViewChange(new_view.clone()), ctx, - ).await { - log::warn!(target: TARGET, "Aborting - Failure to dispatch messages to overseer"); - return Err(e) - } + ).await; Ok(()) } // Handle messages on a specific peer-set. The peer is expected to be connected on that // peer-set. 
+#[tracing::instrument(level = "trace", skip(peers, messages, net), fields(subsystem = LOG_TARGET))] async fn handle_peer_messages( peer: PeerId, peers: &mut HashMap, @@ -448,6 +440,7 @@ async fn handle_peer_messages( Ok(outgoing_messages) } +#[tracing::instrument(level = "trace", skip(net, peers), fields(subsystem = LOG_TARGET))] async fn send_validation_message( net: &mut impl Network, peers: I, @@ -460,6 +453,7 @@ async fn send_validation_message( send_message(net, peers, PeerSet::Validation, message).await } +#[tracing::instrument(level = "trace", skip(net, peers), fields(subsystem = LOG_TARGET))] async fn send_collation_message( net: &mut impl Network, peers: I, @@ -511,21 +505,22 @@ async fn send_message( async fn dispatch_validation_event_to_all( event: NetworkBridgeEvent, ctx: &mut impl SubsystemContext, -) -> SubsystemResult<()> { +) { dispatch_validation_events_to_all(std::iter::once(event), ctx).await } async fn dispatch_collation_event_to_all( event: NetworkBridgeEvent, ctx: &mut impl SubsystemContext, -) -> SubsystemResult<()> { +) { dispatch_collation_events_to_all(std::iter::once(event), ctx).await } +#[tracing::instrument(level = "trace", skip(events, ctx), fields(subsystem = LOG_TARGET))] async fn dispatch_validation_events_to_all( events: I, ctx: &mut impl SubsystemContext, -) -> SubsystemResult<()> +) where I: IntoIterator>, I::IntoIter: Send, @@ -553,10 +548,11 @@ async fn dispatch_validation_events_to_all( ctx.send_messages(events.into_iter().flat_map(messages_for)).await } +#[tracing::instrument(level = "trace", skip(events, ctx), fields(subsystem = LOG_TARGET))] async fn dispatch_collation_events_to_all( events: I, ctx: &mut impl SubsystemContext, -) -> SubsystemResult<()> +) where I: IntoIterator>, I::IntoIter: Send, @@ -570,6 +566,7 @@ async fn dispatch_collation_events_to_all( ctx.send_messages(events.into_iter().flat_map(messages_for)).await } +#[tracing::instrument(skip(network_service, authority_discovery_service, ctx), fields(subsystem 
= LOG_TARGET))] async fn run_network( mut network_service: N, mut authority_discovery_service: AD, @@ -624,12 +621,10 @@ where Action::ConnectToValidators { validator_ids, connected, - revoke, } => { let (ns, ads) = validator_discovery.on_request( validator_ids, connected, - revoke, network_service, authority_discovery_service, ).await; @@ -668,7 +663,7 @@ where view: View(Vec::new()), }); - let res = match peer_set { + match peer_set { PeerSet::Validation => dispatch_validation_events_to_all( vec![ NetworkBridgeEvent::PeerConnected(peer.clone(), role), @@ -689,11 +684,6 @@ where ], &mut ctx, ).await, - }; - - if let Err(e) = res { - log::warn!("Aborting - Failure to dispatch messages to overseer"); - return Err(e); } } } @@ -704,10 +694,10 @@ where PeerSet::Collation => &mut collation_peers, }; - validator_discovery.on_peer_disconnected(&peer, &mut authority_discovery_service).await; + validator_discovery.on_peer_disconnected(&peer); if peer_map.remove(&peer).is_some() { - let res = match peer_set { + match peer_set { PeerSet::Validation => dispatch_validation_event_to_all( NetworkBridgeEvent::PeerDisconnected(peer), &mut ctx, @@ -716,14 +706,6 @@ where NetworkBridgeEvent::PeerDisconnected(peer), &mut ctx, ).await, - }; - - if let Err(e) = res { - log::warn!( - target: TARGET, - "Aborting - Failure to dispatch messages to overseer", - ); - return Err(e) } } }, @@ -736,16 +718,7 @@ where &mut network_service, ).await?; - if let Err(e) = dispatch_validation_events_to_all( - events, - &mut ctx, - ).await { - log::warn!( - target: TARGET, - "Aborting - Failure to dispatch messages to overseer", - ); - return Err(e) - } + dispatch_validation_events_to_all(events, &mut ctx).await; } if !c_messages.is_empty() { @@ -756,16 +729,7 @@ where &mut network_service, ).await?; - if let Err(e) = dispatch_collation_events_to_all( - events, - &mut ctx, - ).await { - log::warn!( - target: TARGET, - "Aborting - Failure to dispatch messages to overseer", - ); - return Err(e) - } + 
dispatch_collation_events_to_all(events, &mut ctx).await; } }, } @@ -828,10 +792,10 @@ mod tests { ) } - fn peer_set_engine_id(peer_set: PeerSet) -> ConsensusEngineId { + fn peer_set_protocol(peer_set: PeerSet) -> std::borrow::Cow<'static, str> { match peer_set { - PeerSet::Validation => VALIDATION_PROTOCOL_ID, - PeerSet::Collation => COLLATION_PROTOCOL_ID, + PeerSet::Validation => VALIDATION_PROTOCOL_NAME.into(), + PeerSet::Collation => COLLATION_PROTOCOL_NAME.into(), } } @@ -891,7 +855,7 @@ mod tests { async fn connect_peer(&mut self, peer: PeerId, peer_set: PeerSet, role: ObservedRole) { self.send_network_event(NetworkEvent::NotificationStreamOpened { remote: peer, - engine_id: peer_set_engine_id(peer_set), + protocol: peer_set_protocol(peer_set), role: role.into(), }).await; } @@ -899,14 +863,14 @@ mod tests { async fn disconnect_peer(&mut self, peer: PeerId, peer_set: PeerSet) { self.send_network_event(NetworkEvent::NotificationStreamClosed { remote: peer, - engine_id: peer_set_engine_id(peer_set), + protocol: peer_set_protocol(peer_set), }).await; } async fn peer_message(&mut self, peer: PeerId, peer_set: PeerSet, message: Vec) { self.send_network_event(NetworkEvent::NotificationsReceived { remote: peer, - messages: vec![(peer_set_engine_id(peer_set), message.into())], + messages: vec![(peer_set_protocol(peer_set), message.into())], }).await; } diff --git a/node/network/bridge/src/validator_discovery.rs b/node/network/bridge/src/validator_discovery.rs index 531e41cd629d0445ca4265d208955ec6e5252514..71a3d4a566da1fe60eef451f1c3cac6f87adabf5 100644 --- a/node/network/bridge/src/validator_discovery.rs +++ b/node/network/bridge/src/validator_discovery.rs @@ -21,14 +21,15 @@ use std::collections::{HashSet, HashMap, hash_map}; use std::sync::Arc; use async_trait::async_trait; -use futures::channel::{mpsc, oneshot}; +use futures::channel::mpsc; -use sc_network::Multiaddr; +use sc_network::multiaddr::{Multiaddr, Protocol}; use sc_authority_discovery::Service as 
AuthorityDiscoveryService; use polkadot_node_network_protocol::PeerId; use polkadot_primitives::v1::{AuthorityDiscoveryId, Block, Hash}; const PRIORITY_GROUP: &'static str = "parachain_validators"; +const LOG_TARGET: &str = "validator_discovery"; /// An abstraction over networking for the purposes of validator discovery service. #[async_trait] @@ -70,13 +71,11 @@ impl AuthorityDiscovery for AuthorityDiscoveryService { } } - /// This struct tracks the state for one `ConnectToValidators` request. struct NonRevokedConnectionRequestState { requested: Vec, pending: HashSet, sender: mpsc::Sender<(AuthorityDiscoveryId, PeerId)>, - revoke: oneshot::Receiver<()>, } impl NonRevokedConnectionRequestState { @@ -85,13 +84,11 @@ impl NonRevokedConnectionRequestState { requested: Vec, pending: HashSet, sender: mpsc::Sender<(AuthorityDiscoveryId, PeerId)>, - revoke: oneshot::Receiver<()>, ) -> Self { Self { requested, pending, sender, - revoke, } } @@ -105,9 +102,7 @@ impl NonRevokedConnectionRequestState { /// Returns `true` if the request is revoked. pub fn is_revoked(&mut self) -> bool { - self.revoke - .try_recv() - .map_or(true, |r| r.is_some()) + self.sender.is_closed() } pub fn requested(&self) -> &[AuthorityDiscoveryId] { @@ -115,30 +110,84 @@ impl NonRevokedConnectionRequestState { } } +/// Will be called by [`Service::on_request`] when a request was revoked. +/// +/// Takes the `map` of requested validators and the `id` of the validator that should be revoked. +/// +/// Returns `Some(id)` iff the request counter is `0`. 
+fn on_revoke(map: &mut HashMap, id: AuthorityDiscoveryId) -> Option { + if let hash_map::Entry::Occupied(mut entry) = map.entry(id) { + if entry.get_mut().saturating_sub(1) == 0 { + return Some(entry.remove_entry().0); + } + } + + None +} + +fn peer_id_from_multiaddr(addr: &Multiaddr) -> Option { + addr.iter().last().and_then(|protocol| if let Protocol::P2p(multihash) = protocol { + PeerId::from_multihash(multihash).ok() + } else { + None + }) +} pub(super) struct Service { - // we assume one PeerId per AuthorityId is enough - connected_validators: HashMap, - // the `u64` counts the number of pending non-revoked requests for this validator + // Peers that are connected to us and authority ids associated to them. + connected_peers: HashMap>, + // The `u64` counts the number of pending non-revoked requests for this validator // note: the validators in this map are not necessarily present // in the `connected_validators` map. // Invariant: the value > 0 for non-revoked requests. requested_validators: HashMap, non_revoked_discovery_requests: Vec, // PhantomData used to make the struct generic instead of having generic methods - network: PhantomData, - authority_discovery: PhantomData, + _phantom: PhantomData<(N, AD)>, } impl Service { pub fn new() -> Self { Self { - connected_validators: HashMap::new(), + connected_peers: HashMap::new(), requested_validators: HashMap::new(), non_revoked_discovery_requests: Vec::new(), - network: PhantomData, - authority_discovery: PhantomData, + _phantom: PhantomData, + } + } + + /// Find connected validators using the given `validator_ids`. + /// + /// Returns a [`HashMap`] that contains the found [`AuthorityDiscoveryId`]'s and their associated [`PeerId`]'s. 
+ #[tracing::instrument(level = "trace", skip(self, authority_discovery_service), fields(subsystem = LOG_TARGET))] + async fn find_connected_validators( + &mut self, + validator_ids: &[AuthorityDiscoveryId], + authority_discovery_service: &mut AD, + ) -> HashMap { + let mut result = HashMap::new(); + + for id in validator_ids { + // First check if we already cached the validator + if let Some(pid) = self.connected_peers + .iter() + .find_map(|(pid, ids)| if ids.contains(&id) { Some(pid) } else { None }) { + result.insert(id.clone(), pid.clone()); + continue; + } + + // If not ask the authority discovery + if let Some(addresses) = authority_discovery_service.get_addresses_by_authority_id(id.clone()).await { + for peer_id in addresses.iter().filter_map(peer_id_from_multiaddr) { + if let Some(ids) = self.connected_peers.get_mut(&peer_id) { + ids.insert(id.clone()); + result.insert(id.clone(), peer_id.clone()); + } + } + } } + + result } /// On a new connection request, a priority group update will be issued. @@ -146,49 +195,25 @@ impl Service { /// from them at least until all the pending requests containing them are revoked. /// /// This method will also clean up all previously revoked requests. - // it takes `network_service` and `authority_discovery_service` by value - // and returns them as a workaround for the Future: Send requirement imposed by async fn impl. + /// it takes `network_service` and `authority_discovery_service` by value + /// and returns them as a workaround for the Future: Send requirement imposed by async fn impl. 
+ #[tracing::instrument(level = "trace", skip(self, connected, network_service, authority_discovery_service), fields(subsystem = LOG_TARGET))] pub async fn on_request( &mut self, validator_ids: Vec, mut connected: mpsc::Sender<(AuthorityDiscoveryId, PeerId)>, - revoke: oneshot::Receiver<()>, mut network_service: N, mut authority_discovery_service: AD, ) -> (N, AD) { const MAX_ADDR_PER_PEER: usize = 3; - let already_connected = validator_ids.iter() - .cloned() - .filter_map(|id| { - let counter = self.requested_validators.entry(id.clone()).or_default(); - // if the counter overflows, there is something really wrong going on - *counter += 1; - - self.connected_validators - .get(&id) - .map(|peer| (id, peer.clone())) - }); - - - let on_revoke = |map: &mut HashMap, id: AuthorityDiscoveryId| -> Option { - match map.entry(id) { - hash_map::Entry::Occupied(mut entry) => { - *entry.get_mut() -= 1; - if *entry.get() == 0 { - return Some(entry.remove_entry().0); - } - } - hash_map::Entry::Vacant(_) => { - // should be unreachable - } - } - None - }; + // Increment the counter of how many times the validators were requested. 
+ validator_ids.iter().for_each(|id| *self.requested_validators.entry(id.clone()).or_default() += 1); + let already_connected = self.find_connected_validators(&validator_ids, &mut authority_discovery_service).await; // try to send already connected peers - for (id, peer) in already_connected { - match connected.try_send((id, peer)) { + for (id, peer) in already_connected.iter() { + match connected.try_send((id.clone(), peer.clone())) { Err(e) if e.is_disconnected() => { // the request is already revoked for peer_id in validator_ids { @@ -208,17 +233,15 @@ impl Service { // collect multiaddress of validators let mut multiaddr_to_add = HashSet::new(); - for authority in validator_ids.iter().cloned() { - let result = authority_discovery_service.get_addresses_by_authority_id(authority).await; + for authority in validator_ids.iter() { + let result = authority_discovery_service.get_addresses_by_authority_id(authority.clone()).await; if let Some(addresses) = result { // We might have several `PeerId`s per `AuthorityId` // depending on the number of sentry nodes, // so we limit the max number of sentries per node to connect to. 
// They are going to be removed soon though: // https://github.com/paritytech/substrate/issues/6845 - for addr in addresses.into_iter().take(MAX_ADDR_PER_PEER) { - let _ = multiaddr_to_add.insert(addr); - } + multiaddr_to_add.extend(addresses.into_iter().take(MAX_ADDR_PER_PEER)); } } @@ -246,9 +269,7 @@ impl Service { for id in revoked_validators.into_iter() { let result = authority_discovery_service.get_addresses_by_authority_id(id).await; if let Some(addresses) = result { - for addr in addresses.into_iter().take(MAX_ADDR_PER_PEER) { - let _ = multiaddr_to_remove.insert(addr); - } + multiaddr_to_remove.extend(addresses.into_iter()); } } @@ -258,26 +279,27 @@ impl Service { PRIORITY_GROUP.to_owned(), multiaddr_to_add, ).await { - log::warn!(target: super::TARGET, "AuthorityDiscoveryService returned an invalid multiaddress: {}", e); + tracing::warn!(target: LOG_TARGET, err = ?e, "AuthorityDiscoveryService returned an invalid multiaddress"); } // the addresses are known to be valid let _ = network_service.remove_from_priority_group(PRIORITY_GROUP.to_owned(), multiaddr_to_remove).await; let pending = validator_ids.iter() .cloned() - .filter(|id| !self.connected_validators.contains_key(id)) + .filter(|id| !already_connected.contains_key(id)) .collect::>(); self.non_revoked_discovery_requests.push(NonRevokedConnectionRequestState::new( validator_ids, pending, connected, - revoke, )); (network_service, authority_discovery_service) } + /// Should be called when a peer connected. 
+ #[tracing::instrument(level = "trace", skip(self, authority_discovery_service), fields(subsystem = LOG_TARGET))] pub async fn on_peer_connected(&mut self, peer_id: &PeerId, authority_discovery_service: &mut AD) { // check if it's an authority we've been waiting for let maybe_authority = authority_discovery_service.get_authority_id_by_peer_id(peer_id.clone()).await; @@ -285,15 +307,16 @@ impl Service { for request in self.non_revoked_discovery_requests.iter_mut() { let _ = request.on_authority_connected(&authority, peer_id); } - let _ = self.connected_validators.insert(authority, peer_id.clone()); + + self.connected_peers.entry(peer_id.clone()).or_default().insert(authority); + } else { + self.connected_peers.insert(peer_id.clone(), Default::default()); } } - pub async fn on_peer_disconnected(&mut self, peer_id: &PeerId, authority_discovery_service: &mut AD) { - let maybe_authority = authority_discovery_service.get_authority_id_by_peer_id(peer_id.clone()).await; - if let Some(authority) = maybe_authority { - let _ = self.connected_validators.remove(&authority); - } + /// Should be called when a peer disconnected. 
+ pub fn on_peer_disconnected(&mut self, peer_id: &PeerId) { + self.connected_peers.remove(peer_id); } } @@ -319,6 +342,7 @@ mod tests { priority_group: HashSet, } + #[derive(Default)] struct TestAuthorityDiscovery { by_authority_id: HashMap, by_peer_id: HashMap, @@ -386,39 +410,18 @@ mod tests { } #[test] - fn request_is_revoked_on_send() { - let (revoke_tx, revoke_rx) = oneshot::channel(); - let (sender, _receiver) = mpsc::channel(0); + fn request_is_revoked_when_the_receiver_is_dropped() { + let (sender, receiver) = mpsc::channel(0); let mut request = NonRevokedConnectionRequestState::new( Vec::new(), HashSet::new(), sender, - revoke_rx, ); assert!(!request.is_revoked()); - revoke_tx.send(()).unwrap(); - - assert!(request.is_revoked()); - } - - #[test] - fn request_is_revoked_when_the_sender_is_dropped() { - let (revoke_tx, revoke_rx) = oneshot::channel(); - let (sender, _receiver) = mpsc::channel(0); - - let mut request = NonRevokedConnectionRequestState::new( - Vec::new(), - HashSet::new(), - sender, - revoke_rx, - ); - - assert!(!request.is_revoked()); - - drop(revoke_tx); + drop(receiver); assert!(request.is_revoked()); } @@ -435,14 +438,12 @@ mod tests { futures::executor::block_on(async move { let req1 = vec![authority_ids[0].clone(), authority_ids[1].clone()]; let (sender, mut receiver) = mpsc::channel(2); - let (_revoke_tx, revoke_rx) = oneshot::channel(); service.on_peer_connected(&peer_ids[0], &mut ads).await; let _ = service.on_request( req1, sender, - revoke_rx, ns, ads, ).await; @@ -467,12 +468,10 @@ mod tests { futures::executor::block_on(async move { let req1 = vec![authority_ids[0].clone(), authority_ids[1].clone()]; let (sender, mut receiver) = mpsc::channel(2); - let (_revoke_tx, revoke_rx) = oneshot::channel(); let (_, mut ads) = service.on_request( req1, sender, - revoke_rx, ns, ads, ).await; @@ -502,7 +501,6 @@ mod tests { futures::executor::block_on(async move { let (sender, mut receiver) = mpsc::channel(1); - let (revoke_tx, revoke_rx) = 
oneshot::channel(); service.on_peer_connected(&peer_ids[0], &mut ads).await; service.on_peer_connected(&peer_ids[1], &mut ads).await; @@ -510,22 +508,19 @@ mod tests { let (ns, ads) = service.on_request( vec![authority_ids[0].clone()], sender, - revoke_rx, ns, ads, ).await; let _ = receiver.next().await.unwrap(); // revoke the request - revoke_tx.send(()).unwrap(); + drop(receiver); let (sender, mut receiver) = mpsc::channel(1); - let (_revoke_tx, revoke_rx) = oneshot::channel(); let _ = service.on_request( vec![authority_ids[1].clone()], sender, - revoke_rx, ns, ads, ).await; @@ -549,7 +544,6 @@ mod tests { futures::executor::block_on(async move { let (sender, mut receiver) = mpsc::channel(1); - let (revoke_tx, revoke_rx) = oneshot::channel(); service.on_peer_connected(&peer_ids[0], &mut ads).await; service.on_peer_connected(&peer_ids[1], &mut ads).await; @@ -557,22 +551,19 @@ mod tests { let (ns, ads) = service.on_request( vec![authority_ids[0].clone(), authority_ids[2].clone()], sender, - revoke_rx, ns, ads, ).await; let _ = receiver.next().await.unwrap(); // revoke the first request - revoke_tx.send(()).unwrap(); + drop(receiver); let (sender, mut receiver) = mpsc::channel(1); - let (revoke_tx, revoke_rx) = oneshot::channel(); let (ns, ads) = service.on_request( vec![authority_ids[0].clone(), authority_ids[1].clone()], sender, - revoke_rx, ns, ads, ).await; @@ -582,15 +573,13 @@ mod tests { assert_eq!(ns.priority_group.len(), 2); // revoke the second request - revoke_tx.send(()).unwrap(); + drop(receiver); let (sender, mut receiver) = mpsc::channel(1); - let (_revoke_tx, revoke_rx) = oneshot::channel(); let (ns, _) = service.on_request( vec![authority_ids[0].clone()], sender, - revoke_rx, ns, ads, ).await; @@ -600,4 +589,37 @@ mod tests { assert_eq!(ns.priority_group.len(), 1); }); } + + /// A test for when a validator connects, but the authority discovery not yet knows that the connecting node + /// is a validator. 
This can happen for example at startup of a node. + #[test] + fn handle_validator_connect_without_authority_discovery_knowing_it() { + let mut service = new_service(); + + let ns = TestNetwork::default(); + let mut ads = TestAuthorityDiscovery::default(); + + let validator_peer_id = PeerId::random(); + let validator_id: AuthorityDiscoveryId = Sr25519Keyring::Alice.public().into(); + + futures::executor::block_on(async move { + let (sender, mut receiver) = mpsc::channel(1); + + service.on_peer_connected(&validator_peer_id, &mut ads).await; + + let address = known_multiaddr()[0].clone().with(Protocol::P2p(validator_peer_id.clone().into())); + ads.by_peer_id.insert(validator_peer_id.clone(), validator_id.clone()); + ads.by_authority_id.insert(validator_id.clone(), address); + + let _ = service.on_request( + vec![validator_id.clone()], + sender, + ns, + ads, + ).await; + + assert_eq!((validator_id.clone(), validator_peer_id.clone()), receiver.next().await.unwrap()); + assert!(service.connected_peers.get(&validator_peer_id).unwrap().contains(&validator_id)); + }); + } } diff --git a/node/network/collator-protocol/Cargo.toml b/node/network/collator-protocol/Cargo.toml index 3b10015e1545dae174fc8fe26b8596910cfe6d72..1f53342dfde52c33646ce15ab7debc37d6f66d45 100644 --- a/node/network/collator-protocol/Cargo.toml +++ b/node/network/collator-protocol/Cargo.toml @@ -5,9 +5,10 @@ authors = ["Parity Technologies "] edition = "2018" [dependencies] -futures = "0.3.5" -log = "0.4.11" -thiserror = "1.0.21" +futures = "0.3.8" +tracing = "0.1.22" +tracing-futures = "0.2.4" +thiserror = "1.0.22" polkadot-primitives = { path = "../../../primitives" } @@ -16,12 +17,13 @@ polkadot-node-subsystem-util = { path = "../../subsystem-util" } polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" } [dev-dependencies] -env_logger = "0.7.1" -assert_matches = "1.3.0" -smallvec = "1.4.2" +log = "0.4.11" +env_logger = "0.8.2" +assert_matches = "1.4.0" +smallvec = "1.5.1" 
futures-timer = "3.0.2" -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", features = ["std"] } -sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", features = ["std"] } +sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } polkadot-subsystem-testhelpers = { package = "polkadot-node-subsystem-test-helpers", path = "../../subsystem-test-helpers" } diff --git a/node/network/collator-protocol/src/collator_side.rs b/node/network/collator-protocol/src/collator_side.rs index 3cf7808e36bcbb22978e95b670b24b7812bfd0aa..294be061902d0707e4a21ef7170b4eb715d05626 100644 --- a/node/network/collator-protocol/src/collator_side.rs +++ b/node/network/collator-protocol/src/collator_side.rs @@ -14,23 +14,19 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
-use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; -use super::{TARGET, Result}; +use super::{LOG_TARGET, Result}; -use futures::channel::oneshot; -use futures::stream::StreamExt as _; -use futures::task::Poll; -use log::warn; +use futures::{select, FutureExt}; use polkadot_primitives::v1::{ - CollatorId, CoreIndex, CoreState, Hash, Id as ParaId, CandidateReceipt, - PoV, ValidatorId, + CollatorId, CoreIndex, CoreState, Hash, Id as ParaId, CandidateReceipt, PoV, ValidatorId, }; use polkadot_subsystem::{ FromOverseer, OverseerSignal, SubsystemContext, messages::{ - AllMessages, CollatorProtocolMessage, RuntimeApiMessage, RuntimeApiRequest, + AllMessages, CollatorProtocolMessage, NetworkBridgeMessage, }, }; @@ -41,6 +37,7 @@ use polkadot_node_subsystem_util::{ validator_discovery, request_validators_ctx, request_validator_groups_ctx, + request_availability_cores_ctx, metrics::{self, prometheus}, }; @@ -59,12 +56,24 @@ impl Metrics { metrics.collations_sent.inc(); } } + + /// Provide a timer for handling `ConnectionRequest` which observes on drop. + fn time_handle_connection_request(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.handle_connection_request.start_timer()) + } + + /// Provide a timer for `process_msg` which observes on drop. 
+ fn time_process_msg(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.process_msg.start_timer()) + } } #[derive(Clone)] struct MetricsInner { advertisements_made: prometheus::Counter, collations_sent: prometheus::Counter, + handle_connection_request: prometheus::Histogram, + process_msg: prometheus::Histogram, } impl metrics::Metrics for Metrics { @@ -86,12 +95,84 @@ impl metrics::Metrics for Metrics { )?, registry, )?, + handle_connection_request: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_collator_protocol_collator_handle_connection_request", + "Time spent within `collator_protocol_collator::handle_connection_request`", + ) + )?, + registry, + )?, + process_msg: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_collator_protocol_collator_process_msg", + "Time spent within `collator_protocol_collator::process_msg`", + ) + )?, + registry, + )?, }; Ok(Metrics(Some(metrics))) } } +/// The group of validators that is assigned to our para at a given point of time. +/// +/// This structure is responsible for keeping track of which validators belong to a certain group for a para. It also +/// stores a mapping from [`PeerId`] to [`ValidatorId`] as we learn about it over the lifetime of this object. Besides +/// that it also keeps track to which validators we advertised our collation. +struct ValidatorGroup { + /// All [`ValidatorId`]'s that are assigned to us in this group. + validator_ids: HashSet, + /// The mapping from [`PeerId`] to [`ValidatorId`]. This is filled over time as we learn the [`PeerId`]'s from the + /// authority discovery. It is not ensured that this will contain *all* validators of this group. + peer_ids: HashMap, + /// All [`ValidatorId`]'s of the current group to that we advertised our collation. + advertised_to: HashSet, +} + +impl ValidatorGroup { + /// Returns `true` if we should advertise our collation to the given peer. 
+ fn should_advertise_to(&self, peer: &PeerId) -> bool { + match self.peer_ids.get(peer) { + Some(validator_id) => !self.advertised_to.contains(validator_id), + None => false, + } + } + + /// Should be called after we advertised our collation to the given `peer` to keep track of it. + fn advertised_to_peer(&mut self, peer: &PeerId) { + if let Some(validator_id) = self.peer_ids.get(peer) { + self.advertised_to.insert(validator_id.clone()); + } + } + + /// Add a [`PeerId`] that belongs to the given [`ValidatorId`]. + /// + /// This returns `true` if the given validator belongs to this group and we could insert its [`PeerId`]. + fn add_peer_id_for_validator(&mut self, peer_id: &PeerId, validator_id: &ValidatorId) -> bool { + if !self.validator_ids.contains(validator_id) { + false + } else { + self.peer_ids.insert(peer_id.clone(), validator_id.clone()); + true + } + } +} + +impl From> for ValidatorGroup { + fn from(validator_ids: HashSet) -> Self { + Self { + validator_ids, + peer_ids: HashMap::new(), + advertised_to: HashSet::new(), + } + } +} + #[derive(Default)] struct State { /// Our id. @@ -113,24 +194,26 @@ struct State { /// We will keep up to one local collation per relay-parent. collations: HashMap, - /// Our validator groups active leafs. - our_validators_groups: HashMap>, + /// Our validator groups per active leaf. + our_validators_groups: HashMap, - /// Validators we know about via `ConnectToValidators` message. - /// - /// These are the only validators we are interested in talking to and as such - /// all actions from peers not in this map will be ignored. - /// Entries in this map will be cleared as validator groups in `our_validator_groups` - /// go out of scope with their respective deactivated leafs. - known_validators: HashMap, + /// List of peers where we declared ourself as a collator. + declared_at: HashSet, - /// Use to await for the next validator connection and revoke the request. 
- last_connection_request: Option, + /// The connection requests to validators per relay parent. + connection_requests: validator_discovery::ConnectionRequests, /// Metrics. metrics: Metrics, } +impl State { + /// Returns `true` if the given `peer` is interested in the leaf that is represented by `relay_parent`. + fn peer_interested_in_leaf(&self, peer: &PeerId, relay_parent: &Hash) -> bool { + self.peer_views.get(peer).map(|v| v.contains(relay_parent)).unwrap_or(false) + } +} + /// Distribute a collation. /// /// Figure out the core our para is assigned to and the relevant validators. @@ -139,24 +222,22 @@ struct State { /// or the relay-parent isn't in the active-leaves set, we ignore the message /// as it must be invalid in that case - although this indicates a logic error /// elsewhere in the node. -async fn distribute_collation( - ctx: &mut Context, +#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(subsystem = LOG_TARGET))] +async fn distribute_collation( + ctx: &mut impl SubsystemContext, state: &mut State, id: ParaId, receipt: CandidateReceipt, pov: PoV, -) -> Result<()> -where - Context: SubsystemContext -{ +) -> Result<()> { let relay_parent = receipt.descriptor.relay_parent; // This collation is not in the active-leaves set. if !state.view.contains(&relay_parent) { - warn!( - target: TARGET, - "Distribute collation message parent {:?} is outside of our view", - relay_parent, + tracing::warn!( + target: LOG_TARGET, + relay_parent = %relay_parent, + "distribute collation message parent is outside of our view", ); return Ok(()); @@ -172,44 +253,34 @@ where let (our_core, num_cores) = match determine_core(ctx, id, relay_parent).await? 
{ Some(core) => core, None => { - warn!( - target: TARGET, - "Looks like no core is assigned to {:?} at {:?}", id, relay_parent, + tracing::warn!( + target: LOG_TARGET, + para_id = %id, + relay_parent = %relay_parent, + "looks like no core is assigned to {} at {}", id, relay_parent, ); - return Ok(()); + + return Ok(()) } }; // Determine the group on that core and the next group on that core. - let our_validators = match determine_our_validators(ctx, our_core, num_cores, relay_parent).await? { - Some(validators) => validators, - None => { - warn!( - target: TARGET, - "There are no validators assigned to {:?} core", our_core, - ); + let (current_validators, next_validators) = determine_our_validators(ctx, our_core, num_cores, relay_parent).await?; - return Ok(()); - } - }; - - state.our_validators_groups.insert(relay_parent, our_validators.clone()); + if current_validators.is_empty() && next_validators.is_empty() { + tracing::warn!( + target: LOG_TARGET, + core = ?our_core, + "there are no validators assigned to core", + ); - // We may be already connected to some of the validators. In that case, - // advertise a collation to them right away. - for validator in our_validators.iter() { - if let Some(peer) = state.known_validators.get(&validator) { - if let Some(view) = state.peer_views.get(peer) { - if view.contains(&relay_parent) { - let peer = peer.clone(); - advertise_collation(ctx, state, relay_parent, vec![peer]).await?; - } - } - } + return Ok(()); } // Issue a discovery request for the validators of the current group and the next group. 
- connect_to_validators(ctx, relay_parent, state, our_validators).await?; + connect_to_validators(ctx, relay_parent, state, current_validators.union(&next_validators).cloned().collect()).await?; + + state.our_validators_groups.insert(relay_parent, current_validators.into()); state.collations.insert(relay_parent, (receipt, pov)); @@ -218,24 +289,13 @@ where /// Get the Id of the Core that is assigned to the para being collated on if any /// and the total number of cores. -async fn determine_core( - ctx: &mut Context, +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] +async fn determine_core( + ctx: &mut impl SubsystemContext, para_id: ParaId, relay_parent: Hash, -) -> Result> -where - Context: SubsystemContext -{ - let (tx, rx) = oneshot::channel(); - - ctx.send_message(AllMessages::RuntimeApi( - RuntimeApiMessage::Request( - relay_parent, - RuntimeApiRequest::AvailabilityCores(tx), - ) - )).await?; - - let cores = rx.await??; +) -> Result> { + let cores = request_availability_cores_ctx(relay_parent, ctx).await?.await??; for (idx, core) in cores.iter().enumerate() { if let CoreState::Scheduled(occupied) = core { @@ -248,136 +308,123 @@ where Ok(None) } -/// Figure out a group of validators assigned to the para being collated on. +/// Figure out current and next group of validators assigned to the para being collated on. /// -/// This returns validators for the current group and the next group. -async fn determine_our_validators( - ctx: &mut Context, +/// Returns [`ValidatorId`]'s of current and next group as determined based on the `relay_parent`. 
+#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] +async fn determine_our_validators( + ctx: &mut impl SubsystemContext, core_index: CoreIndex, cores: usize, relay_parent: Hash, -) -> Result>> -where - Context: SubsystemContext -{ +) -> Result<(HashSet, HashSet)> { let groups = request_validator_groups_ctx(relay_parent, ctx).await?; let groups = groups.await??; let current_group_index = groups.1.group_for_core(core_index, cores); - - let mut connect_to_validators = match groups.0.get(current_group_index.0 as usize) { - Some(group) => group.clone(), - None => return Ok(None), - }; + let current_validators = groups.0.get(current_group_index.0 as usize).map(|v| v.as_slice()).unwrap_or_default(); let next_group_idx = (current_group_index.0 as usize + 1) % groups.0.len(); + let next_validators = groups.0.get(next_group_idx).map(|v| v.as_slice()).unwrap_or_default(); - if let Some(next_group) = groups.0.get(next_group_idx) { - connect_to_validators.extend_from_slice(&next_group); - } - - let validators = request_validators_ctx(relay_parent, ctx).await?; - - let validators = validators.await??; + let validators = request_validators_ctx(relay_parent, ctx).await?.await??; - let validators = connect_to_validators - .into_iter() - .map(|idx| validators[idx as usize].clone()) - .collect(); + let current_validators = current_validators.iter().map(|i| validators[*i as usize].clone()).collect(); + let next_validators = next_validators.iter().map(|i| validators[*i as usize].clone()).collect(); - Ok(Some(validators)) + Ok((current_validators, next_validators)) } -/// Issue a `Declare` collation message to a set of peers. -async fn declare( - ctx: &mut Context, +/// Issue a `Declare` collation message to the given `peer`. 
+#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] +async fn declare( + ctx: &mut impl SubsystemContext, state: &mut State, - to: Vec, -) -> Result<()> -where - Context: SubsystemContext -{ + peer: PeerId, +) { let wire_message = protocol_v1::CollatorProtocolMessage::Declare(state.our_id.clone()); ctx.send_message(AllMessages::NetworkBridge( NetworkBridgeMessage::SendCollationMessage( - to, + vec![peer], protocol_v1::CollationProtocol::CollatorProtocol(wire_message), ) - )).await?; - - Ok(()) + )).await; } /// Issue a connection request to a set of validators and /// revoke the previous connection request. -async fn connect_to_validators( - ctx: &mut Context, +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] +async fn connect_to_validators( + ctx: &mut impl SubsystemContext, relay_parent: Hash, state: &mut State, validators: Vec, -) -> Result<()> -where - Context: SubsystemContext -{ - if let Some(request) = state.last_connection_request.take() { - request.revoke(); - } - +) -> Result<()> { let request = validator_discovery::connect_to_validators( ctx, relay_parent, validators, ).await?; - state.last_connection_request = Some(request); + state.connection_requests.put(relay_parent, request); Ok(()) } -/// Advertise collation to a set of relay chain validators. -async fn advertise_collation( - ctx: &mut Context, +/// Advertise collation to the given `peer`. +/// +/// This will only advertise a collation if there exists one for the given `relay_parent` and the given `peer` is +/// set as validator for our para at the given `relay_parent`. 
+#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] +async fn advertise_collation( + ctx: &mut impl SubsystemContext, state: &mut State, relay_parent: Hash, - to: Vec, -) -> Result<()> -where - Context: SubsystemContext -{ + peer: PeerId, +) { let collating_on = match state.collating_on { Some(collating_on) => collating_on, - None => { - return Ok(()); - } + None => return, }; + let should_advertise = state.our_validators_groups + .get(&relay_parent) + .map(|g| g.should_advertise_to(&peer)) + .unwrap_or(false); + + if !state.collations.contains_key(&relay_parent) || !should_advertise { + return; + } + let wire_message = protocol_v1::CollatorProtocolMessage::AdvertiseCollation(relay_parent, collating_on); ctx.send_message(AllMessages::NetworkBridge( NetworkBridgeMessage::SendCollationMessage( - to, + vec![peer.clone()], protocol_v1::CollationProtocol::CollatorProtocol(wire_message), ) - )).await?; + )).await; - state.metrics.on_advertisment_made(); + if let Some(validators) = state.our_validators_groups.get_mut(&relay_parent) { + validators.advertised_to_peer(&peer); + } - Ok(()) + state.metrics.on_advertisment_made(); } /// The main incoming message dispatching switch. -async fn process_msg( - ctx: &mut Context, +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] +async fn process_msg( + ctx: &mut impl SubsystemContext, state: &mut State, msg: CollatorProtocolMessage, -) -> Result<()> -where - Context: SubsystemContext -{ +) -> Result<()> { use CollatorProtocolMessage::*; + let _timer = state.metrics.time_process_msg(); + match msg { CollateOn(id) => { state.collating_on = Some(id); @@ -387,40 +434,40 @@ where Some(id) if receipt.descriptor.para_id != id => { // If the ParaId of a collation requested to be distributed does not match // the one we expect, we ignore the message. 
- warn!( - target: TARGET, - "DistributeCollation message for para {:?} while collating on {:?}", - receipt.descriptor.para_id, - id, + tracing::warn!( + target: LOG_TARGET, + para_id = %receipt.descriptor.para_id, + collating_on = %id, + "DistributeCollation for unexpected para_id", ); } Some(id) => { distribute_collation(ctx, state, id, receipt, pov).await?; } None => { - warn!( - target: TARGET, - "DistributeCollation message for para {:?} while not collating on any", - receipt.descriptor.para_id, + tracing::warn!( + target: LOG_TARGET, + para_id = %receipt.descriptor.para_id, + "DistributeCollation message while not collating on any", ); } } } FetchCollation(_, _, _, _) => { - warn!( - target: TARGET, + tracing::warn!( + target: LOG_TARGET, "FetchCollation message is not expected on the collator side of the protocol", ); } ReportCollator(_) => { - warn!( - target: TARGET, + tracing::warn!( + target: LOG_TARGET, "ReportCollator message is not expected on the collator side of the protocol", ); } NoteGoodCollation(_) => { - warn!( - target: TARGET, + tracing::warn!( + target: LOG_TARGET, "NoteGoodCollation message is not expected on the collator side of the protocol", ); } @@ -430,9 +477,10 @@ where state, event, ).await { - warn!( - target: TARGET, - "Failed to handle incoming network message: {:?}", e, + tracing::warn!( + target: LOG_TARGET, + err = ?e, + "Failed to handle incoming network message", ); } }, @@ -442,17 +490,15 @@ where } /// Issue a response to a previously requested collation. 
-async fn send_collation( - ctx: &mut Context, +#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(subsystem = LOG_TARGET))] +async fn send_collation( + ctx: &mut impl SubsystemContext, state: &mut State, request_id: RequestId, origin: PeerId, receipt: CandidateReceipt, pov: PoV, -) -> Result<()> -where - Context: SubsystemContext -{ +) { let wire_message = protocol_v1::CollatorProtocolMessage::Collation( request_id, receipt, @@ -464,35 +510,31 @@ where vec![origin], protocol_v1::CollationProtocol::CollatorProtocol(wire_message), ) - )).await?; + )).await; state.metrics.on_collation_sent(); - - Ok(()) } /// A networking messages switch. -async fn handle_incoming_peer_message( - ctx: &mut Context, +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] +async fn handle_incoming_peer_message( + ctx: &mut impl SubsystemContext, state: &mut State, origin: PeerId, msg: protocol_v1::CollatorProtocolMessage, -) -> Result<()> -where - Context: SubsystemContext -{ +) -> Result<()> { use protocol_v1::CollatorProtocolMessage::*; match msg { Declare(_) => { - warn!( - target: TARGET, + tracing::warn!( + target: LOG_TARGET, "Declare message is not expected on the collator side of the protocol", ); } AdvertiseCollation(_, _) => { - warn!( - target: TARGET, + tracing::warn!( + target: LOG_TARGET, "AdvertiseCollation message is not expected on the collator side of the protocol", ); } @@ -501,28 +543,29 @@ where Some(our_para_id) => { if our_para_id == para_id { if let Some(collation) = state.collations.get(&relay_parent).cloned() { - send_collation(ctx, state, request_id, origin, collation.0, collation.1).await?; + send_collation(ctx, state, request_id, origin, collation.0, collation.1).await; } } else { - warn!( - target: TARGET, - "Received a RequestCollation for {:?} while collating on {:?}", - para_id, our_para_id, + tracing::warn!( + target: LOG_TARGET, + for_para_id = %para_id, + our_para_id = %our_para_id, + "received a 
RequestCollation for unexpected para_id", ); } } None => { - warn!( - target: TARGET, - "Received a RequestCollation for {:?} while not collating on any para", - para_id, + tracing::warn!( + target: LOG_TARGET, + for_para_id = %para_id, + "received a RequestCollation while not collating on any para", ); } } } Collation(_, _, _) => { - warn!( - target: TARGET, + tracing::warn!( + target: LOG_TARGET, "Collation message is not expected on the collator side of the protocol", ); } @@ -532,15 +575,13 @@ where } /// Our view has changed. -async fn handle_peer_view_change( - ctx: &mut Context, +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] +async fn handle_peer_view_change( + ctx: &mut impl SubsystemContext, state: &mut State, peer_id: PeerId, view: View, -) -> Result<()> -where - Context: SubsystemContext -{ +) { let current = state.peer_views.entry(peer_id.clone()).or_default(); let added: Vec = view.difference(&*current).cloned().collect(); @@ -548,43 +589,48 @@ where *current = view; for added in added.into_iter() { - if state.collations.contains_key(&added) { - advertise_collation(ctx, state, added.clone(), vec![peer_id.clone()]).await?; - } + advertise_collation(ctx, state, added, peer_id.clone()).await; } - - Ok(()) } /// A validator is connected. /// /// `Declare` that we are a collator with a given `CollatorId`. -async fn handle_validator_connected( - ctx: &mut Context, +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] +async fn handle_validator_connected( + ctx: &mut impl SubsystemContext, state: &mut State, peer_id: PeerId, -) -> Result<()> -where - Context: SubsystemContext -{ - if !state.peer_views.contains_key(&peer_id) { - // Only declare the new peers. 
- declare(ctx, state, vec![peer_id.clone()]).await?; - state.peer_views.insert(peer_id, Default::default()); + validator_id: ValidatorId, + relay_parent: Hash, +) { + let not_declared = state.declared_at.insert(peer_id.clone()); + + if not_declared { + declare(ctx, state, peer_id.clone()).await; } - Ok(()) + // Store the PeerId and find out if we should advertise to this peer. + // + // If this peer does not belong to the para validators, we also don't need to try to advertise our collation. + let advertise = if let Some(validators) = state.our_validators_groups.get_mut(&relay_parent) { + validators.add_peer_id_for_validator(&peer_id, &validator_id) + } else { + false + }; + + if advertise && state.peer_interested_in_leaf(&peer_id, &relay_parent) { + advertise_collation(ctx, state, relay_parent, peer_id).await; + } } /// Bridge messages switch. -async fn handle_network_msg( - ctx: &mut Context, +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] +async fn handle_network_msg( + ctx: &mut impl SubsystemContext, state: &mut State, bridge_message: NetworkBridgeEvent, -) -> Result<()> -where - Context: SubsystemContext -{ +) -> Result<()> { use NetworkBridgeEvent::*; match bridge_message { @@ -593,11 +639,11 @@ where // it should be handled here. } PeerViewChange(peer_id, view) => { - handle_peer_view_change(ctx, state, peer_id, view).await?; + handle_peer_view_change(ctx, state, peer_id, view).await; } PeerDisconnected(peer_id) => { - state.known_validators.retain(|_, v| *v != peer_id); state.peer_views.remove(&peer_id); + state.declared_at.remove(&peer_id); } OurViewChange(view) => { handle_our_view_change(state, view).await?; @@ -611,69 +657,62 @@ where } /// Handles our view changes. 
+#[tracing::instrument(level = "trace", skip(state), fields(subsystem = LOG_TARGET))] async fn handle_our_view_change( state: &mut State, view: View, ) -> Result<()> { - let old_view = std::mem::replace(&mut (state.view), view); - - let view = state.view.clone(); - - let removed = old_view.difference(&view).collect::>(); - - for removed in removed.into_iter() { + for removed in state.view.difference(&view) { state.collations.remove(removed); state.our_validators_groups.remove(removed); + state.connection_requests.remove(removed); } + state.view = view; + Ok(()) } /// The collator protocol collator side main loop. -pub(crate) async fn run( - mut ctx: Context, +#[tracing::instrument(skip(ctx, metrics), fields(subsystem = LOG_TARGET))] +pub(crate) async fn run( + mut ctx: impl SubsystemContext, our_id: CollatorId, metrics: Metrics, -) -> Result<()> -where - Context: SubsystemContext -{ +) -> Result<()> { use FromOverseer::*; use OverseerSignal::*; let mut state = State { metrics, + our_id, ..Default::default() }; - state.our_id = our_id; - loop { - if let Some(mut request) = state.last_connection_request.take() { - while let Poll::Ready(Some((validator_id, peer_id))) = futures::poll!(request.next()) { - state.known_validators.insert(validator_id, peer_id.clone()); - if let Err(err) = handle_validator_connected(&mut ctx, &mut state, peer_id).await { - warn!( - target: TARGET, - "Failed to declare our collator id: {:?}", - err, - ); - } - } - // put it back - state.last_connection_request = Some(request); - } - - while let Poll::Ready(msg) = futures::poll!(ctx.recv()) { - match msg? { - Communication { msg } => process_msg(&mut ctx, &mut state, msg).await?, + select! { + res = state.connection_requests.next().fuse() => { + let _timer = state.metrics.time_handle_connection_request(); + + handle_validator_connected( + &mut ctx, + &mut state, + res.peer_id, + res.validator_id, + res.relay_parent, + ).await; + }, + msg = ctx.recv().fuse() => match msg? 
{ + Communication { msg } => { + if let Err(e) = process_msg(&mut ctx, &mut state, msg).await { + tracing::warn!(target: LOG_TARGET, err = ?e, "Failed to process message"); + } + }, Signal(ActiveLeaves(_update)) => {} Signal(BlockFinalized(_)) => {} Signal(Conclude) => return Ok(()), } } - - futures::pending!() } } @@ -684,8 +723,7 @@ mod tests { use std::time::Duration; use assert_matches::assert_matches; - use futures::{executor, future, Future}; - use log::trace; + use futures::{executor, future, Future, channel::mpsc}; use smallvec::smallvec; use sp_core::crypto::Pair; @@ -694,8 +732,9 @@ mod tests { use polkadot_primitives::v1::{ BlockData, CandidateDescriptor, CollatorPair, ScheduledCore, ValidatorIndex, GroupRotationInfo, AuthorityDiscoveryId, + SessionIndex, SessionInfo, }; - use polkadot_subsystem::ActiveLeavesUpdate; + use polkadot_subsystem::{ActiveLeavesUpdate, messages::{RuntimeApiMessage, RuntimeApiRequest}}; use polkadot_node_subsystem_util::TimeoutExt; use polkadot_subsystem_testhelpers as test_helpers; @@ -723,15 +762,16 @@ mod tests { #[derive(Clone)] struct TestState { - chain_ids: Vec, + para_id: ParaId, validators: Vec, validator_public: Vec, validator_authority_id: Vec, validator_peer_id: Vec, validator_groups: (Vec>, GroupRotationInfo), relay_parent: Hash, - availability_cores: Vec, + availability_core: CoreState, our_collator_pair: CollatorPair, + session_index: SessionIndex, } fn validator_pubkeys(val_ids: &[Sr25519Keyring]) -> Vec { @@ -744,10 +784,7 @@ mod tests { impl Default for TestState { fn default() -> Self { - let chain_a = ParaId::from(1); - let chain_b = ParaId::from(2); - - let chain_ids = vec![chain_a, chain_b]; + let para_id = ParaId::from(1); let validators = vec![ Sr25519Keyring::Alice, @@ -764,7 +801,7 @@ mod tests { .take(validator_public.len()) .collect(); - let validator_groups = vec![vec![2, 0, 4], vec![1], vec![3]]; + let validator_groups = vec![vec![2, 0, 4], vec![3, 2, 4]]; let group_rotation_info = GroupRotationInfo 
{ session_start_block: 0, group_rotation_frequency: 100, @@ -772,37 +809,98 @@ mod tests { }; let validator_groups = (validator_groups, group_rotation_info); - let availability_cores = vec![ - CoreState::Scheduled(ScheduledCore { - para_id: chain_ids[0], - collator: None, - }), - CoreState::Scheduled(ScheduledCore { - para_id: chain_ids[1], - collator: None, - }), - ]; + let availability_core = CoreState::Scheduled(ScheduledCore { + para_id, + collator: None, + }); - let relay_parent = Hash::repeat_byte(0x05); + let relay_parent = Hash::random(); let our_collator_pair = CollatorPair::generate().0; Self { - chain_ids, + para_id, validators, validator_public, validator_authority_id, validator_peer_id, validator_groups, relay_parent, - availability_cores, + availability_core, our_collator_pair, + session_index: 1, } } } + impl TestState { + fn current_group_validator_indices(&self) -> &[ValidatorIndex] { + &self.validator_groups.0[0] + } + + fn current_session_index(&self) -> SessionIndex { + self.session_index + } + + fn current_group_validator_peer_ids(&self) -> Vec { + self.current_group_validator_indices().iter().map(|i| self.validator_peer_id[*i as usize].clone()).collect() + } + + fn current_group_validator_authority_ids(&self) -> Vec { + self.current_group_validator_indices() + .iter() + .map(|i| self.validator_authority_id[*i as usize].clone()) + .collect() + } + + fn current_group_validator_ids(&self) -> Vec { + self.current_group_validator_indices() + .iter() + .map(|i| self.validator_public[*i as usize].clone()) + .collect() + } + + fn next_group_validator_indices(&self) -> &[ValidatorIndex] { + &self.validator_groups.0[1] + } + + fn next_group_validator_authority_ids(&self) -> Vec { + self.next_group_validator_indices() + .iter() + .map(|i| self.validator_authority_id[*i as usize].clone()) + .collect() + } + + /// Generate a new relay parent and inform the subsystem about the new view. 
+ /// + /// If `merge_views == true` it means the subsystem will be informed that we are working on the old `relay_parent` + /// and the new one. + async fn advance_to_new_round(&mut self, virtual_overseer: &mut VirtualOverseer, merge_views: bool) { + let old_relay_parent = self.relay_parent; + + while self.relay_parent == old_relay_parent { + self.relay_parent.randomize(); + } + + let hashes = if merge_views { + vec![old_relay_parent, self.relay_parent] + } else { + vec![self.relay_parent] + }; + + overseer_send( + virtual_overseer, + CollatorProtocolMessage::NetworkBridgeUpdateV1( + NetworkBridgeEvent::OurViewChange(View(hashes)), + ), + ).await; + } + } + + type VirtualOverseer = test_helpers::TestSubsystemContextHandle; + struct TestHarness { - virtual_overseer: test_helpers::TestSubsystemContextHandle, + virtual_overseer: VirtualOverseer, } fn test_harness>( @@ -816,7 +914,7 @@ mod tests { log::LevelFilter::Trace, ) .filter( - Some(TARGET), + Some(LOG_TARGET), log::LevelFilter::Trace, ) .try_init(); @@ -841,7 +939,7 @@ mod tests { overseer: &mut test_helpers::TestSubsystemContextHandle, msg: CollatorProtocolMessage, ) { - trace!("Sending message:\n{:?}", &msg); + tracing::trace!(msg = ?msg, "sending message"); overseer .send(FromOverseer::Communication { msg }) .timeout(TIMEOUT) @@ -856,7 +954,7 @@ mod tests { .await .expect(&format!("{:?} is more than enough to receive messages", TIMEOUT)); - trace!("Received message:\n{:?}", &msg); + tracing::trace!(msg = ?msg, "received message"); msg } @@ -865,7 +963,7 @@ mod tests { overseer: &mut test_helpers::TestSubsystemContextHandle, timeout: Duration, ) -> Option { - trace!("Waiting for message..."); + tracing::trace!("waiting for message..."); overseer .recv() .timeout(timeout) @@ -883,200 +981,269 @@ mod tests { .expect(&format!("{:?} is more than enough for sending signals.", TIMEOUT)); } - #[test] - fn advertise_and_send_collation() { - let test_state = TestState::default(); + // Setup the system by sending the 
`CollateOn`, `ActiveLeaves` and `OurViewChange` messages. + async fn setup_system(virtual_overseer: &mut VirtualOverseer, test_state: &TestState) { + overseer_send( + virtual_overseer, + CollatorProtocolMessage::CollateOn(test_state.para_id), + ).await; + + overseer_signal( + virtual_overseer, + OverseerSignal::ActiveLeaves(ActiveLeavesUpdate { + activated: smallvec![test_state.relay_parent], + deactivated: smallvec![], + }), + ).await; + + overseer_send( + virtual_overseer, + CollatorProtocolMessage::NetworkBridgeUpdateV1( + NetworkBridgeEvent::OurViewChange(View(vec![test_state.relay_parent])), + ), + ).await; + } - test_harness(test_state.our_collator_pair.public(), |test_harness| async move { - let current = test_state.relay_parent; - let TestHarness { - mut virtual_overseer, - } = test_harness; + /// Result of [`distribute_collation`] + struct DistributeCollation { + /// Should be used to inform the subsystem about connected validators. + connected: mpsc::Sender<(AuthorityDiscoveryId, PeerId)>, + candidate: CandidateReceipt, + pov_block: PoV, + } - let pov_block = PoV { - block_data: BlockData(vec![42, 43, 44]), - }; + /// Create some PoV and distribute it. 
+ async fn distribute_collation( + virtual_overseer: &mut VirtualOverseer, + test_state: &TestState, + ) -> DistributeCollation { + // Now we want to distribute a PoVBlock + let pov_block = PoV { + block_data: BlockData(vec![42, 43, 44]), + }; - let pov_hash = pov_block.hash(); + let pov_hash = pov_block.hash(); - let candidate = TestCandidateBuilder { - para_id: test_state.chain_ids[0], - relay_parent: test_state.relay_parent, - pov_hash, - ..Default::default() - }.build(); + let candidate = TestCandidateBuilder { + para_id: test_state.para_id, + relay_parent: test_state.relay_parent, + pov_hash, + ..Default::default() + }.build(); - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::CollateOn(test_state.chain_ids[0]) - ).await; + overseer_send( + virtual_overseer, + CollatorProtocolMessage::DistributeCollation(candidate.clone(), pov_block.clone()), + ).await; - overseer_signal( - &mut virtual_overseer, - OverseerSignal::ActiveLeaves(ActiveLeavesUpdate { - activated: smallvec![current.clone()], - deactivated: smallvec![], - }), - ).await; + // obtain the availability cores. 
+ assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::AvailabilityCores(tx) + )) => { + assert_eq!(relay_parent, test_state.relay_parent); + tx.send(Ok(vec![test_state.availability_core.clone()])).unwrap(); + } + ); - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::NetworkBridgeUpdateV1( - NetworkBridgeEvent::OurViewChange(View(vec![current])), - ), - ).await; + // Obtain the validator groups + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::ValidatorGroups(tx) + )) => { + assert_eq!(relay_parent, test_state.relay_parent); + tx.send(Ok(test_state.validator_groups.clone())).unwrap(); + } + ); - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::DistributeCollation(candidate.clone(), pov_block.clone()), - ).await; + // obtain the validators per relay parent + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::Validators(tx), + )) => { + assert_eq!(relay_parent, test_state.relay_parent); + tx.send(Ok(test_state.validator_public.clone())).unwrap(); + } + ); - // obtain the availability cores. 
- assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - relay_parent, - RuntimeApiRequest::AvailabilityCores(tx) - )) => { - assert_eq!(relay_parent, current); - tx.send(Ok(test_state.availability_cores.clone())).unwrap(); - } - ); + // obtain the validator_id to authority_id mapping + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::SessionIndexForChild(tx), + )) => { + assert_eq!(relay_parent, test_state.relay_parent); + tx.send(Ok(test_state.current_session_index())).unwrap(); + } + ); - // Obtain the validator groups - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - relay_parent, - RuntimeApiRequest::ValidatorGroups(tx) - )) => { - assert_eq!(relay_parent, current); - tx.send(Ok(test_state.validator_groups.clone())).unwrap(); - } - ); + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::SessionInfo(index, tx), + )) => { + assert_eq!(relay_parent, test_state.relay_parent); + assert_eq!(index, test_state.current_session_index()); - // obtain the validators per relay parent - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - relay_parent, - RuntimeApiRequest::Validators(tx), - )) => { - assert_eq!(relay_parent, current); - tx.send(Ok(test_state.validator_public.clone())).unwrap(); - } - ); + let validators = test_state.current_group_validator_ids(); + let current_discovery_keys = test_state.current_group_validator_authority_ids(); + let next_discovery_keys = test_state.next_group_validator_authority_ids(); - // obtain the validator_id to authority_id mapping - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - 
AllMessages::RuntimeApi(RuntimeApiMessage::Request( - relay_parent, - RuntimeApiRequest::ValidatorDiscovery(validators, tx), - )) => { - assert_eq!(relay_parent, current); - assert_eq!(validators.len(), 4); - assert!(validators.contains(&test_state.validator_public[2])); - assert!(validators.contains(&test_state.validator_public[0])); - assert!(validators.contains(&test_state.validator_public[4])); - assert!(validators.contains(&test_state.validator_public[1])); - - let result = vec![ - Some(test_state.validator_authority_id[2].clone()), - Some(test_state.validator_authority_id[0].clone()), - Some(test_state.validator_authority_id[4].clone()), - Some(test_state.validator_authority_id[1].clone()), - ]; - tx.send(Ok(result)).unwrap(); + let discovery_keys = [¤t_discovery_keys[..], &next_discovery_keys[..]].concat(); + + tx.send(Ok(Some(SessionInfo { + validators, + discovery_keys, + ..Default::default() + }))).unwrap(); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::ConnectToValidators { + connected, + .. } - ); + ) => { + DistributeCollation { + connected, + candidate, + pov_block, + } + } + ) + } - // We now should connect to our validator group. - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::NetworkBridge( - NetworkBridgeMessage::ConnectToValidators { - validator_ids, - mut connected, - .. 
- } - ) => { - assert_eq!(validator_ids.len(), 4); - assert!(validator_ids.contains(&test_state.validator_authority_id[2])); - assert!(validator_ids.contains(&test_state.validator_authority_id[0])); - assert!(validator_ids.contains(&test_state.validator_authority_id[4])); - assert!(validator_ids.contains(&test_state.validator_authority_id[1])); - - let result = vec![ - (test_state.validator_authority_id[2].clone(), test_state.validator_peer_id[2].clone()), - (test_state.validator_authority_id[0].clone(), test_state.validator_peer_id[0].clone()), - (test_state.validator_authority_id[4].clone(), test_state.validator_peer_id[4].clone()), - (test_state.validator_authority_id[1].clone(), test_state.validator_peer_id[1].clone()), - ]; - - for (id, peer_id) in result.into_iter() { - connected.try_send((id, peer_id)).unwrap(); + /// Connect a peer + async fn connect_peer(virtual_overseer: &mut VirtualOverseer, peer: PeerId) { + overseer_send( + virtual_overseer, + CollatorProtocolMessage::NetworkBridgeUpdateV1( + NetworkBridgeEvent::PeerConnected( + peer.clone(), + polkadot_node_network_protocol::ObservedRole::Authority, + ), + ), + ).await; + + overseer_send( + virtual_overseer, + CollatorProtocolMessage::NetworkBridgeUpdateV1( + NetworkBridgeEvent::PeerViewChange(peer, View(Default::default())), + ), + ).await; + } + + /// Disconnect a peer + async fn disconnect_peer(virtual_overseer: &mut VirtualOverseer, peer: PeerId) { + overseer_send( + virtual_overseer, + CollatorProtocolMessage::NetworkBridgeUpdateV1(NetworkBridgeEvent::PeerDisconnected(peer)), + ).await; + } + + /// Check that the next received message is a `Declare` message. 
+ async fn expect_declare_msg(virtual_overseer: &mut VirtualOverseer, test_state: &TestState, peer: &PeerId) { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::SendCollationMessage( + to, + protocol_v1::CollationProtocol::CollatorProtocol(wire_message), + ) + ) => { + assert_eq!(to[0], *peer); + assert_matches!( + wire_message, + protocol_v1::CollatorProtocolMessage::Declare(collator_id) => { + assert_eq!(collator_id, test_state.our_collator_pair.public()); } - } - ); + ); + } + ); + } - // We declare to the connected validators that we are a collator. - // We need to catch all `Declare` messages to the validators we've - // previosly connected to. - for i in vec![2, 0, 4, 1].into_iter() { + /// Check that the next received message is a collation advertisement message. + async fn expect_advertise_collation_msg( + virtual_overseer: &mut VirtualOverseer, + test_state: &TestState, + peer: &PeerId, + expected_relay_parent: Hash, + ) { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::SendCollationMessage( + to, + protocol_v1::CollationProtocol::CollatorProtocol(wire_message), + ) + ) => { + assert_eq!(to[0], *peer); assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::NetworkBridge( - NetworkBridgeMessage::SendCollationMessage( - to, - protocol_v1::CollationProtocol::CollatorProtocol(wire_message), - ) + wire_message, + protocol_v1::CollatorProtocolMessage::AdvertiseCollation( + relay_parent, + collating_on, ) => { - assert_eq!(to, vec![test_state.validator_peer_id[i].clone()]); - assert_matches!( - wire_message, - protocol_v1::CollatorProtocolMessage::Declare(collator_id) => { - assert_eq!(collator_id, test_state.our_collator_pair.public()); - } - ); + assert_eq!(relay_parent, expected_relay_parent); + assert_eq!(collating_on, test_state.para_id); } ); } + ); + } + + /// Send a message that the given peer's view 
changed. + async fn send_peer_view_change(virtual_overseer: &mut VirtualOverseer, peer: &PeerId, hashes: Vec) { + overseer_send( + virtual_overseer, + CollatorProtocolMessage::NetworkBridgeUpdateV1( + NetworkBridgeEvent::PeerViewChange(peer.clone(), View(hashes)), + ), + ).await; + } + + #[test] + fn advertise_and_send_collation() { + let mut test_state = TestState::default(); + + test_harness(test_state.our_collator_pair.public(), |test_harness| async move { + let mut virtual_overseer = test_harness.virtual_overseer; + + setup_system(&mut virtual_overseer, &test_state).await; + + let DistributeCollation { mut connected, candidate, pov_block } = + distribute_collation(&mut virtual_overseer, &test_state).await; + test_state.current_group_validator_authority_ids() + .into_iter() + .zip(test_state.current_group_validator_peer_ids()) + .for_each(|r| connected.try_send(r).unwrap()); + + // We declare to the connected validators that we are a collator. + // We need to catch all `Declare` messages to the validators we've + // previously connected to. + for peer_id in test_state.current_group_validator_peer_ids() { + expect_declare_msg(&mut virtual_overseer, &test_state, &peer_id).await; + } + + let peer = test_state.current_group_validator_peer_ids()[0].clone(); // Send info about peer's view. - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::NetworkBridgeUpdateV1( - NetworkBridgeEvent::PeerViewChange( - test_state.validator_peer_id[2].clone(), - View(vec![current]), - ) - ) - ).await; + send_peer_view_change(&mut virtual_overseer, &peer, vec![test_state.relay_parent]).await; // The peer is interested in a leaf that we have a collation for; // advertise it. 
- assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::NetworkBridge( - NetworkBridgeMessage::SendCollationMessage( - to, - protocol_v1::CollationProtocol::CollatorProtocol(wire_message), - ) - ) => { - assert_eq!(to, vec![test_state.validator_peer_id[2].clone()]); - assert_matches!( - wire_message, - protocol_v1::CollatorProtocolMessage::AdvertiseCollation( - relay_parent, - collating_on, - ) => { - assert_eq!(relay_parent, current); - assert_eq!(collating_on, test_state.chain_ids[0]); - } - ); - } - ); + expect_advertise_collation_msg(&mut virtual_overseer, &test_state, &peer, test_state.relay_parent).await; let request_id = 42; @@ -1085,11 +1252,11 @@ mod tests { &mut virtual_overseer, CollatorProtocolMessage::NetworkBridgeUpdateV1( NetworkBridgeEvent::PeerMessage( - test_state.validator_peer_id[2].clone(), + peer.clone(), protocol_v1::CollatorProtocolMessage::RequestCollation( request_id, - current, - test_state.chain_ids[0], + test_state.relay_parent, + test_state.para_id, ) ) ) @@ -1104,7 +1271,7 @@ mod tests { protocol_v1::CollationProtocol::CollatorProtocol(wire_message), ) ) => { - assert_eq!(to, vec![test_state.validator_peer_id[2].clone()]); + assert_eq!(to, vec![peer]); assert_matches!( wire_message, protocol_v1::CollatorProtocolMessage::Collation(req_id, receipt, pov) => { @@ -1116,28 +1283,21 @@ mod tests { } ); - let new_head = Hash::repeat_byte(0xA); + let old_relay_parent = test_state.relay_parent; + test_state.advance_to_new_round(&mut virtual_overseer, false).await; - // Collator's view moves on. - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::NetworkBridgeUpdateV1( - NetworkBridgeEvent::OurViewChange(View(vec![new_head])), - ), - ).await; - - let request_id = 43; + let peer = test_state.validator_peer_id[2].clone(); // Re-request a collation. 
overseer_send( &mut virtual_overseer, CollatorProtocolMessage::NetworkBridgeUpdateV1( NetworkBridgeEvent::PeerMessage( - test_state.validator_peer_id[2].clone(), + peer.clone(), protocol_v1::CollatorProtocolMessage::RequestCollation( - request_id, - current, - test_state.chain_ids[0], + 43, + old_relay_parent, + test_state.para_id, ) ) ) @@ -1145,126 +1305,166 @@ mod tests { assert!(overseer_recv_with_timeout(&mut virtual_overseer, TIMEOUT).await.is_none()); - let pov_block = PoV { - block_data: BlockData(vec![45, 46, 47]), - }; - - let pov_hash = pov_block.hash(); - let current = Hash::repeat_byte(33); - - let candidate = TestCandidateBuilder { - para_id: test_state.chain_ids[0], - relay_parent: current, - pov_hash, - ..Default::default() - }.build(); - - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::NetworkBridgeUpdateV1( - NetworkBridgeEvent::OurViewChange(View(vec![current])), - ), - ).await; + let DistributeCollation { mut connected, .. } = + distribute_collation(&mut virtual_overseer, &test_state).await; + test_state.current_group_validator_authority_ids() + .into_iter() + .zip(test_state.current_group_validator_peer_ids()) + .for_each(|r| connected.try_send(r).unwrap()); // Send info about peer's view. overseer_send( &mut virtual_overseer, CollatorProtocolMessage::NetworkBridgeUpdateV1( NetworkBridgeEvent::PeerViewChange( - test_state.validator_peer_id[2].clone(), - View(vec![current]), + peer.clone(), + View(vec![test_state.relay_parent]), ) ) ).await; - overseer_send( - &mut virtual_overseer, - CollatorProtocolMessage::DistributeCollation(candidate.clone(), pov_block.clone()), - ).await; + expect_advertise_collation_msg(&mut virtual_overseer, &test_state, &peer, test_state.relay_parent).await; + }); + } - // obtain the availability cores. 
- assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - relay_parent, - RuntimeApiRequest::AvailabilityCores(tx) - )) => { - assert_eq!(relay_parent, current); - tx.send(Ok(test_state.availability_cores.clone())).unwrap(); - } - ); + /// This test ensures that we declare a collator at a validator by sending the `Declare` message as soon as the + /// collator is aware of the validator being connected. + #[test] + fn collators_are_registered_correctly_at_validators() { + let test_state = TestState::default(); - // Obtain the validator groups - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - relay_parent, - RuntimeApiRequest::ValidatorGroups(tx) - )) => { - assert_eq!(relay_parent, current); - tx.send(Ok(test_state.validator_groups.clone())).unwrap(); - } - ); + test_harness(test_state.our_collator_pair.public(), |test_harness| async move { + let mut virtual_overseer = test_harness.virtual_overseer; - // obtain the validators per relay parent - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - relay_parent, - RuntimeApiRequest::Validators(tx), - )) => { - assert_eq!(relay_parent, current); - tx.send(Ok(test_state.validator_public.clone())).unwrap(); - } - ); + let peer = test_state.validator_peer_id[0].clone(); + let validator_id = test_state.validator_authority_id[0].clone(); - // The peer is interested in a leaf that we have a collation for; - // advertise it. 
- assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::NetworkBridge( - NetworkBridgeMessage::SendCollationMessage( - to, - protocol_v1::CollationProtocol::CollatorProtocol(wire_message), - ) - ) => { - assert_eq!(to, vec![test_state.validator_peer_id[2].clone()]); - assert_matches!( - wire_message, - protocol_v1::CollatorProtocolMessage::AdvertiseCollation( - relay_parent, - collating_on, - ) => { - assert_eq!(relay_parent, current); - assert_eq!(collating_on, test_state.chain_ids[0]); - } - ); - } - ); + setup_system(&mut virtual_overseer, &test_state).await; - // obtain the validator_id to authority_id mapping - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - relay_parent, - RuntimeApiRequest::ValidatorDiscovery(validators, tx), - )) => { - assert_eq!(relay_parent, current); - assert_eq!(validators.len(), 4); - assert!(validators.contains(&test_state.validator_public[2])); - assert!(validators.contains(&test_state.validator_public[0])); - assert!(validators.contains(&test_state.validator_public[4])); - assert!(validators.contains(&test_state.validator_public[1])); - - let result = vec![ - Some(test_state.validator_authority_id[2].clone()), - Some(test_state.validator_authority_id[0].clone()), - Some(test_state.validator_authority_id[4].clone()), - Some(test_state.validator_authority_id[1].clone()), - ]; - tx.send(Ok(result)).unwrap(); - } - ); - }); + // A validator connected to us + connect_peer(&mut virtual_overseer, peer.clone()).await; + + let mut connected = distribute_collation(&mut virtual_overseer, &test_state).await.connected; + connected.try_send((validator_id, peer.clone())).unwrap(); + + expect_declare_msg(&mut virtual_overseer, &test_state, &peer).await; + }) + } + + #[test] + fn collations_are_only_advertised_to_validators_with_correct_view() { + let test_state = TestState::default(); + + test_harness(test_state.our_collator_pair.public(), |test_harness| 
async move { + let mut virtual_overseer = test_harness.virtual_overseer; + + let peer = test_state.current_group_validator_peer_ids()[0].clone(); + let validator_id = test_state.current_group_validator_authority_ids()[0].clone(); + + let peer2 = test_state.current_group_validator_peer_ids()[1].clone(); + let validator_id2 = test_state.current_group_validator_authority_ids()[1].clone(); + + setup_system(&mut virtual_overseer, &test_state).await; + + // A validator connected to us + connect_peer(&mut virtual_overseer, peer.clone()).await; + + // Connect the second validator + connect_peer(&mut virtual_overseer, peer2.clone()).await; + + // And let it tell us that it is has the same view. + send_peer_view_change(&mut virtual_overseer, &peer2, vec![test_state.relay_parent]).await; + + let mut connected = distribute_collation(&mut virtual_overseer, &test_state).await.connected; + connected.try_send((validator_id, peer.clone())).unwrap(); + connected.try_send((validator_id2, peer2.clone())).unwrap(); + + expect_declare_msg(&mut virtual_overseer, &test_state, &peer).await; + expect_declare_msg(&mut virtual_overseer, &test_state, &peer2).await; + + expect_advertise_collation_msg(&mut virtual_overseer, &test_state, &peer2, test_state.relay_parent).await; + + // The other validator announces that it changed its view. 
+ send_peer_view_change(&mut virtual_overseer, &peer, vec![test_state.relay_parent]).await; + + // After changing the view we should receive the advertisement + expect_advertise_collation_msg(&mut virtual_overseer, &test_state, &peer, test_state.relay_parent).await; + }) + } + + #[test] + fn collate_on_two_different_relay_chain_blocks() { + let mut test_state = TestState::default(); + + test_harness(test_state.our_collator_pair.public(), |test_harness| async move { + let mut virtual_overseer = test_harness.virtual_overseer; + + let peer = test_state.current_group_validator_peer_ids()[0].clone(); + let validator_id = test_state.current_group_validator_authority_ids()[0].clone(); + + let peer2 = test_state.current_group_validator_peer_ids()[1].clone(); + let validator_id2 = test_state.current_group_validator_authority_ids()[1].clone(); + + setup_system(&mut virtual_overseer, &test_state).await; + + // A validator connected to us + connect_peer(&mut virtual_overseer, peer.clone()).await; + + // Connect the second validator + connect_peer(&mut virtual_overseer, peer2.clone()).await; + + let mut connected = distribute_collation(&mut virtual_overseer, &test_state).await.connected; + connected.try_send((validator_id.clone(), peer.clone())).unwrap(); + connected.try_send((validator_id2.clone(), peer2.clone())).unwrap(); + + expect_declare_msg(&mut virtual_overseer, &test_state, &peer).await; + expect_declare_msg(&mut virtual_overseer, &test_state, &peer2).await; + + let old_relay_parent = test_state.relay_parent; + + // Advance to a new round, while informing the subsystem that the old and the new relay parent are active. 
+ test_state.advance_to_new_round(&mut virtual_overseer, true).await; + + let mut connected = distribute_collation(&mut virtual_overseer, &test_state).await.connected; + connected.try_send((validator_id, peer.clone())).unwrap(); + connected.try_send((validator_id2, peer2.clone())).unwrap(); + + send_peer_view_change(&mut virtual_overseer, &peer, vec![old_relay_parent]).await; + expect_advertise_collation_msg(&mut virtual_overseer, &test_state, &peer, old_relay_parent).await; + + send_peer_view_change(&mut virtual_overseer, &peer2, vec![test_state.relay_parent]).await; + expect_advertise_collation_msg(&mut virtual_overseer, &test_state, &peer2, test_state.relay_parent).await; + }) + } + + #[test] + fn validator_reconnect_does_not_advertise_a_second_time() { + let test_state = TestState::default(); + + test_harness(test_state.our_collator_pair.public(), |test_harness| async move { + let mut virtual_overseer = test_harness.virtual_overseer; + + let peer = test_state.current_group_validator_peer_ids()[0].clone(); + let validator_id = test_state.current_group_validator_authority_ids()[0].clone(); + + setup_system(&mut virtual_overseer, &test_state).await; + + // A validator connected to us + connect_peer(&mut virtual_overseer, peer.clone()).await; + + let mut connected = distribute_collation(&mut virtual_overseer, &test_state).await.connected; + connected.try_send((validator_id.clone(), peer.clone())).unwrap(); + + expect_declare_msg(&mut virtual_overseer, &test_state, &peer).await; + send_peer_view_change(&mut virtual_overseer, &peer, vec![test_state.relay_parent]).await; + expect_advertise_collation_msg(&mut virtual_overseer, &test_state, &peer, test_state.relay_parent).await; + + // Disconnect and reconnect directly + disconnect_peer(&mut virtual_overseer, peer.clone()).await; + connect_peer(&mut virtual_overseer, peer.clone()).await; + send_peer_view_change(&mut virtual_overseer, &peer, vec![test_state.relay_parent]).await; + + 
assert!(overseer_recv_with_timeout(&mut virtual_overseer, TIMEOUT).await.is_none()); + }) } } diff --git a/node/network/collator-protocol/src/lib.rs b/node/network/collator-protocol/src/lib.rs index 506dd7dcb2a311a3005a5997f2c30b74b5dc1492..ba1147fef94f55aa052da0b96694abe2131e697e 100644 --- a/node/network/collator-protocol/src/lib.rs +++ b/node/network/collator-protocol/src/lib.rs @@ -18,10 +18,10 @@ //! This subsystem implements both sides of the collator protocol. #![deny(missing_docs, unused_crate_dependencies)] +#![recursion_limit="256"] use std::time::Duration; -use futures::{channel::oneshot, FutureExt}; -use log::trace; +use futures::{channel::oneshot, FutureExt, TryFutureExt}; use thiserror::Error; use polkadot_subsystem::{ @@ -43,7 +43,7 @@ use polkadot_node_subsystem_util::{ mod collator_side; mod validator_side; -const TARGET: &'static str = "colp"; +const LOG_TARGET: &'static str = "collator_protocol"; const REQUEST_TIMEOUT: Duration = Duration::from_secs(1); #[derive(Debug, Error)] @@ -60,16 +60,6 @@ enum Error { Prometheus(#[from] prometheus::PrometheusError), } -impl From for Error { - fn from(me: util::validator_discovery::Error) -> Self { - match me { - util::validator_discovery::Error::Subsystem(s) => Error::Subsystem(s), - util::validator_discovery::Error::RuntimeApi(ra) => Error::RuntimeApi(ra), - util::validator_discovery::Error::Oneshot(c) => Error::Oneshot(c), - } - } -} - type Result = std::result::Result; /// What side of the collator protocol is being engaged @@ -96,17 +86,18 @@ impl CollatorProtocolSubsystem { } } + #[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))] async fn run(self, ctx: Context) -> Result<()> where Context: SubsystemContext, { match self.protocol_side { - ProtocolSide::Validator(metrics) => validator_side::run( + ProtocolSide::Validator(metrics) => validator_side::run( ctx, REQUEST_TIMEOUT, metrics, ).await, - ProtocolSide::Collator(id, metrics) => collator_side::run( + ProtocolSide::Collator(id, 
metrics) => collator_side::run( ctx, id, metrics, @@ -122,26 +113,32 @@ where Context: SubsystemContext + Sync + Send, { fn start(self, ctx: Context) -> SpawnedSubsystem { + let future = self + .run(ctx) + .map_err(|e| SubsystemError::with_origin("collator-protocol", e)) + .boxed(); + SpawnedSubsystem { name: "collator-protocol-subsystem", - future: self.run(ctx).map(|_| ()).boxed(), + future, } } } /// Modify the reputation of a peer based on its behavior. -async fn modify_reputation(ctx: &mut Context, peer: PeerId, rep: Rep) -> Result<()> +#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] +async fn modify_reputation(ctx: &mut Context, peer: PeerId, rep: Rep) where Context: SubsystemContext, { - trace!( - target: TARGET, - "Reputation change of {:?} for peer {:?}", rep, peer, + tracing::trace!( + target: LOG_TARGET, + rep = ?rep, + peer_id = %peer, + "reputation change for peer", ); ctx.send_message(AllMessages::NetworkBridge( NetworkBridgeMessage::ReportPeer(peer, rep), - )).await?; - - Ok(()) + )).await; } diff --git a/node/network/collator-protocol/src/validator_side.rs b/node/network/collator-protocol/src/validator_side.rs index fa3ce8e8f19d943f2f09066ab9c869ff8deef63d..3af5aba5e415497050a481897a41ef2aeeab2802 100644 --- a/node/network/collator-protocol/src/validator_side.rs +++ b/node/network/collator-protocol/src/validator_side.rs @@ -20,11 +20,11 @@ use std::task::Poll; use futures::{ StreamExt, + FutureExt, channel::oneshot, future::BoxFuture, stream::FuturesUnordered, }; -use log::{trace, warn}; use polkadot_primitives::v1::{ Id as ParaId, CandidateReceipt, CollatorId, Hash, PoV, @@ -44,7 +44,7 @@ use polkadot_node_subsystem_util::{ metrics::{self, prometheus}, }; -use super::{modify_reputation, TARGET, Result}; +use super::{modify_reputation, LOG_TARGET, Result}; const COST_UNEXPECTED_MESSAGE: Rep = Rep::new(-10, "An unexpected message"); const COST_REQUEST_TIMED_OUT: Rep = Rep::new(-20, "A collation request has timed 
out"); @@ -63,11 +63,23 @@ impl Metrics { } } } + + /// Provide a timer for `process_msg` which observes on drop. + fn time_process_msg(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.process_msg.start_timer()) + } + + /// Provide a timer for `handle_collation_request_result` which observes on drop. + fn time_handle_collation_request_result(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.handle_collation_request_result.start_timer()) + } } #[derive(Clone)] struct MetricsInner { collation_requests: prometheus::CounterVec, + process_msg: prometheus::Histogram, + handle_collation_request_result: prometheus::Histogram, } impl metrics::Metrics for Metrics { @@ -84,7 +96,25 @@ impl metrics::Metrics for Metrics { &["success"], )?, registry, - )? + )?, + process_msg: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_collator_protocol_validator_process_msg", + "Time spent within `collator_protocol_validator::process_msg`", + ) + )?, + registry, + )?, + handle_collation_request_result: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_collator_protocol_validator_handle_collation_request_result", + "Time spent within `collator_protocol_validator::handle_collation_request_result`", + ) + )?, + registry, + )?, }; Ok(Metrics(Some(metrics))) @@ -122,7 +152,6 @@ impl CollationRequest { request_id, } = self; - match received.timeout(timeout).await { None => Timeout(request_id), Some(_) => Received(request_id), @@ -188,6 +217,7 @@ struct State { } /// Another subsystem has requested to fetch collations on a particular leaf for some para. 
+#[tracing::instrument(level = "trace", skip(ctx, state, tx), fields(subsystem = LOG_TARGET))] async fn fetch_collation( ctx: &mut Context, state: &mut State, @@ -195,7 +225,7 @@ async fn fetch_collation( collator_id: CollatorId, para_id: ParaId, tx: oneshot::Sender<(CandidateReceipt, PoV)> -) -> Result<()> +) where Context: SubsystemContext { @@ -206,12 +236,13 @@ where if let Err(e) = tx.send((collation.1.clone(), collation.2.clone())) { // We do not want this to be fatal because the receving subsystem // may have closed the results channel for some reason. - trace!( - target: TARGET, - "Failed to send collation: {:?}", e, + tracing::trace!( + target: LOG_TARGET, + err = ?e, + "Failed to send collation", ); } - return Ok(()); + return; } } } @@ -231,18 +262,17 @@ where // Request the collation. // Assume it is `request_collation`'s job to check and ignore duplicate requests. if let Some(relevant_advertiser) = relevant_advertiser { - request_collation(ctx, state, relay_parent, para_id, relevant_advertiser, tx).await?; + request_collation(ctx, state, relay_parent, para_id, relevant_advertiser, tx).await; } - - Ok(()) } /// Report a collator for some malicious actions. +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn report_collator( ctx: &mut Context, state: &mut State, id: CollatorId, -) -> Result<()> +) where Context: SubsystemContext { @@ -251,34 +281,32 @@ where // is a tolerable thing to do. for (k, v) in state.known_collators.iter() { if *v == id { - modify_reputation(ctx, k.clone(), COST_REPORT_BAD).await?; + modify_reputation(ctx, k.clone(), COST_REPORT_BAD).await; } } - - Ok(()) } /// Some other subsystem has reported a collator as a good one, bump reputation. 
+#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn note_good_collation( ctx: &mut Context, state: &mut State, id: CollatorId, -) -> Result<()> +) where Context: SubsystemContext { for (peer_id, collator_id) in state.known_collators.iter() { if id == *collator_id { - modify_reputation(ctx, peer_id.clone(), BENEFIT_NOTIFY_GOOD).await?; + modify_reputation(ctx, peer_id.clone(), BENEFIT_NOTIFY_GOOD).await; } } - - Ok(()) } /// A peer's view has changed. A number of things should be done: /// - Ongoing collation requests have to be cancelled. /// - Advertisements by this peer that are no longer relevant have to be removed. +#[tracing::instrument(level = "trace", skip(state), fields(subsystem = LOG_TARGET))] async fn handle_peer_view_change( state: &mut State, peer_id: PeerId, @@ -320,6 +348,7 @@ async fn handle_peer_view_change( /// - Cancel all ongoing requests /// - Reply to interested parties if any /// - Store collation. +#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(subsystem = LOG_TARGET))] async fn received_collation( ctx: &mut Context, state: &mut State, @@ -327,7 +356,7 @@ async fn received_collation( request_id: RequestId, receipt: CandidateReceipt, pov: PoV, -) -> Result<()> +) where Context: SubsystemContext { @@ -355,11 +384,9 @@ where // If this collation is not just a delayed one that we were expecting, // but our view has moved on, in that case modify peer's reputation. if !state.recently_removed_heads.contains(&relay_parent) { - modify_reputation(ctx, origin, COST_UNEXPECTED_MESSAGE).await?; + modify_reputation(ctx, origin, COST_UNEXPECTED_MESSAGE).await; } } - - Ok(()) } /// Request a collation from the network. @@ -368,6 +395,7 @@ where /// - Check if the requested collation is in our view. /// - Update PerRequest records with the `result` field if necessary. /// And as such invocations of this function may rely on that. 
+#[tracing::instrument(level = "trace", skip(ctx, state, result), fields(subsystem = LOG_TARGET))] async fn request_collation( ctx: &mut Context, state: &mut State, @@ -375,26 +403,30 @@ async fn request_collation( para_id: ParaId, peer_id: PeerId, result: oneshot::Sender<(CandidateReceipt, PoV)>, -) -> Result<()> +) where Context: SubsystemContext { if !state.view.contains(&relay_parent) { - trace!( - target: TARGET, - "Collation by {} on {} on relay parent {} is no longer in view", - peer_id, para_id, relay_parent, + tracing::trace!( + target: LOG_TARGET, + peer_id = %peer_id, + para_id = %para_id, + relay_parent = %relay_parent, + "collation is no longer in view", ); - return Ok(()); + return; } if state.requested_collations.contains_key(&(relay_parent, para_id.clone(), peer_id.clone())) { - trace!( - target: TARGET, - "Collation by {} on {} on relay parent {} has already been requested", - peer_id, para_id, relay_parent, + tracing::trace!( + target: LOG_TARGET, + peer_id = %peer_id, + para_id = %para_id, + relay_parent = %relay_parent, + "collation has already been requested", ); - return Ok(()); + return; } let request_id = state.next_request_id; @@ -417,9 +449,7 @@ where state.requests_info.insert(request_id, per_request); - state.requests_in_progress.push(Box::pin(async move { - request.wait().await - })); + state.requests_in_progress.push(request.wait().boxed()); let wire_message = protocol_v1::CollatorProtocolMessage::RequestCollation( request_id, @@ -432,18 +462,17 @@ where vec![peer_id], protocol_v1::CollationProtocol::CollatorProtocol(wire_message), ) - )).await?; - - Ok(()) + )).await; } /// Notify `CandidateSelectionSubsystem` that a collation has been advertised. 
+#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn notify_candidate_selection( ctx: &mut Context, collator: CollatorId, relay_parent: Hash, para_id: ParaId, -) -> Result<()> +) where Context: SubsystemContext { @@ -453,18 +482,17 @@ where para_id, collator, ) - )).await?; - - Ok(()) + )).await; } /// Networking message has been received. +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn process_incoming_peer_message( ctx: &mut Context, state: &mut State, origin: PeerId, msg: protocol_v1::CollatorProtocolMessage, -)-> Result<()> +) where Context: SubsystemContext { @@ -479,24 +507,23 @@ where state.advertisements.entry(origin.clone()).or_default().insert((para_id, relay_parent)); if let Some(collator) = state.known_collators.get(&origin) { - notify_candidate_selection(ctx, collator.clone(), relay_parent, para_id).await?; + notify_candidate_selection(ctx, collator.clone(), relay_parent, para_id).await; } } RequestCollation(_, _, _) => { // This is a validator side of the protocol, collation requests are not expected here. - return modify_reputation(ctx, origin, COST_UNEXPECTED_MESSAGE).await; + modify_reputation(ctx, origin, COST_UNEXPECTED_MESSAGE).await; } Collation(request_id, receipt, pov) => { - received_collation(ctx, state, origin, request_id, receipt, pov).await?; + received_collation(ctx, state, origin, request_id, receipt, pov).await; } } - - Ok(()) } /// A leaf has become inactive so we want to /// - Cancel all ongoing collation requests that are on top of that leaf. /// - Remove all stored collations relevant to that leaf. +#[tracing::instrument(level = "trace", skip(state), fields(subsystem = LOG_TARGET))] async fn remove_relay_parent( state: &mut State, relay_parent: Hash, @@ -522,6 +549,7 @@ async fn remove_relay_parent( } /// Our view has changed. 
+#[tracing::instrument(level = "trace", skip(state), fields(subsystem = LOG_TARGET))] async fn handle_our_view_change( state: &mut State, view: View, @@ -545,11 +573,12 @@ async fn handle_our_view_change( } /// A request has timed out. +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn request_timed_out( ctx: &mut Context, state: &mut State, id: RequestId, -) -> Result<()> +) where Context: SubsystemContext { @@ -561,15 +590,14 @@ where if let Some(_) = state.requests_info.remove(&id) { let peer_id = key.2; - modify_reputation(ctx, peer_id, COST_REQUEST_TIMED_OUT).await?; + modify_reputation(ctx, peer_id, COST_REQUEST_TIMED_OUT).await; } } } - - Ok(()) } /// Bridge event switch. +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn handle_network_msg( ctx: &mut Context, state: &mut State, @@ -595,7 +623,7 @@ where handle_our_view_change(state, view).await?; }, PeerMessage(remote, msg) => { - process_incoming_peer_message(ctx, state, remote, msg).await?; + process_incoming_peer_message(ctx, state, remote, msg).await; } } @@ -603,37 +631,41 @@ where } /// The main message receiver switch. 
+#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn process_msg( ctx: &mut Context, msg: CollatorProtocolMessage, state: &mut State, -) -> Result<()> +) where Context: SubsystemContext { use CollatorProtocolMessage::*; + let _timer = state.metrics.time_process_msg(); + match msg { CollateOn(id) => { - warn!( - target: TARGET, - "CollateOn({}) message is not expected on the validator side of the protocol", id, + tracing::warn!( + target: LOG_TARGET, + para_id = %id, + "CollateOn message is not expected on the validator side of the protocol", ); } DistributeCollation(_, _) => { - warn!( - target: TARGET, + tracing::warn!( + target: LOG_TARGET, "DistributeCollation message is not expected on the validator side of the protocol", ); } FetchCollation(relay_parent, collator_id, para_id, tx) => { - fetch_collation(ctx, state, relay_parent, collator_id, para_id, tx).await?; + fetch_collation(ctx, state, relay_parent, collator_id, para_id, tx).await; } ReportCollator(id) => { - report_collator(ctx, state, id).await?; + report_collator(ctx, state, id).await; } NoteGoodCollation(id) => { - note_good_collation(ctx, state, id).await?; + note_good_collation(ctx, state, id).await; } NetworkBridgeUpdateV1(event) => { if let Err(e) = handle_network_msg( @@ -641,18 +673,18 @@ where state, event, ).await { - warn!( - target: TARGET, - "Failed to handle incoming network message: {:?}", e, + tracing::warn!( + target: LOG_TARGET, + err = ?e, + "Failed to handle incoming network message", ); } } } - - Ok(()) } /// The main run loop. 
+#[tracing::instrument(skip(ctx, metrics), fields(subsystem = LOG_TARGET))] pub(crate) async fn run( mut ctx: Context, request_timeout: Duration, @@ -673,10 +705,10 @@ where loop { if let Poll::Ready(msg) = futures::poll!(ctx.recv()) { let msg = msg?; - trace!(target: TARGET, "Received a message {:?}", msg); + tracing::trace!(target: LOG_TARGET, msg = ?msg, "received a message"); match msg { - Communication { msg } => process_msg(&mut ctx, msg, &mut state).await?, + Communication { msg } => process_msg(&mut ctx, msg, &mut state).await, Signal(BlockFinalized(_)) => {} Signal(ActiveLeaves(_)) => {} Signal(Conclude) => { break } @@ -685,12 +717,14 @@ where } while let Poll::Ready(Some(request)) = futures::poll!(state.requests_in_progress.next()) { + let _timer = state.metrics.time_handle_collation_request_result(); + // Request has timed out, we need to penalize the collator and re-send the request // if the chain has not moved on yet. match request { CollationRequestResult::Timeout(id) => { - trace!(target: TARGET, "Request timed out {}", id); - request_timed_out(&mut ctx, &mut state, id).await?; + tracing::trace!(target: LOG_TARGET, id, "request timed out"); + request_timed_out(&mut ctx, &mut state, id).await; } CollationRequestResult::Received(id) => { state.requests_info.remove(&id); @@ -761,7 +795,7 @@ mod tests { log::LevelFilter::Trace, ) .filter( - Some(TARGET), + Some(LOG_TARGET), log::LevelFilter::Trace, ) .try_init(); @@ -786,7 +820,7 @@ mod tests { overseer: &mut test_helpers::TestSubsystemContextHandle, msg: CollatorProtocolMessage, ) { - log::trace!("Sending message:\n{:?}", &msg); + tracing::trace!("Sending message:\n{:?}", &msg); overseer .send(FromOverseer::Communication { msg }) .timeout(TIMEOUT) @@ -801,7 +835,7 @@ mod tests { .await .expect(&format!("{:?} is enough to receive messages.", TIMEOUT)); - log::trace!("Received message:\n{:?}", &msg); + tracing::trace!("Received message:\n{:?}", &msg); msg } @@ -810,7 +844,7 @@ mod tests { overseer: &mut 
test_helpers::TestSubsystemContextHandle, timeout: Duration, ) -> Option { - log::trace!("Waiting for message..."); + tracing::trace!("Waiting for message..."); overseer .recv() .timeout(timeout) @@ -828,7 +862,7 @@ mod tests { } = test_harness; let pair = CollatorPair::generate().0; - log::trace!("activating"); + tracing::trace!("activating"); overseer_send( &mut virtual_overseer, diff --git a/node/network/pov-distribution/Cargo.toml b/node/network/pov-distribution/Cargo.toml index 2449a63016f72c2ee590a3d5f1195c39b3898399..ee0cbb39e40c3982978c486e6ad902d8451e5463 100644 --- a/node/network/pov-distribution/Cargo.toml +++ b/node/network/pov-distribution/Cargo.toml @@ -5,14 +5,23 @@ authors = ["Parity Technologies "] edition = "2018" [dependencies] -futures = "0.3.5" -log = "0.4.11" +futures = "0.3.8" +thiserror = "1.0.21" +tracing = "0.1.22" +tracing-futures = "0.2.4" + polkadot-primitives = { path = "../../../primitives" } polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } polkadot-node-network-protocol = { path = "../../network/protocol" } [dev-dependencies] -assert_matches = "1.3.0" -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +assert_matches = "1.4.0" +env_logger = "0.8.1" +log = "0.4.11" +smallvec = "1.5.1" + +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } + polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } diff --git a/runtime/parachains/src/validity.rs b/node/network/pov-distribution/src/error.rs similarity index 58% rename from runtime/parachains/src/validity.rs rename to node/network/pov-distribution/src/error.rs index 1f45de2df705364ccbaa6a14810b43d0900ee2e2..754c1e56c5234828305214742a77d66989b1c302 100644 --- 
a/runtime/parachains/src/validity.rs +++ b/node/network/pov-distribution/src/error.rs @@ -13,3 +13,21 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . + +//! The `Error` and `Result` types used by the subsystem. + +use thiserror::Error; + +#[derive(Debug, Error)] +pub enum Error { + #[error(transparent)] + Subsystem(#[from] polkadot_subsystem::SubsystemError), + #[error(transparent)] + OneshotRecv(#[from] futures::channel::oneshot::Canceled), + #[error(transparent)] + Runtime(#[from] polkadot_subsystem::errors::RuntimeApiError), + #[error(transparent)] + Util(#[from] polkadot_node_subsystem_util::Error), +} + +pub type Result = std::result::Result; diff --git a/node/network/pov-distribution/src/lib.rs b/node/network/pov-distribution/src/lib.rs index 2a6f81361a6021b34c4a436eda5ab278e2256016..9d8df343b2762cc111bc42f5dc49eccf58beb168 100644 --- a/node/network/pov-distribution/src/lib.rs +++ b/node/network/pov-distribution/src/lib.rs @@ -22,15 +22,21 @@ #![deny(unused_crate_dependencies)] #![warn(missing_docs)] -use polkadot_primitives::v1::{Hash, PoV, CandidateDescriptor}; +use polkadot_primitives::v1::{ + Hash, PoV, CandidateDescriptor, ValidatorId, Id as ParaId, CoreIndex, CoreState, +}; use polkadot_subsystem::{ - ActiveLeavesUpdate, OverseerSignal, SubsystemContext, Subsystem, SubsystemResult, SubsystemError, + ActiveLeavesUpdate, OverseerSignal, SubsystemContext, SubsystemResult, SubsystemError, Subsystem, FromOverseer, SpawnedSubsystem, messages::{ - PoVDistributionMessage, RuntimeApiMessage, RuntimeApiRequest, AllMessages, NetworkBridgeMessage, + PoVDistributionMessage, AllMessages, NetworkBridgeMessage, }, }; use polkadot_node_subsystem_util::{ + validator_discovery, + request_validators_ctx, + request_validator_groups_ctx, + request_availability_cores_ctx, metrics::{self, prometheus}, }; use polkadot_node_network_protocol::{ @@ -43,6 +49,11 @@ use futures::channel::oneshot; use 
std::collections::{hash_map::{Entry, HashMap}, HashSet}; use std::sync::Arc; +mod error; + +#[cfg(test)] +mod tests; + const COST_APPARENT_FLOOD: Rep = Rep::new(-500, "Peer appears to be flooding us with PoV requests"); const COST_UNEXPECTED_POV: Rep = Rep::new(-500, "Peer sent us an unexpected PoV"); const COST_AWAITED_NOT_IN_VIEW: Rep @@ -52,6 +63,8 @@ const BENEFIT_FRESH_POV: Rep = Rep::new(25, "Peer supplied us with an awaited Po const BENEFIT_LATE_POV: Rep = Rep::new(10, "Peer supplied us with an awaited PoV, \ but was not the first to do so"); +const LOG_TARGET: &str = "pov_distribution"; + /// The PoV Distribution Subsystem. pub struct PoVDistribution { // Prometheus metrics @@ -66,7 +79,6 @@ impl Subsystem for PoVDistribution // within `run`. let future = self.run(ctx) .map_err(|e| SubsystemError::with_origin("pov-distribution", e)) - .map(|_| ()) .boxed(); SpawnedSubsystem { name: "pov-distribution-subsystem", @@ -75,20 +87,33 @@ impl Subsystem for PoVDistribution } } +#[derive(Default)] struct State { + /// A state of things going on on a per-relay-parent basis. relay_parent_state: HashMap, + + /// Info on peers. peer_state: HashMap, + + /// Our own view. our_view: View, + + /// Connect to relevant groups of validators at different relay parents. + connection_requests: validator_discovery::ConnectionRequests, + + /// Metrics. metrics: Metrics, } struct BlockBasedState { known: HashMap>, + /// All the PoVs we are or were fetching, coupled with channels expecting the data. /// /// This may be an empty list, which indicates that we were once awaiting this PoV but have /// received it already. fetching: HashMap>>>, + n_validators: usize, } @@ -116,6 +141,7 @@ fn send_pov_message(relay_parent: Hash, pov_hash: Hash, pov: PoV) /// Handles the signal. If successful, returns `true` if the subsystem should conclude, /// `false` otherwise. 
+#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn handle_signal( state: &mut State, ctx: &mut impl SubsystemContext, @@ -124,38 +150,48 @@ async fn handle_signal( match signal { OverseerSignal::Conclude => Ok(true), OverseerSignal::ActiveLeaves(ActiveLeavesUpdate { activated, deactivated }) => { - for relay_parent in activated { - let (vals_tx, vals_rx) = oneshot::channel(); - ctx.send_message(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - relay_parent, - RuntimeApiRequest::Validators(vals_tx), - ))).await?; + let _timer = state.metrics.time_handle_signal(); - let n_validators = match vals_rx.await? { - Ok(v) => v.len(), + for relay_parent in activated { + match request_validators_ctx(relay_parent.clone(), ctx).await { + Ok(vals_rx) => { + let n_validators = match vals_rx.await? { + Ok(v) => v.len(), + Err(e) => { + tracing::warn!( + target: LOG_TARGET, + err = ?e, + "Error fetching validators from runtime API for active leaf", + ); + + // Not adding bookkeeping here might make us behave funny, but we + // shouldn't take down the node on spurious runtime API errors. + // + // and this is "behave funny" as in be bad at our job, but not in any + // slashable or security-related way. + continue; + } + }; + + state.relay_parent_state.insert(relay_parent, BlockBasedState { + known: HashMap::new(), + fetching: HashMap::new(), + n_validators, + }); + } Err(e) => { - log::warn!(target: "pov_distribution", - "Error fetching validators from runtime API for active leaf: {:?}", - e + // continue here also as above. + tracing::warn!( + target: LOG_TARGET, + err = ?e, + "Error fetching validators from runtime API for active leaf", ); - - // Not adding bookkeeping here might make us behave funny, but we - // shouldn't take down the node on spurious runtime API errors. - // - // and this is "behave funny" as in be bad at our job, but not in any - // slashable or security-related way. 
- continue; } - }; - - state.relay_parent_state.insert(relay_parent, BlockBasedState { - known: HashMap::new(), - fetching: HashMap::new(), - n_validators: n_validators, - }); + } } for relay_parent in deactivated { + state.connection_requests.remove(&relay_parent); state.relay_parent_state.remove(&relay_parent); } @@ -168,12 +204,13 @@ async fn handle_signal( /// Notify peers that we are awaiting a given PoV hash. /// /// This only notifies peers who have the relay parent in their view. +#[tracing::instrument(level = "trace", skip(peers, ctx), fields(subsystem = LOG_TARGET))] async fn notify_all_we_are_awaiting( peers: &mut HashMap, ctx: &mut impl SubsystemContext, relay_parent: Hash, pov_hash: Hash, -) -> SubsystemResult<()> { +) { // We use `awaited` as a proxy for which heads are in the peer's view. let peers_to_send: Vec<_> = peers.iter() .filter_map(|(peer, state)| if state.awaited.contains_key(&relay_parent) { @@ -183,23 +220,26 @@ async fn notify_all_we_are_awaiting( }) .collect(); - if peers_to_send.is_empty() { return Ok(()) } + if peers_to_send.is_empty() { + return; + } let payload = awaiting_message(relay_parent, vec![pov_hash]); ctx.send_message(AllMessages::NetworkBridge(NetworkBridgeMessage::SendValidationMessage( peers_to_send, payload, - ))).await + ))).await; } /// Notify one peer about everything we're awaiting at a given relay-parent. 
+#[tracing::instrument(level = "trace", skip(ctx, relay_parent_state), fields(subsystem = LOG_TARGET))] async fn notify_one_we_are_awaiting_many( peer: &PeerId, ctx: &mut impl SubsystemContext, relay_parent_state: &HashMap, relay_parent: Hash, -) -> SubsystemResult<()> { +) { let awaiting_hashes = relay_parent_state.get(&relay_parent).into_iter().flat_map(|s| { // Send the peer everything we are fetching at this relay-parent s.fetching.iter() @@ -207,17 +247,20 @@ async fn notify_one_we_are_awaiting_many( .map(|(pov_hash, _)| *pov_hash) }).collect::>(); - if awaiting_hashes.is_empty() { return Ok(()) } + if awaiting_hashes.is_empty() { + return; + } let payload = awaiting_message(relay_parent, awaiting_hashes); ctx.send_message(AllMessages::NetworkBridge(NetworkBridgeMessage::SendValidationMessage( vec![peer.clone()], payload, - ))).await + ))).await; } /// Distribute a PoV to peers who are awaiting it. +#[tracing::instrument(level = "trace", skip(peers, ctx, metrics, pov), fields(subsystem = LOG_TARGET))] async fn distribute_to_awaiting( peers: &mut HashMap, ctx: &mut impl SubsystemContext, @@ -225,7 +268,7 @@ async fn distribute_to_awaiting( relay_parent: Hash, pov_hash: Hash, pov: &PoV, -) -> SubsystemResult<()> { +) { // Send to all peers who are awaiting the PoV and have that relay-parent in their view. // // Also removes it from their awaiting set. @@ -239,36 +282,106 @@ async fn distribute_to_awaiting( })) .collect(); - if peers_to_send.is_empty() { return Ok(()) } + if peers_to_send.is_empty() { return; } let payload = send_pov_message(relay_parent, pov_hash, pov.clone()); ctx.send_message(AllMessages::NetworkBridge(NetworkBridgeMessage::SendValidationMessage( peers_to_send, payload, - ))).await?; + ))).await; metrics.on_pov_distributed(); +} + +/// Get the Id of the Core that is assigned to the para being collated on if any +/// and the total number of cores. 
+async fn determine_core( + ctx: &mut impl SubsystemContext, + para_id: ParaId, + relay_parent: Hash, +) -> error::Result> { + let cores = request_availability_cores_ctx(relay_parent, ctx).await?.await??; + + for (idx, core) in cores.iter().enumerate() { + if let CoreState::Scheduled(occupied) = core { + if occupied.para_id == para_id { + return Ok(Some(((idx as u32).into(), cores.len()))); + } + } + } - Ok(()) + Ok(None) +} + +/// Figure out a group of validators assigned to a given `ParaId`. +async fn determine_validators_for_core( + ctx: &mut impl SubsystemContext, + core_index: CoreIndex, + num_cores: usize, + relay_parent: Hash, +) -> error::Result>> { + let groups = request_validator_groups_ctx(relay_parent, ctx).await?.await??; + + let group_index = groups.1.group_for_core(core_index, num_cores); + + let connect_to_validators = match groups.0.get(group_index.0 as usize) { + Some(group) => group.clone(), + None => return Ok(None), + }; + + let validators = request_validators_ctx(relay_parent, ctx).await?.await??; + + let validators = connect_to_validators + .into_iter() + .map(|idx| validators[idx as usize].clone()) + .collect(); + + Ok(Some(validators)) +} + +async fn determine_relevant_validators( + ctx: &mut impl SubsystemContext, + relay_parent: Hash, + para_id: ParaId, +) -> error::Result>> { + // Determine which core the para_id is assigned to. + let (core, num_cores) = match determine_core(ctx, para_id, relay_parent).await? { + Some(core) => core, + None => { + tracing::warn!( + target: LOG_TARGET, + "Looks like no core is assigned to {:?} at {:?}", + para_id, + relay_parent, + ); + + return Ok(None); + } + }; + + determine_validators_for_core(ctx, core, num_cores, relay_parent).await } /// Handles a `FetchPoV` message. 
+#[tracing::instrument(level = "trace", skip(ctx, state, response_sender), fields(subsystem = LOG_TARGET))] async fn handle_fetch( state: &mut State, ctx: &mut impl SubsystemContext, relay_parent: Hash, descriptor: CandidateDescriptor, response_sender: oneshot::Sender>, -) -> SubsystemResult<()> { +) { + let _timer = state.metrics.time_handle_fetch(); + let relay_parent_state = match state.relay_parent_state.get_mut(&relay_parent) { Some(s) => s, - None => return Ok(()), + None => return, }; if let Some(pov) = relay_parent_state.known.get(&descriptor.pov_hash) { let _ = response_sender.send(pov.clone()); - return Ok(()); + return; } { @@ -276,18 +389,48 @@ async fn handle_fetch( Entry::Occupied(mut e) => { // we are already awaiting this PoV if there is an entry. e.get_mut().push(response_sender); - return Ok(()); + return; } Entry::Vacant(e) => { - e.insert(vec![response_sender]); + if let Ok(Some(relevant_validators)) = determine_relevant_validators( + ctx, + relay_parent, + descriptor.para_id, + ).await { + // We only need one connection request per (relay_parent, para_id) + // so here we take this shortcut to avoid calling `connect_to_validators` + // more than once. 
+ if !state.connection_requests.contains_request(&relay_parent) { + match validator_discovery::connect_to_validators( + ctx, + relay_parent, + relevant_validators.clone(), + ).await { + Ok(new_connection_request) => { + state.connection_requests.put(relay_parent, new_connection_request); + } + Err(e) => { + tracing::debug!( + target: LOG_TARGET, + "Failed to create a validator connection request {:?}", + e, + ); + } + } + } + + e.insert(vec![response_sender]); + } } } } if relay_parent_state.fetching.len() > 2 * relay_parent_state.n_validators { - log::warn!("Other subsystems have requested PoV distribution to \ - fetch more PoVs than reasonably expected: {}", relay_parent_state.fetching.len()); - return Ok(()); + tracing::warn!( + relay_parent_state.fetching.len = relay_parent_state.fetching.len(), + "other subsystems have requested PoV distribution to fetch more PoVs than reasonably expected", + ); + return; } // Issue an `Awaiting` message to all peers with this in their view. @@ -300,16 +443,19 @@ async fn handle_fetch( } /// Handles a `DistributePoV` message. +#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(subsystem = LOG_TARGET))] async fn handle_distribute( state: &mut State, ctx: &mut impl SubsystemContext, relay_parent: Hash, descriptor: CandidateDescriptor, pov: Arc, -) -> SubsystemResult<()> { +) { + let _timer = state.metrics.time_handle_distribute(); + let relay_parent_state = match state.relay_parent_state.get_mut(&relay_parent) { - None => return Ok(()), Some(s) => s, + None => return, }; if let Some(our_awaited) = relay_parent_state.fetching.get_mut(&descriptor.pov_hash) { @@ -335,31 +481,33 @@ async fn handle_distribute( } /// Report a reputation change for a peer. 
+#[tracing::instrument(level = "trace", skip(ctx), fields(subsystem = LOG_TARGET))] async fn report_peer( ctx: &mut impl SubsystemContext, peer: PeerId, rep: Rep, -) -> SubsystemResult<()> { +) { ctx.send_message(AllMessages::NetworkBridge(NetworkBridgeMessage::ReportPeer(peer, rep))).await } /// Handle a notification from a peer that they are awaiting some PoVs. +#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn handle_awaiting( state: &mut State, ctx: &mut impl SubsystemContext, peer: PeerId, relay_parent: Hash, pov_hashes: Vec, -) -> SubsystemResult<()> { +) { if !state.our_view.0.contains(&relay_parent) { - report_peer(ctx, peer, COST_AWAITED_NOT_IN_VIEW).await?; - return Ok(()); + report_peer(ctx, peer, COST_AWAITED_NOT_IN_VIEW).await; + return; } let relay_parent_state = match state.relay_parent_state.get_mut(&relay_parent) { None => { - log::warn!("PoV Distribution relay parent state out-of-sync with our view"); - return Ok(()); + tracing::warn!("PoV Distribution relay parent state out-of-sync with our view"); + return; } Some(s) => s, }; @@ -368,8 +516,8 @@ async fn handle_awaiting( state.peer_state.get_mut(&peer).and_then(|s| s.awaited.get_mut(&relay_parent)) { None => { - report_peer(ctx, peer, COST_AWAITED_NOT_IN_VIEW).await?; - return Ok(()); + report_peer(ctx, peer, COST_AWAITED_NOT_IN_VIEW).await; + return; } Some(a) => a, }; @@ -383,21 +531,20 @@ async fn handle_awaiting( let payload = send_pov_message(relay_parent, pov_hash, (&**pov).clone()); ctx.send_message(AllMessages::NetworkBridge( NetworkBridgeMessage::SendValidationMessage(vec![peer.clone()], payload) - )).await?; + )).await; } else { peer_awaiting.insert(pov_hash); } } } else { - report_peer(ctx, peer, COST_APPARENT_FLOOD).await?; + report_peer(ctx, peer, COST_APPARENT_FLOOD).await; } - - Ok(()) } /// Handle an incoming PoV from our peer. Reports them if unexpected, rewards them if not. /// /// Completes any requests awaiting that PoV. 
+#[tracing::instrument(level = "trace", skip(ctx, state, pov), fields(subsystem = LOG_TARGET))] async fn handle_incoming_pov( state: &mut State, ctx: &mut impl SubsystemContext, @@ -405,11 +552,11 @@ async fn handle_incoming_pov( relay_parent: Hash, pov_hash: Hash, pov: PoV, -) -> SubsystemResult<()> { +) { let relay_parent_state = match state.relay_parent_state.get_mut(&relay_parent) { None => { - report_peer(ctx, peer, COST_UNEXPECTED_POV).await?; - return Ok(()); + report_peer(ctx, peer, COST_UNEXPECTED_POV).await; + return; }, Some(r) => r, }; @@ -418,16 +565,16 @@ async fn handle_incoming_pov( // Do validity checks and complete all senders awaiting this PoV. let fetching = match relay_parent_state.fetching.get_mut(&pov_hash) { None => { - report_peer(ctx, peer, COST_UNEXPECTED_POV).await?; - return Ok(()); + report_peer(ctx, peer, COST_UNEXPECTED_POV).await; + return; } Some(f) => f, }; let hash = pov.hash(); if hash != pov_hash { - report_peer(ctx, peer, COST_UNEXPECTED_POV).await?; - return Ok(()); + report_peer(ctx, peer, COST_UNEXPECTED_POV).await; + return; } let pov = Arc::new(pov); @@ -435,10 +582,10 @@ async fn handle_incoming_pov( if fetching.is_empty() { // fetching is empty whenever we were awaiting something and // it was completed afterwards. - report_peer(ctx, peer.clone(), BENEFIT_LATE_POV).await?; + report_peer(ctx, peer.clone(), BENEFIT_LATE_POV).await; } else { // fetching is non-empty when the peer just provided us with data we needed. - report_peer(ctx, peer.clone(), BENEFIT_FRESH_POV).await?; + report_peer(ctx, peer.clone(), BENEFIT_FRESH_POV).await; } for response_sender in fetching.drain(..) { @@ -464,20 +611,26 @@ async fn handle_incoming_pov( ).await } +/// Handles a newly connected validator in the context of some relay leaf. +fn handle_validator_connected(state: &mut State, peer_id: PeerId) { + state.peer_state.entry(peer_id).or_default(); +} + /// Handles a network bridge update. 
+#[tracing::instrument(level = "trace", skip(ctx, state), fields(subsystem = LOG_TARGET))] async fn handle_network_update( state: &mut State, ctx: &mut impl SubsystemContext, update: NetworkBridgeEvent, -) -> SubsystemResult<()> { +) { + let _timer = state.metrics.time_handle_network_update(); + match update { NetworkBridgeEvent::PeerConnected(peer, _observed_role) => { - state.peer_state.insert(peer, PeerState { awaited: HashMap::new() }); - Ok(()) + handle_validator_connected(state, peer); } NetworkBridgeEvent::PeerDisconnected(peer) => { state.peer_state.remove(&peer); - Ok(()) } NetworkBridgeEvent::PeerViewChange(peer_id, view) => { if let Some(peer_state) = state.peer_state.get_mut(&peer_id) { @@ -495,12 +648,11 @@ async fn handle_network_update( ctx, &state.relay_parent_state, *relay_parent, - ).await?; + ).await; } } } - Ok(()) } NetworkBridgeEvent::PeerMessage(peer, message) => { match message { @@ -525,7 +677,6 @@ async fn handle_network_update( } NetworkBridgeEvent::OurViewChange(view) => { state.our_view = view; - Ok(()) } } } @@ -536,46 +687,55 @@ impl PoVDistribution { Self { metrics } } + #[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))] async fn run( self, mut ctx: impl SubsystemContext, ) -> SubsystemResult<()> { - let mut state = State { - relay_parent_state: HashMap::new(), - peer_state: HashMap::new(), - our_view: View(Vec::new()), - metrics: self.metrics, - }; + let mut state = State::default(); + state.metrics = self.metrics; loop { - match ctx.recv().await? { - FromOverseer::Signal(signal) => if handle_signal(&mut state, &mut ctx, signal).await? 
{ - return Ok(()); - }, - FromOverseer::Communication { msg } => match msg { - PoVDistributionMessage::FetchPoV(relay_parent, descriptor, response_sender) => - handle_fetch( - &mut state, - &mut ctx, - relay_parent, - descriptor, - response_sender, - ).await?, - PoVDistributionMessage::DistributePoV(relay_parent, descriptor, pov) => - handle_distribute( + // `select_biased` is used since receiving connection notifications and + // peer view update messages may be racy and we want connection notifications + // first. + futures::select_biased! { + v = state.connection_requests.next().fuse() => handle_validator_connected(&mut state, v.peer_id), + v = ctx.recv().fuse() => { + match v? { + FromOverseer::Signal(signal) => if handle_signal( &mut state, &mut ctx, - relay_parent, - descriptor, - pov, - ).await?, - PoVDistributionMessage::NetworkBridgeUpdateV1(event) => - handle_network_update( - &mut state, - &mut ctx, - event, - ).await?, - }, + signal, + ).await? { + return Ok(()); + } + FromOverseer::Communication { msg } => match msg { + PoVDistributionMessage::FetchPoV(relay_parent, descriptor, response_sender) => + handle_fetch( + &mut state, + &mut ctx, + relay_parent, + descriptor, + response_sender, + ).await, + PoVDistributionMessage::DistributePoV(relay_parent, descriptor, pov) => + handle_distribute( + &mut state, + &mut ctx, + relay_parent, + descriptor, + pov, + ).await, + PoVDistributionMessage::NetworkBridgeUpdateV1(event) => + handle_network_update( + &mut state, + &mut ctx, + event, + ).await, + } + } + } } } } @@ -586,6 +746,10 @@ impl PoVDistribution { #[derive(Clone)] struct MetricsInner { povs_distributed: prometheus::Counter, + handle_signal: prometheus::Histogram, + handle_fetch: prometheus::Histogram, + handle_distribute: prometheus::Histogram, + handle_network_update: prometheus::Histogram, } /// Availability Distribution metrics. 
@@ -598,6 +762,26 @@ impl Metrics { metrics.povs_distributed.inc(); } } + + /// Provide a timer for `handle_signal` which observes on drop. + fn time_handle_signal(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.handle_signal.start_timer()) + } + + /// Provide a timer for `handle_fetch` which observes on drop. + fn time_handle_fetch(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.handle_fetch.start_timer()) + } + + /// Provide a timer for `handle_distribute` which observes on drop. + fn time_handle_distribute(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.handle_distribute.start_timer()) + } + + /// Provide a timer for `handle_network_update` which observes on drop. + fn time_handle_network_update(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.handle_network_update.start_timer()) + } } impl metrics::Metrics for Metrics { @@ -610,10 +794,43 @@ impl metrics::Metrics for Metrics { )?, registry, )?, + handle_signal: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_pov_distribution_handle_signal", + "Time spent within `pov_distribution::handle_signal`", + ) + )?, + registry, + )?, + handle_fetch: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_pov_distribution_handle_fetch", + "Time spent within `pov_distribution::handle_fetch`", + ) + )?, + registry, + )?, + handle_distribute: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_pov_distribution_handle_distribute", + "Time spent within `pov_distribution::handle_distribute`", + ) + )?, + registry, + )?, + handle_network_update: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_pov_distribution_handle_network_update", + "Time spent within `pov_distribution::handle_network_update`", + ) + )?, + registry, + )?, }; Ok(Metrics(Some(metrics))) } } - -#[cfg(test)] 
-mod tests; \ No newline at end of file diff --git a/node/network/pov-distribution/src/tests.rs b/node/network/pov-distribution/src/tests.rs index 65f32ffe8810568fcb0cd189d23578d97e3e264f..a6358bedfd8fa4cd527846c40678ca4bbebb300d 100644 --- a/node/network/pov-distribution/src/tests.rs +++ b/node/network/pov-distribution/src/tests.rs @@ -1,7 +1,21 @@ use super::*; -use futures::executor; -use polkadot_primitives::v1::BlockData; + +use std::time::Duration; + use assert_matches::assert_matches; +use futures::executor; +use tracing::trace; +use smallvec::smallvec; + +use sp_keyring::Sr25519Keyring; + +use polkadot_primitives::v1::{ + AuthorityDiscoveryId, BlockData, CoreState, GroupRotationInfo, Id as ParaId, + ScheduledCore, ValidatorIndex, SessionIndex, SessionInfo, +}; +use polkadot_subsystem::messages::{RuntimeApiMessage, RuntimeApiRequest}; +use polkadot_node_subsystem_test_helpers as test_helpers; +use polkadot_node_subsystem_util::TimeoutExt; fn make_pov(data: Vec) -> PoV { PoV { block_data: BlockData(data) } @@ -15,6 +29,510 @@ fn make_peer_state(awaited: Vec<(Hash, Vec)>) } } +fn validator_pubkeys(val_ids: &[Sr25519Keyring]) -> Vec { + val_ids.iter().map(|v| v.public().into()).collect() +} + +fn validator_authority_id(val_ids: &[Sr25519Keyring]) -> Vec { + val_ids.iter().map(|v| v.public().into()).collect() +} + +type VirtualOverseer = test_helpers::TestSubsystemContextHandle; + +struct TestHarness { + virtual_overseer: VirtualOverseer, +} + +fn test_harness>( + test: impl FnOnce(TestHarness) -> T, +) { + let _ = env_logger::builder() + .is_test(true) + .filter( + Some("polkadot_pov_distribution"), + log::LevelFilter::Trace, + ) + .filter( + Some(LOG_TARGET), + log::LevelFilter::Trace, + ) + .try_init(); + + let pool = sp_core::testing::TaskExecutor::new(); + + let (context, virtual_overseer) = test_helpers::make_subsystem_context(pool.clone()); + + let subsystem = super::PoVDistribution::new(Metrics::default()); + + let subsystem = subsystem.run(context); + + 
let test_fut = test(TestHarness { virtual_overseer }); + + futures::pin_mut!(test_fut); + futures::pin_mut!(subsystem); + + executor::block_on(future::select(test_fut, subsystem)); +} + +const TIMEOUT: Duration = Duration::from_millis(100); + +async fn overseer_send( + overseer: &mut VirtualOverseer, + msg: PoVDistributionMessage, +) { + trace!("Sending message:\n{:?}", &msg); + overseer + .send(FromOverseer::Communication { msg }) + .timeout(TIMEOUT) + .await + .expect(&format!("{:?} is more than enough for sending messages.", TIMEOUT)); +} + +async fn overseer_recv( + overseer: &mut VirtualOverseer, +) -> AllMessages { + let msg = overseer_recv_with_timeout(overseer, TIMEOUT) + .await + .expect(&format!("{:?} is more than enough to receive messages", TIMEOUT)); + + trace!("Received message:\n{:?}", &msg); + + msg +} + +async fn overseer_recv_with_timeout( + overseer: &mut VirtualOverseer, + timeout: Duration, +) -> Option { + trace!("Waiting for message..."); + overseer + .recv() + .timeout(timeout) + .await +} + +async fn overseer_signal( + overseer: &mut VirtualOverseer, + signal: OverseerSignal, +) { + overseer + .send(FromOverseer::Signal(signal)) + .timeout(TIMEOUT) + .await + .expect(&format!("{:?} is more than enough for sending signals.", TIMEOUT)); +} + +#[derive(Clone)] +struct TestState { + chain_ids: Vec, + validators: Vec, + validator_public: Vec, + validator_authority_id: Vec, + validator_peer_id: Vec, + validator_groups: (Vec>, GroupRotationInfo), + relay_parent: Hash, + availability_cores: Vec, + session_index: SessionIndex, +} + +impl Default for TestState { + fn default() -> Self { + let chain_a = ParaId::from(1); + let chain_b = ParaId::from(2); + + let chain_ids = vec![chain_a, chain_b]; + + let validators = vec![ + Sr25519Keyring::Alice, + Sr25519Keyring::Bob, + Sr25519Keyring::Charlie, + Sr25519Keyring::Dave, + Sr25519Keyring::Ferdie, + ]; + + let validator_public = validator_pubkeys(&validators); + let validator_authority_id = 
validator_authority_id(&validators); + + let validator_peer_id = std::iter::repeat_with(|| PeerId::random()) + .take(validator_public.len()) + .collect(); + + let validator_groups = vec![vec![2, 0, 4], vec![1], vec![3]]; + let group_rotation_info = GroupRotationInfo { + session_start_block: 0, + group_rotation_frequency: 100, + now: 1, + }; + let validator_groups = (validator_groups, group_rotation_info); + + let availability_cores = vec![ + CoreState::Scheduled(ScheduledCore { + para_id: chain_ids[0], + collator: None, + }), + CoreState::Scheduled(ScheduledCore { + para_id: chain_ids[1], + collator: None, + }), + ]; + + let relay_parent = Hash::repeat_byte(0x05); + + Self { + chain_ids, + validators, + validator_public, + validator_authority_id, + validator_peer_id, + validator_groups, + relay_parent, + availability_cores, + session_index: 1, + } + } +} + +async fn test_validator_discovery( + virtual_overseer: &mut VirtualOverseer, + expected_relay_parent: Hash, + session_index: SessionIndex, + validator_ids: &[ValidatorId], + discovery_ids: &[AuthorityDiscoveryId], + validator_group: &[ValidatorIndex], +) { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::SessionIndexForChild(tx), + )) => { + assert_eq!(relay_parent, expected_relay_parent); + tx.send(Ok(session_index)).unwrap(); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::SessionInfo(index, tx), + )) => { + assert_eq!(relay_parent, expected_relay_parent); + assert_eq!(index, session_index); + + let validators = validator_group.iter() + .map(|idx| validator_ids[*idx as usize].clone()) + .collect(); + + let discovery_keys = validator_group.iter() + .map(|idx| discovery_ids[*idx as usize].clone()) + .collect(); + + tx.send(Ok(Some(SessionInfo { + validators, + discovery_keys, + ..Default::default() + 
}))).unwrap(); + } + ); +} + +#[test] +fn ask_validators_for_povs() { + let test_state = TestState::default(); + + test_harness(|test_harness| async move { + let mut virtual_overseer = test_harness.virtual_overseer; + + let pov_block = PoV { + block_data: BlockData(vec![42, 43, 44]), + }; + + let pov_hash = pov_block.hash(); + + let mut candidate = CandidateDescriptor::default(); + + let current = test_state.relay_parent.clone(); + candidate.para_id = test_state.chain_ids[0]; + candidate.pov_hash = pov_hash; + candidate.relay_parent = test_state.relay_parent; + + overseer_signal( + &mut virtual_overseer, + OverseerSignal::ActiveLeaves(ActiveLeavesUpdate { + activated: smallvec![test_state.relay_parent.clone()], + deactivated: smallvec![], + }), + ).await; + + // first subsystem will try to obtain validators. + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::Validators(tx), + )) => { + assert_eq!(relay_parent, current); + tx.send(Ok(test_state.validator_public.clone())).unwrap(); + } + ); + + let (tx, pov_fetch_result) = oneshot::channel(); + + overseer_send( + &mut virtual_overseer, + PoVDistributionMessage::FetchPoV(test_state.relay_parent.clone(), candidate, tx), + ).await; + + // obtain the availability cores. 
+ assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::AvailabilityCores(tx) + )) => { + assert_eq!(relay_parent, current); + tx.send(Ok(test_state.availability_cores.clone())).unwrap(); + } + ); + + // Obtain the validator groups + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::ValidatorGroups(tx) + )) => { + assert_eq!(relay_parent, current); + tx.send(Ok(test_state.validator_groups.clone())).unwrap(); + } + ); + + // obtain the validators per relay parent + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::Validators(tx), + )) => { + assert_eq!(relay_parent, current); + tx.send(Ok(test_state.validator_public.clone())).unwrap(); + } + ); + + test_validator_discovery( + &mut virtual_overseer, + current, + test_state.session_index, + &test_state.validator_public, + &test_state.validator_authority_id, + &test_state.validator_groups.0[0], + ).await; + + // We now should connect to our validator group. + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::ConnectToValidators { + validator_ids, + mut connected, + .. 
+ } + ) => { + assert_eq!(validator_ids.len(), 3); + assert!(validator_ids.iter().all(|id| test_state.validator_authority_id.contains(id))); + + let result = vec![ + (test_state.validator_authority_id[2].clone(), test_state.validator_peer_id[2].clone()), + (test_state.validator_authority_id[0].clone(), test_state.validator_peer_id[0].clone()), + (test_state.validator_authority_id[4].clone(), test_state.validator_peer_id[4].clone()), + ]; + + result.into_iter().for_each(|r| connected.try_send(r).unwrap()); + } + ); + + for i in vec![2, 0, 4] { + overseer_send( + &mut virtual_overseer, + PoVDistributionMessage::NetworkBridgeUpdateV1( + NetworkBridgeEvent::PeerViewChange( + test_state.validator_peer_id[i].clone(), + View(vec![current]), + ) + ) + ).await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::NetworkBridge(NetworkBridgeMessage::SendValidationMessage( + to_peers, + payload, + )) => { + assert_eq!(to_peers, vec![test_state.validator_peer_id[i].clone()]); + assert_eq!(payload, awaiting_message(current.clone(), vec![pov_hash.clone()])); + } + ); + } + + overseer_send( + &mut virtual_overseer, + PoVDistributionMessage::NetworkBridgeUpdateV1( + NetworkBridgeEvent::PeerMessage( + test_state.validator_peer_id[2].clone(), + protocol_v1::PoVDistributionMessage::SendPoV(current, pov_hash, pov_block.clone()), + ) + ) + ).await; + + assert_eq!(*pov_fetch_result.await.unwrap(), pov_block); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::NetworkBridge(NetworkBridgeMessage::ReportPeer(id, benefit)) => { + assert_eq!(benefit, BENEFIT_FRESH_POV); + assert_eq!(id, test_state.validator_peer_id[2].clone()); + } + ); + + // Now let's test that if some peer is ahead of us we would still + // send `Await` on `FetchPoV` message to it. + let next_leaf = Hash::repeat_byte(10); + + // A validator's view changes and now is lets say ahead of us. 
+ overseer_send( + &mut virtual_overseer, + PoVDistributionMessage::NetworkBridgeUpdateV1( + NetworkBridgeEvent::PeerViewChange( + test_state.validator_peer_id[2].clone(), + View(vec![next_leaf]), + ) + ) + ).await; + + let pov_block = PoV { + block_data: BlockData(vec![45, 46, 47]), + }; + + let pov_hash = pov_block.hash(); + + let candidate = CandidateDescriptor { + para_id: test_state.chain_ids[0], + pov_hash, + relay_parent: next_leaf.clone(), + ..Default::default() + }; + + let (tx, _pov_fetch_result) = oneshot::channel(); + + overseer_signal( + &mut virtual_overseer, + OverseerSignal::ActiveLeaves(ActiveLeavesUpdate { + activated: smallvec![next_leaf.clone()], + deactivated: smallvec![current.clone()], + }) + ).await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::Validators(tx), + )) => { + assert_eq!(relay_parent, next_leaf); + tx.send(Ok(test_state.validator_public.clone())).unwrap(); + } + ); + + overseer_send( + &mut virtual_overseer, + PoVDistributionMessage::FetchPoV(next_leaf.clone(), candidate, tx), + ).await; + + // Obtain the availability cores. 
+ assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::AvailabilityCores(tx) + )) => { + assert_eq!(relay_parent, next_leaf); + tx.send(Ok(test_state.availability_cores.clone())).unwrap(); + } + ); + + // Obtain the validator groups + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::ValidatorGroups(tx) + )) => { + assert_eq!(relay_parent, next_leaf); + tx.send(Ok(test_state.validator_groups.clone())).unwrap(); + } + ); + + // obtain the validators per relay parent + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::Validators(tx), + )) => { + assert_eq!(relay_parent, next_leaf); + tx.send(Ok(test_state.validator_public.clone())).unwrap(); + } + ); + + // obtain the validator_id to authority_id mapping + test_validator_discovery( + &mut virtual_overseer, + next_leaf, + test_state.session_index, + &test_state.validator_public, + &test_state.validator_authority_id, + &test_state.validator_groups.0[0], + ).await; + + // We now should connect to our validator group. + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::ConnectToValidators { + validator_ids, + mut connected, + .. 
+ } + ) => { + assert_eq!(validator_ids.len(), 3); + assert!(validator_ids.iter().all(|id| test_state.validator_authority_id.contains(id))); + + let result = vec![ + (test_state.validator_authority_id[2].clone(), test_state.validator_peer_id[2].clone()), + (test_state.validator_authority_id[0].clone(), test_state.validator_peer_id[0].clone()), + (test_state.validator_authority_id[4].clone(), test_state.validator_peer_id[4].clone()), + ]; + + result.into_iter().for_each(|r| connected.try_send(r).unwrap()); + } + ); + + // We already know that the leaf in question in the peer's view so we request + // a chunk from them right away. + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::NetworkBridge(NetworkBridgeMessage::SendValidationMessage( + to_peers, + payload, + )) => { + assert_eq!(to_peers, vec![test_state.validator_peer_id[2].clone()]); + assert_eq!(payload, awaiting_message(next_leaf.clone(), vec![pov_hash.clone()])); + } + ); + }); +} + #[test] fn distributes_to_those_awaiting_and_completes_local() { let hash_a: Hash = [0; 32].into(); @@ -66,6 +584,7 @@ fn distributes_to_those_awaiting_and_completes_local() { }, our_view: View(vec![hash_a, hash_b]), metrics: Default::default(), + connection_requests: Default::default(), }; let pool = sp_core::testing::TaskExecutor::new(); @@ -80,7 +599,7 @@ fn distributes_to_those_awaiting_and_completes_local() { hash_a, descriptor, Arc::new(pov.clone()), - ).await.unwrap(); + ).await; assert!(!state.peer_state[&peer_a].awaited[&hash_a].contains(&pov_hash)); assert!(state.peer_state[&peer_c].awaited[&hash_b].contains(&pov_hash)); @@ -103,8 +622,10 @@ fn distributes_to_those_awaiting_and_completes_local() { }); } + #[test] fn we_inform_peers_with_same_view_we_are_awaiting() { + let hash_a: Hash = [0; 32].into(); let hash_b: Hash = [1; 32].into(); @@ -146,6 +667,7 @@ fn we_inform_peers_with_same_view_we_are_awaiting() { }, our_view: View(vec![hash_a]), metrics: Default::default(), + 
connection_requests: Default::default(), }; let pool = sp_core::testing::TaskExecutor::new(); @@ -153,29 +675,124 @@ fn we_inform_peers_with_same_view_we_are_awaiting() { let mut descriptor = CandidateDescriptor::default(); descriptor.pov_hash = pov_hash; + let para_id_1 = ParaId::from(1); + let para_id_2 = ParaId::from(2); + + descriptor.para_id = para_id_1; + + let availability_cores = vec![ + CoreState::Scheduled(ScheduledCore { + para_id: para_id_1, + collator: None, + }), + CoreState::Scheduled(ScheduledCore { + para_id: para_id_2, + collator: None, + }), + ]; + + let validators = vec![ + Sr25519Keyring::Alice, + Sr25519Keyring::Bob, + Sr25519Keyring::Charlie, + Sr25519Keyring::Dave, + Sr25519Keyring::Ferdie, + ]; + + let validator_authority_id = validator_authority_id(&validators); + let validators = validator_pubkeys(&validators); + + let validator_peer_id: Vec<_> = std::iter::repeat_with(|| PeerId::random()) + .take(validators.len()) + .collect(); + + let validator_groups = vec![vec![2, 0, 4], vec![1], vec![3]]; + let group_rotation_info = GroupRotationInfo { + session_start_block: 0, + group_rotation_frequency: 100, + now: 1, + }; + + let validator_groups = (validator_groups, group_rotation_info); + executor::block_on(async move { - handle_fetch( + let handle_future = handle_fetch( &mut state, &mut ctx, hash_a, descriptor, pov_send, - ).await.unwrap(); + ); - assert_eq!(state.relay_parent_state[&hash_a].fetching[&pov_hash].len(), 1); + let check_future = async move { + //assert_eq!(state.relay_parent_state[&hash_a].fetching[&pov_hash].len(), 1); + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::AvailabilityCores(tx) + )) => { + assert_eq!(relay_parent, hash_a); + tx.send(Ok(availability_cores)).unwrap(); + } + ); - assert_matches!( - handle.recv().await, - AllMessages::NetworkBridge( - NetworkBridgeMessage::SendValidationMessage(peers, message) - ) => { - assert_eq!(peers, 
vec![peer_a.clone()]); - assert_eq!( - message, - awaiting_message(hash_a, vec![pov_hash]), - ); - } - ) + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::ValidatorGroups(tx) + )) => { + assert_eq!(relay_parent, hash_a); + tx.send(Ok(validator_groups.clone())).unwrap(); + } + ); + + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::Validators(tx), + )) => { + assert_eq!(relay_parent, hash_a); + tx.send(Ok(validators.clone())).unwrap(); + } + ); + + test_validator_discovery( + &mut handle, + hash_a, + 1, + &validators, + &validator_authority_id, + &validator_groups.0[0], + ).await; + + assert_matches!( + handle.recv().await, + AllMessages::NetworkBridge( + NetworkBridgeMessage::ConnectToValidators { + validator_ids, + mut connected, + .. + } + ) => { + assert_eq!(validator_ids.len(), 3); + assert!(validator_ids.iter().all(|id| validator_authority_id.contains(id))); + + let result = vec![ + (validator_authority_id[2].clone(), validator_peer_id[2].clone()), + (validator_authority_id[0].clone(), validator_peer_id[0].clone()), + (validator_authority_id[4].clone(), validator_peer_id[4].clone()), + ]; + + result.into_iter().for_each(|r| connected.try_send(r).unwrap()); + } + ); + + }; + + futures::join!(handle_future, check_future); }); } @@ -224,6 +841,7 @@ fn peer_view_change_leads_to_us_informing() { }, our_view: View(vec![hash_a]), metrics: Default::default(), + connection_requests: Default::default(), }; let pool = sp_core::testing::TaskExecutor::new(); @@ -234,7 +852,7 @@ fn peer_view_change_leads_to_us_informing() { &mut state, &mut ctx, NetworkBridgeEvent::PeerViewChange(peer_a.clone(), View(vec![hash_a, hash_b])), - ).await.unwrap(); + ).await; assert_matches!( handle.recv().await, @@ -296,6 +914,7 @@ fn peer_complete_fetch_and_is_rewarded() { }, our_view: View(vec![hash_a]), metrics: 
Default::default(), + connection_requests: Default::default(), }; let pool = sp_core::testing::TaskExecutor::new(); @@ -310,7 +929,7 @@ fn peer_complete_fetch_and_is_rewarded() { peer_a.clone(), send_pov_message(hash_a, pov_hash, pov.clone()), ).focus().unwrap(), - ).await.unwrap(); + ).await; handle_network_update( &mut state, @@ -319,7 +938,7 @@ fn peer_complete_fetch_and_is_rewarded() { peer_b.clone(), send_pov_message(hash_a, pov_hash, pov.clone()), ).focus().unwrap(), - ).await.unwrap(); + ).await; assert_eq!(&*pov_recv.await.unwrap(), &pov); @@ -385,6 +1004,7 @@ fn peer_punished_for_sending_bad_pov() { }, our_view: View(vec![hash_a]), metrics: Default::default(), + connection_requests: Default::default(), }; let pool = sp_core::testing::TaskExecutor::new(); @@ -399,7 +1019,7 @@ fn peer_punished_for_sending_bad_pov() { peer_a.clone(), send_pov_message(hash_a, pov_hash, bad_pov.clone()), ).focus().unwrap(), - ).await.unwrap(); + ).await; // didn't complete our sender. assert_eq!(state.relay_parent_state[&hash_a].fetching[&pov_hash].len(), 1); @@ -449,6 +1069,7 @@ fn peer_punished_for_sending_unexpected_pov() { }, our_view: View(vec![hash_a]), metrics: Default::default(), + connection_requests: Default::default(), }; let pool = sp_core::testing::TaskExecutor::new(); @@ -463,7 +1084,7 @@ fn peer_punished_for_sending_unexpected_pov() { peer_a.clone(), send_pov_message(hash_a, pov_hash, pov.clone()), ).focus().unwrap(), - ).await.unwrap(); + ).await; assert_matches!( handle.recv().await, @@ -511,6 +1132,7 @@ fn peer_punished_for_sending_pov_out_of_our_view() { }, our_view: View(vec![hash_a]), metrics: Default::default(), + connection_requests: Default::default(), }; let pool = sp_core::testing::TaskExecutor::new(); @@ -525,7 +1147,7 @@ fn peer_punished_for_sending_pov_out_of_our_view() { peer_a.clone(), send_pov_message(hash_b, pov_hash, pov.clone()), ).focus().unwrap(), - ).await.unwrap(); + ).await; assert_matches!( handle.recv().await, @@ -570,6 +1192,7 @@ fn 
peer_reported_for_awaiting_too_much() { }, our_view: View(vec![hash_a]), metrics: Default::default(), + connection_requests: Default::default(), }; let pool = sp_core::testing::TaskExecutor::new(); @@ -588,7 +1211,7 @@ fn peer_reported_for_awaiting_too_much() { peer_a.clone(), awaiting_message(hash_a, vec![pov_hash]), ).focus().unwrap(), - ).await.unwrap(); + ).await; } assert_eq!(state.peer_state[&peer_a].awaited[&hash_a].len(), max_plausibly_awaited); @@ -602,7 +1225,7 @@ fn peer_reported_for_awaiting_too_much() { peer_a.clone(), awaiting_message(hash_a, vec![last_pov_hash]), ).focus().unwrap(), - ).await.unwrap(); + ).await; // No more bookkeeping for you! assert_eq!(state.peer_state[&peer_a].awaited[&hash_a].len(), max_plausibly_awaited); @@ -656,6 +1279,7 @@ fn peer_reported_for_awaiting_outside_their_view() { }, our_view: View(vec![hash_a, hash_b]), metrics: Default::default(), + connection_requests: Default::default(), }; let pool = sp_core::testing::TaskExecutor::new(); @@ -672,7 +1296,7 @@ fn peer_reported_for_awaiting_outside_their_view() { peer_a.clone(), awaiting_message(hash_b, vec![pov_hash]), ).focus().unwrap(), - ).await.unwrap(); + ).await; assert!(state.peer_state[&peer_a].awaited.get(&hash_b).is_none()); @@ -719,6 +1343,7 @@ fn peer_reported_for_awaiting_outside_our_view() { }, our_view: View(vec![hash_a]), metrics: Default::default(), + connection_requests: Default::default(), }; let pool = sp_core::testing::TaskExecutor::new(); @@ -735,7 +1360,7 @@ fn peer_reported_for_awaiting_outside_our_view() { peer_a.clone(), awaiting_message(hash_b, vec![pov_hash]), ).focus().unwrap(), - ).await.unwrap(); + ).await; // Illegal `awaited` is ignored. 
assert!(state.peer_state[&peer_a].awaited[&hash_b].is_empty()); @@ -797,6 +1422,7 @@ fn peer_complete_fetch_leads_to_us_completing_others() { }, our_view: View(vec![hash_a]), metrics: Default::default(), + connection_requests: Default::default(), }; let pool = sp_core::testing::TaskExecutor::new(); @@ -810,7 +1436,7 @@ fn peer_complete_fetch_leads_to_us_completing_others() { peer_a.clone(), send_pov_message(hash_a, pov_hash, pov.clone()), ).focus().unwrap(), - ).await.unwrap(); + ).await; assert_eq!(&*pov_recv.await.unwrap(), &pov); @@ -880,6 +1506,7 @@ fn peer_completing_request_no_longer_awaiting() { }, our_view: View(vec![hash_a]), metrics: Default::default(), + connection_requests: Default::default(), }; let pool = sp_core::testing::TaskExecutor::new(); @@ -893,7 +1520,7 @@ fn peer_completing_request_no_longer_awaiting() { peer_a.clone(), send_pov_message(hash_a, pov_hash, pov.clone()), ).focus().unwrap(), - ).await.unwrap(); + ).await; assert_eq!(&*pov_recv.await.unwrap(), &pov); diff --git a/node/network/protocol/Cargo.toml b/node/network/protocol/Cargo.toml index 11e1cc4180e38373aef61ca3b1875e50ec28012b..cd6e3fea84ab3a926c56f489059042be6cc07748 100644 --- a/node/network/protocol/Cargo.toml +++ b/node/network/protocol/Cargo.toml @@ -8,5 +8,5 @@ description = "Primitives types for the Node-side" [dependencies] polkadot-primitives = { path = "../../../primitives" } polkadot-node-primitives = { path = "../../primitives" } -parity-scale-codec = { version = "1.3.4", default-features = false, features = ["derive"] } -sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" } +parity-scale-codec = { version = "1.3.5", default-features = false, features = ["derive"] } +sc-network = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } diff --git a/node/network/protocol/src/lib.rs b/node/network/protocol/src/lib.rs index 31ec729d8b8663ea8abac49612fd073908fdb487..8409a795ae56eeb462b0ada0c3909e4b0466cf26 100644 
--- a/node/network/protocol/src/lib.rs +++ b/node/network/protocol/src/lib.rs @@ -166,6 +166,15 @@ impl NetworkBridgeEvent { pub struct View(pub Vec); impl View { + /// Replace `self` with `new`. + /// + /// Returns an iterator that will yield all elements of `new` that were not part of `self`. + pub fn replace_difference(&mut self, new: View) -> impl Iterator { + let old = std::mem::replace(self, new); + + self.0.iter().filter(move |h| !old.contains(h)) + } + /// Returns an iterator of the hashes present in `Self` but not in `other`. pub fn difference<'a>(&'a self, other: &'a View) -> impl Iterator + 'a { self.0.iter().filter(move |h| !other.contains(h)) @@ -186,7 +195,7 @@ impl View { pub mod v1 { use polkadot_primitives::v1::{ Hash, CollatorId, Id as ParaId, ErasureChunk, CandidateReceipt, - SignedAvailabilityBitfield, PoV, + SignedAvailabilityBitfield, PoV, CandidateHash, }; use polkadot_node_primitives::SignedFullStatement; use parity_scale_codec::{Encode, Decode}; @@ -198,7 +207,7 @@ pub mod v1 { pub enum AvailabilityDistributionMessage { /// An erasure chunk for a given candidate hash. #[codec(index = "0")] - Chunk(Hash, ErasureChunk), + Chunk(CandidateHash, ErasureChunk), } /// Network messages used by the bitfield distribution subsystem. 
diff --git a/node/network/statement-distribution/Cargo.toml b/node/network/statement-distribution/Cargo.toml index fa0b3786d0146e221f31bb37ba32f14b46f2f771..7ee9ab3ff6b02a953b18bd894ce148dd61d50114 100644 --- a/node/network/statement-distribution/Cargo.toml +++ b/node/network/statement-distribution/Cargo.toml @@ -6,22 +6,23 @@ description = "Statement Distribution Subsystem" edition = "2018" [dependencies] -futures = "0.3.5" -log = "0.4.11" +futures = "0.3.8" +tracing = "0.1.22" +tracing-futures = "0.2.4" polkadot-primitives = { path = "../../../primitives" } node-primitives = { package = "polkadot-node-primitives", path = "../../primitives" } -sp-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-staking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } polkadot-node-network-protocol = { path = "../../network/protocol" } -arrayvec = "0.5.1" -indexmap = "1.4.0" +arrayvec = "0.5.2" +indexmap = "1.6.0" [dev-dependencies] polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } -assert_matches = "1.3.0" -sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } +assert_matches = "1.4.0" +sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } 
+sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index d6641e6f57839eac7dac1847b56387647f9c4b6c..5e6a58f948658889de6481545ecd686f446d5bbd 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -30,12 +30,10 @@ use polkadot_subsystem::{ RuntimeApiMessage, RuntimeApiRequest, }, }; -use polkadot_node_subsystem_util::{ - metrics::{self, prometheus}, -}; +use polkadot_node_subsystem_util::metrics::{self, prometheus}; use node_primitives::SignedFullStatement; use polkadot_primitives::v1::{ - Hash, CompactStatement, ValidatorIndex, ValidatorId, SigningContext, ValidatorSignature, + Hash, CompactStatement, ValidatorIndex, ValidatorId, SigningContext, ValidatorSignature, CandidateHash, }; use polkadot_node_network_protocol::{ v1 as protocol_v1, View, PeerId, ReputationChange as Rep, NetworkBridgeEvent, @@ -81,7 +79,7 @@ impl Subsystem for StatementDistribution // within `run`. SpawnedSubsystem { name: "statement-distribution-subsystem", - future: self.run(ctx).map(|_| ()).boxed(), + future: self.run(ctx).boxed(), } } } @@ -102,32 +100,32 @@ impl StatementDistribution { /// via other means. 
#[derive(Default)] struct VcPerPeerTracker { - local_observed: arrayvec::ArrayVec<[Hash; VC_THRESHOLD]>, - remote_observed: arrayvec::ArrayVec<[Hash; VC_THRESHOLD]>, + local_observed: arrayvec::ArrayVec<[CandidateHash; VC_THRESHOLD]>, + remote_observed: arrayvec::ArrayVec<[CandidateHash; VC_THRESHOLD]>, } impl VcPerPeerTracker { - // Note that the remote should now be aware that a validator has seconded a given candidate (by hash) - // based on a message that we have sent it from our local pool. - fn note_local(&mut self, h: Hash) { + /// Note that the remote should now be aware that a validator has seconded a given candidate (by hash) + /// based on a message that we have sent it from our local pool. + fn note_local(&mut self, h: CandidateHash) { if !note_hash(&mut self.local_observed, h) { - log::warn!("Statement distribution is erroneously attempting to distribute more \ + tracing::warn!("Statement distribution is erroneously attempting to distribute more \ than {} candidate(s) per validator index. Ignoring", VC_THRESHOLD); } } - // Note that the remote should now be aware that a validator has seconded a given candidate (by hash) - // based on a message that it has sent us. - // - // Returns `true` if the peer was allowed to send us such a message, `false` otherwise. - fn note_remote(&mut self, h: Hash) -> bool { + /// Note that the remote should now be aware that a validator has seconded a given candidate (by hash) + /// based on a message that it has sent us. + /// + /// Returns `true` if the peer was allowed to send us such a message, `false` otherwise. 
+ fn note_remote(&mut self, h: CandidateHash) -> bool { note_hash(&mut self.remote_observed, h) } } fn note_hash( - observed: &mut arrayvec::ArrayVec<[Hash; VC_THRESHOLD]>, - h: Hash, + observed: &mut arrayvec::ArrayVec<[CandidateHash; VC_THRESHOLD]>, + h: CandidateHash, ) -> bool { if observed.contains(&h) { return true; } @@ -139,7 +137,7 @@ fn note_hash( struct PeerRelayParentKnowledge { /// candidates that the peer is aware of. This indicates that we can /// send other statements pertaining to that candidate. - known_candidates: HashSet, + known_candidates: HashSet, /// fingerprints of all statements a peer should be aware of: those that /// were sent to the peer by us. sent_statements: HashSet<(CompactStatement, ValidatorIndex)>, @@ -149,7 +147,7 @@ struct PeerRelayParentKnowledge { /// How many candidates this peer is aware of for each given validator index. seconded_counts: HashMap, /// How many statements we've received for each candidate that we're aware of. - received_message_count: HashMap, + received_message_count: HashMap, } impl PeerRelayParentKnowledge { @@ -164,6 +162,7 @@ impl PeerRelayParentKnowledge { /// /// This returns `Some(true)` if this is the first time the peer has become aware of a /// candidate with the given hash. + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn send(&mut self, fingerprint: &(CompactStatement, ValidatorIndex)) -> Option { let already_known = self.sent_statements.contains(fingerprint) || self.received_statements.contains(fingerprint); @@ -212,6 +211,7 @@ impl PeerRelayParentKnowledge { /// /// This returns `Ok(true)` if this is the first time the peer has become aware of a /// candidate with given hash. 
+ #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn receive( &mut self, fingerprint: &(CompactStatement, ValidatorIndex), @@ -246,7 +246,7 @@ impl PeerRelayParentKnowledge { { let received_per_candidate = self.received_message_count - .entry(candidate_hash.clone()) + .entry(*candidate_hash) .or_insert(0); if *received_per_candidate >= max_message_count { @@ -278,6 +278,7 @@ impl PeerData { /// /// This returns `Some(true)` if this is the first time the peer has become aware of a /// candidate with the given hash. + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn send( &mut self, relay_parent: &Hash, @@ -302,13 +303,16 @@ impl PeerData { /// /// This returns `Ok(true)` if this is the first time the peer has become aware of a /// candidate with given hash. + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn receive( &mut self, relay_parent: &Hash, fingerprint: &(CompactStatement, ValidatorIndex), max_message_count: usize, ) -> Result { - self.view_knowledge.get_mut(relay_parent).ok_or(COST_UNEXPECTED_STATEMENT)? + self.view_knowledge + .get_mut(relay_parent) + .ok_or(COST_UNEXPECTED_STATEMENT)? .receive(fingerprint, max_message_count) } } @@ -372,7 +376,7 @@ enum NotedStatement<'a> { struct ActiveHeadData { /// All candidates we are aware of for this head, keyed by hash. - candidates: HashSet, + candidates: HashSet, /// Stored statements for circulation to peers. /// /// These are iterable in insertion order, and `Seconded` statements are always @@ -411,6 +415,7 @@ impl ActiveHeadData { /// /// Any other statements or those that reference a candidate we are not aware of cannot be accepted /// and will return `NotedStatement::NotUseful`. 
+ #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn note_statement(&mut self, statement: SignedFullStatement) -> NotedStatement { let validator_index = statement.validator_index(); let comparator = StoredStatementComparator { @@ -464,7 +469,7 @@ impl ActiveHeadData { } /// Get an iterator over all statements for the active head that are for a particular candidate. - fn statements_about(&self, candidate_hash: Hash) + fn statements_about(&self, candidate_hash: CandidateHash) -> impl Iterator + '_ { self.statements().filter(move |s| s.compact().candidate_hash() == &candidate_hash) @@ -487,12 +492,15 @@ fn check_statement_signature( .and_then(|v| statement.check_signature(&signing_context, v)) } +type StatementListeners = Vec>; + /// Informs all registered listeners about a newly received statement. /// /// Removes all closed listeners. +#[tracing::instrument(level = "trace", skip(listeners), fields(subsystem = LOG_TARGET))] async fn inform_statement_listeners( statement: &SignedFullStatement, - listeners: &mut Vec>, + listeners: &mut StatementListeners, ) { // Ignore the errors since these will be removed later. stream::iter(listeners.iter_mut()).for_each_concurrent( @@ -509,6 +517,7 @@ async fn inform_statement_listeners( /// circulates the statement to all peers who have not seen it yet, and /// sends all statements dependent on that statement to peers who could previously not receive /// them but now can. +#[tracing::instrument(level = "trace", skip(peers, ctx, active_heads, metrics), fields(subsystem = LOG_TARGET))] async fn circulate_statement_and_dependents( peers: &mut HashMap, active_heads: &mut HashMap, @@ -516,42 +525,42 @@ async fn circulate_statement_and_dependents( relay_parent: Hash, statement: SignedFullStatement, metrics: &Metrics, -) -> SubsystemResult<()> { - if let Some(active_head)= active_heads.get_mut(&relay_parent) { - - // First circulate the statement directly to all peers needing it. 
- // The borrow of `active_head` needs to encompass only this (Rust) statement. - let outputs: Option<(Hash, Vec)> = { - match active_head.note_statement(statement) { - NotedStatement::Fresh(stored) => Some(( - stored.compact().candidate_hash().clone(), - circulate_statement(peers, ctx, relay_parent, stored).await?, - )), - _ => None, - } - }; +) { + let active_head = match active_heads.get_mut(&relay_parent) { + Some(res) => res, + None => return, + }; - // Now send dependent statements to all peers needing them, if any. - if let Some((candidate_hash, peers_needing_dependents)) = outputs { - for peer in peers_needing_dependents { - if let Some(peer_data) = peers.get_mut(&peer) { - // defensive: the peer data should always be some because the iterator - // of peers is derived from the set of peers. - send_statements_about( - peer, - peer_data, - ctx, - relay_parent, - candidate_hash, - &*active_head, - metrics, - ).await?; - } + // First circulate the statement directly to all peers needing it. + // The borrow of `active_head` needs to encompass only this (Rust) statement. + let outputs: Option<(CandidateHash, Vec)> = { + match active_head.note_statement(statement) { + NotedStatement::Fresh(stored) => Some(( + *stored.compact().candidate_hash(), + circulate_statement(peers, ctx, relay_parent, stored).await, + )), + _ => None, + } + }; + + // Now send dependent statements to all peers needing them, if any. + if let Some((candidate_hash, peers_needing_dependents)) = outputs { + for peer in peers_needing_dependents { + if let Some(peer_data) = peers.get_mut(&peer) { + // defensive: the peer data should always be some because the iterator + // of peers is derived from the set of peers. 
+ send_statements_about( + peer, + peer_data, + ctx, + relay_parent, + candidate_hash, + &*active_head, + metrics, + ).await; } } } - - Ok(()) } fn statement_message(relay_parent: Hash, statement: SignedFullStatement) @@ -564,12 +573,13 @@ fn statement_message(relay_parent: Hash, statement: SignedFullStatement) /// Circulates a statement to all peers who have not seen it yet, and returns /// an iterator over peers who need to have dependent statements sent. +#[tracing::instrument(level = "trace", skip(peers, ctx), fields(subsystem = LOG_TARGET))] async fn circulate_statement( peers: &mut HashMap, ctx: &mut impl SubsystemContext, relay_parent: Hash, stored: &StoredStatement, -) -> SubsystemResult> { +) -> Vec { let fingerprint = stored.fingerprint(); let mut peers_to_send = HashMap::new(); @@ -586,26 +596,27 @@ async fn circulate_statement( ctx.send_message(AllMessages::NetworkBridge(NetworkBridgeMessage::SendValidationMessage( peers_to_send.keys().cloned().collect(), payload, - ))).await?; + ))).await; } - Ok(peers_to_send.into_iter().filter_map(|(peer, needs_dependent)| if needs_dependent { + peers_to_send.into_iter().filter_map(|(peer, needs_dependent)| if needs_dependent { Some(peer) } else { None - }).collect()) + }).collect() } /// Send all statements about a given candidate hash to a peer. 
+#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_head, metrics), fields(subsystem = LOG_TARGET))] async fn send_statements_about( peer: PeerId, peer_data: &mut PeerData, ctx: &mut impl SubsystemContext, relay_parent: Hash, - candidate_hash: Hash, + candidate_hash: CandidateHash, active_head: &ActiveHeadData, metrics: &Metrics, -) -> SubsystemResult<()> { +) { for statement in active_head.statements_about(candidate_hash) { if peer_data.send(&relay_parent, &statement.fingerprint()).is_some() { let payload = statement_message( @@ -615,16 +626,15 @@ async fn send_statements_about( ctx.send_message(AllMessages::NetworkBridge( NetworkBridgeMessage::SendValidationMessage(vec![peer.clone()], payload) - )).await?; + )).await; metrics.on_statement_distributed(); } } - - Ok(()) } /// Send all statements at a given relay-parent to a peer. +#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_head, metrics), fields(subsystem = LOG_TARGET))] async fn send_statements( peer: PeerId, peer_data: &mut PeerData, @@ -632,7 +642,7 @@ async fn send_statements( relay_parent: Hash, active_head: &ActiveHeadData, metrics: &Metrics, -) -> SubsystemResult<()> { +) { for statement in active_head.statements() { if peer_data.send(&relay_parent, &statement.fingerprint()).is_some() { let payload = statement_message( @@ -642,20 +652,18 @@ async fn send_statements( ctx.send_message(AllMessages::NetworkBridge( NetworkBridgeMessage::SendValidationMessage(vec![peer.clone()], payload) - )).await?; + )).await; metrics.on_statement_distributed(); } } - - Ok(()) } async fn report_peer( ctx: &mut impl SubsystemContext, peer: PeerId, rep: Rep, -) -> SubsystemResult<()> { +) { ctx.send_message(AllMessages::NetworkBridge( NetworkBridgeMessage::ReportPeer(peer, rep) )).await @@ -666,6 +674,7 @@ async fn report_peer( // // This function checks the signature and ensures the statement is compatible with our // view. 
+#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_heads, metrics), fields(subsystem = LOG_TARGET))] async fn handle_incoming_message<'a>( peer: PeerId, peer_data: &mut PeerData, @@ -674,13 +683,15 @@ async fn handle_incoming_message<'a>( ctx: &mut impl SubsystemContext, message: protocol_v1::StatementDistributionMessage, metrics: &Metrics, -) -> SubsystemResult> { + statement_listeners: &mut StatementListeners, +) -> Option<(Hash, &'a StoredStatement)> { let (relay_parent, statement) = match message { protocol_v1::StatementDistributionMessage::Statement(r, s) => (r, s), }; if !our_view.contains(&relay_parent) { - return report_peer(ctx, peer, COST_UNEXPECTED_STATEMENT).await.map(|_| None); + report_peer(ctx, peer, COST_UNEXPECTED_STATEMENT).await; + return None; } let active_head = match active_heads.get_mut(&relay_parent) { @@ -688,14 +699,18 @@ async fn handle_incoming_message<'a>( None => { // This should never be out-of-sync with our view if the view updates // correspond to actual `StartWork` messages. So we just log and ignore. - log::warn!("Our view out-of-sync with active heads. Head {} not found", relay_parent); - return Ok(None); + tracing::warn!( + requested_relay_parent = %relay_parent, + "our view out-of-sync with active heads; head not found", + ); + return None; } }; // check the signature on the statement. if let Err(()) = check_statement_signature(&active_head, relay_parent, &statement) { - return report_peer(ctx, peer, COST_INVALID_SIGNATURE).await.map(|_| None); + report_peer(ctx, peer, COST_INVALID_SIGNATURE).await; + return None; } // Ensure the statement is stored in the peer data. 
@@ -706,8 +721,8 @@ async fn handle_incoming_message<'a>( let max_message_count = active_head.validators.len() * 2; match peer_data.receive(&relay_parent, &fingerprint, max_message_count) { Err(rep) => { - report_peer(ctx, peer, rep).await?; - return Ok(None) + report_peer(ctx, peer, rep).await; + return None; } Ok(true) => { // Send the peer all statements concerning the candidate that we have, @@ -720,27 +735,30 @@ async fn handle_incoming_message<'a>( fingerprint.0.candidate_hash().clone(), &*active_head, metrics, - ).await? + ).await; } Ok(false) => {} } + inform_statement_listeners(&statement, statement_listeners).await; + // Note: `peer_data.receive` already ensures that the statement is not an unbounded equivocation // or unpinned to a seconded candidate. So it is safe to place it into the storage. match active_head.note_statement(statement) { - NotedStatement::NotUseful => Ok(None), + NotedStatement::NotUseful => None, NotedStatement::UsefulButKnown => { - report_peer(ctx, peer, BENEFIT_VALID_STATEMENT).await?; - Ok(None) + report_peer(ctx, peer, BENEFIT_VALID_STATEMENT).await; + None } NotedStatement::Fresh(statement) => { - report_peer(ctx, peer, BENEFIT_VALID_STATEMENT_FIRST).await?; - Ok(Some((relay_parent, statement))) + report_peer(ctx, peer, BENEFIT_VALID_STATEMENT_FIRST).await; + Some((relay_parent, statement)) } } } /// Update a peer's view. Sends all newly unlocked statements based on the previous +#[tracing::instrument(level = "trace", skip(peer_data, ctx, active_heads, metrics), fields(subsystem = LOG_TARGET))] async fn update_peer_view_and_send_unlocked( peer: PeerId, peer_data: &mut PeerData, @@ -748,7 +766,7 @@ async fn update_peer_view_and_send_unlocked( active_heads: &HashMap, new_view: View, metrics: &Metrics, -) -> SubsystemResult<()> { +) { let old_view = std::mem::replace(&mut peer_data.view, new_view); // Remove entries for all relay-parents in the old view but not the new. 
@@ -770,13 +788,12 @@ async fn update_peer_view_and_send_unlocked( new, active_head, metrics, - ).await?; + ).await; } } - - Ok(()) } +#[tracing::instrument(level = "trace", skip(peers, active_heads, ctx, metrics), fields(subsystem = LOG_TARGET))] async fn handle_network_update( peers: &mut HashMap, active_heads: &mut HashMap, @@ -784,19 +801,17 @@ async fn handle_network_update( our_view: &mut View, update: NetworkBridgeEvent, metrics: &Metrics, -) -> SubsystemResult<()> { + statement_listeners: &mut StatementListeners, +) { match update { NetworkBridgeEvent::PeerConnected(peer, _role) => { peers.insert(peer, PeerData { view: Default::default(), view_knowledge: Default::default(), }); - - Ok(()) } NetworkBridgeEvent::PeerDisconnected(peer) => { peers.remove(&peer); - Ok(()) } NetworkBridgeEvent::PeerMessage(peer, message) => { match peers.get_mut(&peer) { @@ -809,7 +824,8 @@ async fn handle_network_update( ctx, message, metrics, - ).await?; + statement_listeners, + ).await; if let Some((relay_parent, new)) = new_stored { // When we receive a new message from a peer, we forward it to the @@ -817,12 +833,10 @@ async fn handle_network_update( let message = AllMessages::CandidateBacking( CandidateBackingMessage::Statement(relay_parent, new.statement.clone()) ); - ctx.send_message(message).await?; + ctx.send_message(message).await; } - - Ok(()) } - None => Ok(()), + None => (), } } @@ -838,7 +852,7 @@ async fn handle_network_update( metrics, ).await } - None => Ok(()), + None => (), } } NetworkBridgeEvent::OurViewChange(view) => { @@ -847,19 +861,22 @@ async fn handle_network_update( for new in our_view.difference(&old_view) { if !active_heads.contains_key(&new) { - log::warn!(target: LOG_TARGET, "Our network bridge view update \ + tracing::warn!( + target: LOG_TARGET, + unknown_hash = %new, + "Our network bridge view update \ inconsistent with `StartWork` messages we have received from overseer. 
\ - Contains unknown hash {}", new); + Contains unknown hash.", + ); } } - - Ok(()) } } } impl StatementDistribution { + #[tracing::instrument(skip(self, ctx), fields(subsystem = LOG_TARGET))] async fn run( self, mut ctx: impl SubsystemContext, @@ -867,13 +884,15 @@ impl StatementDistribution { let mut peers: HashMap = HashMap::new(); let mut our_view = View::default(); let mut active_heads: HashMap = HashMap::new(); - let mut statement_listeners: Vec> = Vec::new(); + let mut statement_listeners = StatementListeners::new(); let metrics = self.metrics; loop { let message = ctx.recv().await?; match message { FromOverseer::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate { activated, .. })) => { + let _timer = metrics.time_active_leaves_update(); + for relay_parent in activated { let (validators, session_index) = { let (val_tx, val_rx) = oneshot::channel(); @@ -894,15 +913,15 @@ impl StatementDistribution { ctx.send_messages( std::iter::once(val_message).chain(std::iter::once(session_message)) - ).await?; + ).await; match (val_rx.await?, session_rx.await?) 
{ (Ok(v), Ok(s)) => (v, s), (Err(e), _) | (_, Err(e)) => { - log::warn!( + tracing::warn!( target: LOG_TARGET, - "Failed to fetch runtime API data for active leaf: {:?}", - e, + err = ?e, + "Failed to fetch runtime API data for active leaf", ); // Lacking this bookkeeping might make us behave funny, although @@ -923,6 +942,8 @@ impl StatementDistribution { FromOverseer::Signal(OverseerSignal::Conclude) => break, FromOverseer::Communication { msg } => match msg { StatementDistributionMessage::Share(relay_parent, statement) => { + let _timer = metrics.time_share(); + inform_statement_listeners( &statement, &mut statement_listeners, @@ -934,9 +955,11 @@ impl StatementDistribution { relay_parent, statement, &metrics, - ).await?; + ).await; } - StatementDistributionMessage::NetworkBridgeUpdateV1(event) => + StatementDistributionMessage::NetworkBridgeUpdateV1(event) => { + let _timer = metrics.time_network_bridge_update_v1(); + handle_network_update( &mut peers, &mut active_heads, @@ -944,7 +967,9 @@ impl StatementDistribution { &mut our_view, event, &metrics, - ).await?, + &mut statement_listeners, + ).await; + } StatementDistributionMessage::RegisterStatementListener(tx) => { statement_listeners.push(tx); } @@ -958,6 +983,9 @@ impl StatementDistribution { #[derive(Clone)] struct MetricsInner { statements_distributed: prometheus::Counter, + active_leaves_update: prometheus::Histogram, + share: prometheus::Histogram, + network_bridge_update_v1: prometheus::Histogram, } /// Statement Distribution metrics. @@ -970,6 +998,21 @@ impl Metrics { metrics.statements_distributed.inc(); } } + + /// Provide a timer for `active_leaves_update` which observes on drop. + fn time_active_leaves_update(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.active_leaves_update.start_timer()) + } + + /// Provide a timer for `share` which observes on drop. 
+ fn time_share(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.share.start_timer()) + } + + /// Provide a timer for `network_bridge_update_v1` which observes on drop. + fn time_network_bridge_update_v1(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.network_bridge_update_v1.start_timer()) + } } impl metrics::Metrics for Metrics { @@ -982,6 +1025,33 @@ impl metrics::Metrics for Metrics { )?, registry, )?, + active_leaves_update: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_statement_distribution_active_leaves_update", + "Time spent within `statement_distribution::active_leaves_update`", + ) + )?, + registry, + )?, + share: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_statement_distribution_share", + "Time spent within `statement_distribution::share`", + ) + )?, + registry, + )?, + network_bridge_update_v1: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "parachain_statement_distribution_network_bridge_update_v1", + "Time spent within `statement_distribution::network_bridge_update_v1`", + ) + )?, + registry, + )?, }; Ok(Metrics(Some(metrics))) } @@ -1110,8 +1180,8 @@ mod tests { #[test] fn note_local_works() { - let hash_a: Hash = [1; 32].into(); - let hash_b: Hash = [2; 32].into(); + let hash_a = CandidateHash([1; 32].into()); + let hash_b = CandidateHash([2; 32].into()); let mut per_peer_tracker = VcPerPeerTracker::default(); per_peer_tracker.note_local(hash_a.clone()); @@ -1126,9 +1196,9 @@ mod tests { #[test] fn note_remote_works() { - let hash_a: Hash = [1; 32].into(); - let hash_b: Hash = [2; 32].into(); - let hash_c: Hash = [3; 32].into(); + let hash_a = CandidateHash([1; 32].into()); + let hash_b = CandidateHash([2; 32].into()); + let hash_c = CandidateHash([3; 32].into()); let mut per_peer_tracker = VcPerPeerTracker::default(); 
assert!(per_peer_tracker.note_remote(hash_a.clone())); @@ -1148,7 +1218,7 @@ mod tests { fn per_peer_relay_parent_knowledge_send() { let mut knowledge = PeerRelayParentKnowledge::default(); - let hash_a: Hash = [1; 32].into(); + let hash_a = CandidateHash([1; 32].into()); // Sending an un-pinned statement should not work and should have no effect. assert!(knowledge.send(&(CompactStatement::Valid(hash_a), 0)).is_none()); @@ -1180,7 +1250,7 @@ mod tests { fn cant_send_after_receiving() { let mut knowledge = PeerRelayParentKnowledge::default(); - let hash_a: Hash = [1; 32].into(); + let hash_a = CandidateHash([1; 32].into()); assert!(knowledge.receive(&(CompactStatement::Candidate(hash_a), 0), 3).unwrap()); assert!(knowledge.send(&(CompactStatement::Candidate(hash_a), 0)).is_none()); } @@ -1189,7 +1259,7 @@ mod tests { fn per_peer_relay_parent_knowledge_receive() { let mut knowledge = PeerRelayParentKnowledge::default(); - let hash_a: Hash = [1; 32].into(); + let hash_a = CandidateHash([1; 32].into()); assert_eq!( knowledge.receive(&(CompactStatement::Valid(hash_a), 0), 3), @@ -1226,8 +1296,8 @@ mod tests { assert_eq!(knowledge.received_statements.len(), 3); // number of prior `Ok`s. // Now make sure that the seconding limit is respected. 
- let hash_b: Hash = [2; 32].into(); - let hash_c: Hash = [3; 32].into(); + let hash_b = CandidateHash([2; 32].into()); + let hash_c = CandidateHash([3; 32].into()); assert_eq!( knowledge.receive(&(CompactStatement::Candidate(hash_b), 0), 3), @@ -1355,7 +1425,7 @@ mod tests { &active_heads, new_view.clone(), &Default::default(), - ).await.unwrap(); + ).await; assert_eq!(peer_data.view, new_view); assert!(!peer_data.view_knowledge.contains_key(&hash_a)); @@ -1471,7 +1541,7 @@ mod tests { &mut ctx, hash_b, &statement, - ).await.unwrap(); + ).await; { assert_eq!(needs_dependents.len(), 2); diff --git a/node/overseer/Cargo.toml b/node/overseer/Cargo.toml index 5341cb42e01483a64603f265a81bd1baf4370471..037910e1fa66ade67085ccdbcf603148a906ceeb 100644 --- a/node/overseer/Cargo.toml +++ b/node/overseer/Cargo.toml @@ -5,22 +5,22 @@ authors = ["Parity Technologies "] edition = "2018" [dependencies] -futures = "0.3.5" -log = "0.4.11" +futures = "0.3.8" +tracing = "0.1.22" +tracing-futures = "0.2.4" futures-timer = "3.0.2" streamunordered = "0.5.1" polkadot-primitives = { path = "../../primitives" } -client = { package = "sc-client-api", git = "https://github.com/paritytech/substrate", branch = "master" } +client = { package = "sc-client-api", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../subsystem" } polkadot-node-subsystem-util = { path = "../subsystem-util" } polkadot-node-primitives = { package = "polkadot-node-primitives", path = "../primitives" } -async-trait = "0.1" +async-trait = "0.1.42" [dev-dependencies] -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } polkadot-node-network-protocol = { path = "../network/protocol" } -futures = { version = "0.3.5", features = ["thread-pool"] } +futures = { version = "0.3.8", features = 
["thread-pool"] } futures-timer = "3.0.2" femme = "2.1.1" -log = "0.4.11" kv-log-macro = "1.0.7" diff --git a/node/overseer/examples/minimal-example.rs b/node/overseer/examples/minimal-example.rs index 9c0864a9b795d719c2134ca1f1cc527585fdde65..e481d38adcc6bdfbe1f78ea170348de3320cebb3 100644 --- a/node/overseer/examples/minimal-example.rs +++ b/node/overseer/examples/minimal-example.rs @@ -25,7 +25,6 @@ use futures::{ FutureExt, StreamExt, }; use futures_timer::Delay; -use kv_log_macro as log; use polkadot_primitives::v1::{BlockData, PoV}; use polkadot_overseer::{Overseer, AllSubsystems}; @@ -43,13 +42,13 @@ impl Subsystem1 { match ctx.try_recv().await { Ok(Some(msg)) => { if let FromOverseer::Communication { msg } = msg { - log::info!("msg {:?}", msg); + tracing::info!("msg {:?}", msg); } continue; } Ok(None) => (), Err(_) => { - log::info!("exiting"); + tracing::info!("exiting"); return; } } @@ -65,7 +64,7 @@ impl Subsystem1 { }.into(), tx, ) - )).await.unwrap(); + )).await; } } } @@ -76,6 +75,7 @@ impl Subsystem for Subsystem1 fn start(self, ctx: C) -> SpawnedSubsystem { let future = Box::pin(async move { Self::run(ctx).await; + Ok(()) }); SpawnedSubsystem { @@ -93,7 +93,7 @@ impl Subsystem2 { "subsystem-2-job", Box::pin(async { loop { - log::info!("Job tick"); + tracing::info!("Job tick"); Delay::new(Duration::from_secs(1)).await; } }), @@ -102,12 +102,12 @@ impl Subsystem2 { loop { match ctx.try_recv().await { Ok(Some(msg)) => { - log::info!("Subsystem2 received message {:?}", msg); + tracing::info!("Subsystem2 received message {:?}", msg); continue; } Ok(None) => { pending!(); } Err(_) => { - log::info!("exiting"); + tracing::info!("exiting"); return; }, } @@ -121,6 +121,7 @@ impl Subsystem for Subsystem2 fn start(self, ctx: C) -> SpawnedSubsystem { let future = Box::pin(async move { Self::run(ctx).await; + Ok(()) }); SpawnedSubsystem { @@ -157,7 +158,7 @@ fn main() { select! 
{ _ = overseer_fut => break, _ = timer_stream.next() => { - log::info!("tick"); + tracing::info!("tick"); } complete => break, } diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs index 944d56cb9a7914fd80652517484635ea8bcb550d..bbd9f626fe383fbe405ea9075aa1717c294541e4 100644 --- a/node/overseer/src/lib.rs +++ b/node/overseer/src/lib.rs @@ -68,7 +68,7 @@ use std::collections::{hash_map, HashMap}; use futures::channel::{mpsc, oneshot}; use futures::{ - pending, poll, select, + poll, select, future::BoxFuture, stream::{self, FuturesUnordered}, Future, FutureExt, SinkExt, StreamExt, @@ -135,6 +135,7 @@ enum ToOverseer { /// This structure exists solely for the purposes of decoupling /// `Overseer` code from the client code and the necessity to call /// `HeaderBackend::block_number_from_id()`. +#[derive(Debug)] pub struct BlockInfo { /// hash of the block. pub hash: Hash, @@ -191,18 +192,21 @@ pub struct OverseerHandler { impl OverseerHandler { /// Inform the `Overseer` that that some block was imported. - pub async fn block_imported(&mut self, block: BlockInfo) -> SubsystemResult<()> { - self.events_tx.send(Event::BlockImported(block)).await.map_err(Into::into) + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] + pub async fn block_imported(&mut self, block: BlockInfo) { + self.send_and_log_error(Event::BlockImported(block)).await } /// Send some message to one of the `Subsystem`s. - pub async fn send_msg(&mut self, msg: impl Into) -> SubsystemResult<()> { - self.events_tx.send(Event::MsgToSubsystem(msg.into())).await.map_err(Into::into) + #[tracing::instrument(level = "trace", skip(self, msg), fields(subsystem = LOG_TARGET))] + pub async fn send_msg(&mut self, msg: impl Into) { + self.send_and_log_error(Event::MsgToSubsystem(msg.into())).await } /// Inform the `Overseer` that that some block was finalized. 
- pub async fn block_finalized(&mut self, block: BlockInfo) -> SubsystemResult<()> { - self.events_tx.send(Event::BlockFinalized(block)).await.map_err(Into::into) + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] + pub async fn block_finalized(&mut self, block: BlockInfo) { + self.send_and_log_error(Event::BlockFinalized(block)).await } /// Wait for a block with the given hash to be in the active-leaves set. @@ -212,16 +216,24 @@ impl OverseerHandler { /// Note that due the fact the overseer doesn't store the whole active-leaves set, only deltas, /// the response channel may never return if the hash was deactivated before this call. /// In this case, it's the caller's responsibility to ensure a timeout is set. - pub async fn wait_for_activation(&mut self, hash: Hash, response_channel: oneshot::Sender>) -> SubsystemResult<()> { - self.events_tx.send(Event::ExternalRequest(ExternalRequest::WaitForActivation { + #[tracing::instrument(level = "trace", skip(self, response_channel), fields(subsystem = LOG_TARGET))] + pub async fn wait_for_activation(&mut self, hash: Hash, response_channel: oneshot::Sender>) { + self.send_and_log_error(Event::ExternalRequest(ExternalRequest::WaitForActivation { hash, response_channel - })).await.map_err(Into::into) + })).await } /// Tell `Overseer` to shutdown. - pub async fn stop(&mut self) -> SubsystemResult<()> { - self.events_tx.send(Event::Stop).await.map_err(Into::into) + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] + pub async fn stop(&mut self) { + self.send_and_log_error(Event::Stop).await + } + + async fn send_and_log_error(&mut self, event: Event) { + if self.events_tx.send(event).await.is_err() { + tracing::info!(target: LOG_TARGET, "Failed to send an event to Overseer"); + } } } @@ -233,7 +245,7 @@ impl OverseerHandler { pub async fn forward_events>( client: Arc

, mut handler: OverseerHandler, -) -> SubsystemResult<()> { +) { let mut finality = client.finality_notification_stream(); let mut imports = client.import_notification_stream(); @@ -242,7 +254,7 @@ pub async fn forward_events>( f = finality.next() => { match f { Some(block) => { - handler.block_finalized(block.into()).await?; + handler.block_finalized(block.into()).await; } None => break, } @@ -250,7 +262,7 @@ pub async fn forward_events>( i = imports.next() => { match i { Some(block) => { - handler.block_imported(block.into()).await?; + handler.block_imported(block.into()).await; } None => break, } @@ -258,8 +270,6 @@ pub async fn forward_events>( complete => break, } } - - Ok(()) } impl Debug for ToOverseer { @@ -332,15 +342,34 @@ impl SubsystemContext for OverseerSubsystemContext { }).await.map_err(Into::into) } - async fn send_message(&mut self, msg: AllMessages) -> SubsystemResult<()> { - self.tx.send(ToOverseer::SubsystemMessage(msg)).await.map_err(Into::into) + async fn send_message(&mut self, msg: AllMessages) { + self.send_and_log_error(ToOverseer::SubsystemMessage(msg)).await } - async fn send_messages(&mut self, msgs: T) -> SubsystemResult<()> + async fn send_messages(&mut self, msgs: T) where T: IntoIterator + Send, T::IntoIter: Send { let mut msgs = stream::iter(msgs.into_iter().map(ToOverseer::SubsystemMessage).map(Ok)); - self.tx.send_all(&mut msgs).await.map_err(Into::into) + if self.tx.send_all(&mut msgs).await.is_err() { + tracing::debug!( + target: LOG_TARGET, + msg_type = std::any::type_name::(), + "Failed to send messages to Overseer", + ); + + } + } +} + +impl OverseerSubsystemContext { + async fn send_and_log_error(&mut self, msg: ToOverseer) { + if self.tx.send(msg).await.is_err() { + tracing::debug!( + target: LOG_TARGET, + msg_type = std::any::type_name::(), + "Failed to send a message to Overseer", + ); + } } } @@ -355,6 +384,30 @@ struct OverseenSubsystem { instance: Option>, } +impl OverseenSubsystem { + /// Send a message to the 
wrapped subsystem. + /// + /// If the inner `instance` is `None`, nothing is happening. + async fn send_message(&mut self, msg: M) -> SubsystemResult<()> { + if let Some(ref mut instance) = self.instance { + instance.tx.send(FromOverseer::Communication { msg }).await?; + } + + Ok(()) + } + + /// Send a signal to the wrapped subsystem. + /// + /// If the inner `instance` is `None`, nothing is happening. + async fn send_signal(&mut self, signal: OverseerSignal) -> SubsystemResult<()> { + if let Some(ref mut instance) = self.instance { + instance.tx.send(FromOverseer::Signal(signal)).await?; + } + + Ok(()) + } +} + /// The `Overseer` itself. pub struct Overseer { /// A candidate validation subsystem. @@ -1211,71 +1264,27 @@ where // Stop the overseer. async fn stop(mut self) { - if let Some(ref mut s) = self.candidate_validation_subsystem.instance { - let _ = s.tx.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; - } - - if let Some(ref mut s) = self.candidate_backing_subsystem.instance { - let _ = s.tx.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; - } - - if let Some(ref mut s) = self.candidate_selection_subsystem.instance { - let _ = s.tx.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; - } - - if let Some(ref mut s) = self.statement_distribution_subsystem.instance { - let _ = s.tx.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; - } - - if let Some(ref mut s) = self.availability_distribution_subsystem.instance { - let _ = s.tx.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; - } - - if let Some(ref mut s) = self.bitfield_signing_subsystem.instance { - let _ = s.tx.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; - } - - if let Some(ref mut s) = self.bitfield_distribution_subsystem.instance { - let _ = s.tx.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; - } - - if let Some(ref mut s) = self.provisioner_subsystem.instance { - let _ = 
s.tx.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; - } - - if let Some(ref mut s) = self.pov_distribution_subsystem.instance { - let _ = s.tx.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; - } - - if let Some(ref mut s) = self.runtime_api_subsystem.instance { - let _ = s.tx.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; - } - - if let Some(ref mut s) = self.availability_store_subsystem.instance { - let _ = s.tx.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; - } - - if let Some(ref mut s) = self.network_bridge_subsystem.instance { - let _ = s.tx.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; - } - - if let Some(ref mut s) = self.chain_api_subsystem.instance { - let _ = s.tx.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; - } - - if let Some(ref mut s) = self.collator_protocol_subsystem.instance { - let _ = s.tx.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; - } - - if let Some(ref mut s) = self.collation_generation_subsystem.instance { - let _ = s.tx.send(FromOverseer::Signal(OverseerSignal::Conclude)).await; - } + let _ = self.candidate_validation_subsystem.send_signal(OverseerSignal::Conclude).await; + let _ = self.candidate_backing_subsystem.send_signal(OverseerSignal::Conclude).await; + let _ = self.candidate_selection_subsystem.send_signal(OverseerSignal::Conclude).await; + let _ = self.statement_distribution_subsystem.send_signal(OverseerSignal::Conclude).await; + let _ = self.availability_distribution_subsystem.send_signal(OverseerSignal::Conclude).await; + let _ = self.bitfield_signing_subsystem.send_signal(OverseerSignal::Conclude).await; + let _ = self.bitfield_distribution_subsystem.send_signal(OverseerSignal::Conclude).await; + let _ = self.provisioner_subsystem.send_signal(OverseerSignal::Conclude).await; + let _ = self.pov_distribution_subsystem.send_signal(OverseerSignal::Conclude).await; + let _ = 
self.runtime_api_subsystem.send_signal(OverseerSignal::Conclude).await; + let _ = self.availability_store_subsystem.send_signal(OverseerSignal::Conclude).await; + let _ = self.network_bridge_subsystem.send_signal(OverseerSignal::Conclude).await; + let _ = self.chain_api_subsystem.send_signal(OverseerSignal::Conclude).await; + let _ = self.collator_protocol_subsystem.send_signal(OverseerSignal::Conclude).await; + let _ = self.collation_generation_subsystem.send_signal(OverseerSignal::Conclude).await; let mut stop_delay = Delay::new(Duration::from_secs(STOP_DELAY)).fuse(); loop { select! { - x = self.running_subsystems.next() => { + _ = self.running_subsystems.next() => { if self.running_subsystems.is_empty() { break; } @@ -1287,11 +1296,11 @@ where } /// Run the `Overseer`. + #[tracing::instrument(skip(self), fields(subsystem = LOG_TARGET))] pub async fn run(mut self) -> SubsystemResult<()> { - let leaves = std::mem::take(&mut self.leaves); let mut update = ActiveLeavesUpdate::default(); - for (hash, number) in leaves.into_iter() { + for (hash, number) in std::mem::take(&mut self.leaves) { update.activated.push(hash); let _ = self.active_leaves.insert(hash, number); self.on_head_activated(&hash); @@ -1300,53 +1309,66 @@ where self.broadcast_signal(OverseerSignal::ActiveLeaves(update)).await?; loop { - while let Poll::Ready(Some(msg)) = poll!(&mut self.events_rx.next()) { - match msg { - Event::MsgToSubsystem(msg) => { - self.route_message(msg).await; - } - Event::Stop => { - self.stop().await; - return Ok(()); - } - Event::BlockImported(block) => { - self.block_imported(block).await?; - } - Event::BlockFinalized(block) => { - self.block_finalized(block).await?; - } - Event::ExternalRequest(request) => { - self.handle_external_request(request); - } - } - } - - while let Poll::Ready(Some((StreamYield::Item(msg), _))) = poll!( - &mut self.running_subsystems_rx.next() - ) { - match msg { - ToOverseer::SubsystemMessage(msg) => self.route_message(msg).await, - 
ToOverseer::SpawnJob { name, s } => { - self.spawn_job(name, s); + select! { + msg = self.events_rx.next().fuse() => { + let msg = if let Some(msg) = msg { + msg + } else { + continue + }; + + match msg { + Event::MsgToSubsystem(msg) => { + self.route_message(msg).await; + } + Event::Stop => { + self.stop().await; + return Ok(()); + } + Event::BlockImported(block) => { + self.block_imported(block).await?; + } + Event::BlockFinalized(block) => { + self.block_finalized(block).await?; + } + Event::ExternalRequest(request) => { + self.handle_external_request(request); + } } - ToOverseer::SpawnBlockingJob { name, s } => { - self.spawn_blocking_job(name, s); + }, + msg = self.running_subsystems_rx.next().fuse() => { + let msg = if let Some((StreamYield::Item(msg), _)) = msg { + msg + } else { + continue + }; + + match msg { + ToOverseer::SubsystemMessage(msg) => self.route_message(msg).await, + ToOverseer::SpawnJob { name, s } => { + self.spawn_job(name, s); + } + ToOverseer::SpawnBlockingJob { name, s } => { + self.spawn_blocking_job(name, s); + } } - } - } - - // Some subsystem exited? It's time to panic. - if let Poll::Ready(Some(finished)) = poll!(self.running_subsystems.next()) { - log::error!(target: LOG_TARGET, "Subsystem finished unexpectedly {:?}", finished); - self.stop().await; - return finished; + }, + res = self.running_subsystems.next().fuse() => { + let finished = if let Some(finished) = res { + finished + } else { + continue + }; + + tracing::error!(target: LOG_TARGET, subsystem = ?finished, "subsystem finished unexpectedly"); + self.stop().await; + return finished; + }, } - - // Looks like nothing is left to be polled, let's take a break. 
- pending!(); } } + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] async fn block_imported(&mut self, block: BlockInfo) -> SubsystemResult<()> { let mut update = ActiveLeavesUpdate::default(); @@ -1376,6 +1398,7 @@ where Ok(()) } + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] async fn block_finalized(&mut self, block: BlockInfo) -> SubsystemResult<()> { let mut update = ActiveLeavesUpdate::default(); @@ -1392,158 +1415,91 @@ where self.on_head_deactivated(deactivated) } - self.broadcast_signal(OverseerSignal::ActiveLeaves(update)).await?; + // Most of the time we have a leave already closed when it is finalized, so we check here if there are actually + // any updates before sending it to the subsystems. + if !update.is_empty() { + self.broadcast_signal(OverseerSignal::ActiveLeaves(update)).await?; + } self.broadcast_signal(OverseerSignal::BlockFinalized(block.hash)).await?; Ok(()) } + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] async fn broadcast_signal(&mut self, signal: OverseerSignal) -> SubsystemResult<()> { - if let Some(ref mut s) = self.candidate_validation_subsystem.instance { - s.tx.send(FromOverseer::Signal(signal.clone())).await?; - } - - if let Some(ref mut s) = self.candidate_backing_subsystem.instance { - s.tx.send(FromOverseer::Signal(signal.clone())).await?; - } - - if let Some(ref mut s) = self.candidate_selection_subsystem.instance { - s.tx.send(FromOverseer::Signal(signal.clone())).await?; - } - - if let Some(ref mut s) = self.statement_distribution_subsystem.instance { - s.tx.send(FromOverseer::Signal(signal.clone())).await?; - } - - if let Some(ref mut s) = self.availability_distribution_subsystem.instance { - s.tx.send(FromOverseer::Signal(signal.clone())).await?; - } - - if let Some(ref mut s) = self.bitfield_distribution_subsystem.instance { - s.tx.send(FromOverseer::Signal(signal.clone())).await?; - } - - if let Some(ref mut 
s) = self.bitfield_signing_subsystem.instance { - s.tx.send(FromOverseer::Signal(signal.clone())).await?; - } - - if let Some(ref mut s) = self.provisioner_subsystem.instance { - s.tx.send(FromOverseer::Signal(signal.clone())).await?; - } - - if let Some(ref mut s) = self.pov_distribution_subsystem.instance { - s.tx.send(FromOverseer::Signal(signal.clone())).await?; - } - - if let Some(ref mut s) = self.runtime_api_subsystem.instance { - s.tx.send(FromOverseer::Signal(signal.clone())).await?; - } - - if let Some(ref mut s) = self.availability_store_subsystem.instance { - s.tx.send(FromOverseer::Signal(signal.clone())).await?; - } - - if let Some(ref mut s) = self.network_bridge_subsystem.instance { - s.tx.send(FromOverseer::Signal(signal.clone())).await?; - } - - if let Some(ref mut s) = self.chain_api_subsystem.instance { - s.tx.send(FromOverseer::Signal(signal.clone())).await?; - } - - if let Some(ref mut s) = self.collator_protocol_subsystem.instance { - s.tx.send(FromOverseer::Signal(signal.clone())).await?; - } - - if let Some(ref mut s) = self.collation_generation_subsystem.instance { - s.tx.send(FromOverseer::Signal(signal.clone())).await?; - } + self.candidate_validation_subsystem.send_signal(signal.clone()).await?; + self.candidate_backing_subsystem.send_signal(signal.clone()).await?; + self.candidate_selection_subsystem.send_signal(signal.clone()).await?; + self.statement_distribution_subsystem.send_signal(signal.clone()).await?; + self.availability_distribution_subsystem.send_signal(signal.clone()).await?; + self.bitfield_signing_subsystem.send_signal(signal.clone()).await?; + self.bitfield_distribution_subsystem.send_signal(signal.clone()).await?; + self.provisioner_subsystem.send_signal(signal.clone()).await?; + self.pov_distribution_subsystem.send_signal(signal.clone()).await?; + self.runtime_api_subsystem.send_signal(signal.clone()).await?; + self.availability_store_subsystem.send_signal(signal.clone()).await?; + 
self.network_bridge_subsystem.send_signal(signal.clone()).await?; + self.chain_api_subsystem.send_signal(signal.clone()).await?; + self.collator_protocol_subsystem.send_signal(signal.clone()).await?; + self.collation_generation_subsystem.send_signal(signal).await?; Ok(()) } + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] async fn route_message(&mut self, msg: AllMessages) { self.metrics.on_message_relayed(); match msg { AllMessages::CandidateValidation(msg) => { - if let Some(ref mut s) = self.candidate_validation_subsystem.instance { - let _ = s.tx.send(FromOverseer::Communication { msg }).await; - } - } + let _ = self.candidate_validation_subsystem.send_message(msg).await; + }, AllMessages::CandidateBacking(msg) => { - if let Some(ref mut s) = self.candidate_backing_subsystem.instance { - let _ = s.tx.send(FromOverseer::Communication { msg }).await; - } - } + let _ = self.candidate_backing_subsystem.send_message(msg).await; + }, AllMessages::CandidateSelection(msg) => { - if let Some(ref mut s) = self.candidate_selection_subsystem.instance { - let _ = s.tx.send(FromOverseer::Communication { msg }).await; - } - } + let _ = self.candidate_selection_subsystem.send_message(msg).await; + }, AllMessages::StatementDistribution(msg) => { - if let Some(ref mut s) = self.statement_distribution_subsystem.instance { - let _ = s.tx.send(FromOverseer::Communication { msg }).await; - } - } + let _ = self.statement_distribution_subsystem.send_message(msg).await; + }, AllMessages::AvailabilityDistribution(msg) => { - if let Some(ref mut s) = self.availability_distribution_subsystem.instance { - let _ = s.tx.send(FromOverseer::Communication { msg }).await; - } - } + let _ = self.availability_distribution_subsystem.send_message(msg).await; + }, AllMessages::BitfieldDistribution(msg) => { - if let Some(ref mut s) = self.bitfield_distribution_subsystem.instance { - let _ = s.tx.send(FromOverseer::Communication { msg }).await; - } - } + let _ = 
self.bitfield_distribution_subsystem.send_message(msg).await; + }, AllMessages::BitfieldSigning(msg) => { - if let Some(ref mut s) = self.bitfield_signing_subsystem.instance { - let _ = s.tx.send(FromOverseer::Communication{ msg }).await; - } - } + let _ = self.bitfield_signing_subsystem.send_message(msg).await; + }, AllMessages::Provisioner(msg) => { - if let Some(ref mut s) = self.provisioner_subsystem.instance { - let _ = s.tx.send(FromOverseer::Communication { msg }).await; - } - } + let _ = self.provisioner_subsystem.send_message(msg).await; + }, AllMessages::PoVDistribution(msg) => { - if let Some(ref mut s) = self.pov_distribution_subsystem.instance { - let _ = s.tx.send(FromOverseer::Communication { msg }).await; - } - } + let _ = self.pov_distribution_subsystem.send_message(msg).await; + }, AllMessages::RuntimeApi(msg) => { - if let Some(ref mut s) = self.runtime_api_subsystem.instance { - let _ = s.tx.send(FromOverseer::Communication { msg }).await; - } - } + let _ = self.runtime_api_subsystem.send_message(msg).await; + }, AllMessages::AvailabilityStore(msg) => { - if let Some(ref mut s) = self.availability_store_subsystem.instance { - let _ = s.tx.send(FromOverseer::Communication { msg }).await; - } - } + let _ = self.availability_store_subsystem.send_message(msg).await; + }, AllMessages::NetworkBridge(msg) => { - if let Some(ref mut s) = self.network_bridge_subsystem.instance { - let _ = s.tx.send(FromOverseer::Communication { msg }).await; - } - } + let _ = self.network_bridge_subsystem.send_message(msg).await; + }, AllMessages::ChainApi(msg) => { - if let Some(ref mut s) = self.chain_api_subsystem.instance { - let _ = s.tx.send(FromOverseer::Communication { msg }).await; - } - } + let _ = self.chain_api_subsystem.send_message(msg).await; + }, AllMessages::CollationGeneration(msg) => { - if let Some(ref mut s) = self.collation_generation_subsystem.instance { - let _ = s.tx.send(FromOverseer::Communication { msg }).await; - } - } + let _ = 
self.collation_generation_subsystem.send_message(msg).await; + }, AllMessages::CollatorProtocol(msg) => { - if let Some(ref mut s) = self.collator_protocol_subsystem.instance { - let _ = s.tx.send(FromOverseer::Communication { msg }).await; - } - } + let _ = self.collator_protocol_subsystem.send_message(msg).await; + }, } } + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn on_head_activated(&mut self, hash: &Hash) { self.metrics.on_head_activated(); if let Some(listeners) = self.activation_external_listeners.remove(hash) { @@ -1554,6 +1510,7 @@ where } } + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn on_head_deactivated(&mut self, hash: &Hash) { self.metrics.on_head_deactivated(); if let Some(listeners) = self.activation_external_listeners.remove(hash) { @@ -1562,6 +1519,7 @@ where } } + #[tracing::instrument(level = "trace", skip(self), fields(subsystem = LOG_TARGET))] fn clean_up_external_listeners(&mut self) { self.activation_external_listeners.retain(|_, v| { // remove dead listeners @@ -1570,6 +1528,7 @@ where }) } + #[tracing::instrument(level = "trace", skip(self, request), fields(subsystem = LOG_TARGET))] fn handle_external_request(&mut self, request: ExternalRequest) { match request { ExternalRequest::WaitForActivation { hash, response_channel } => { @@ -1606,14 +1565,18 @@ fn spawn( let (tx, rx) = oneshot::channel(); let fut = Box::pin(async move { - future.await; + if let Err(e) = future.await { + tracing::error!(subsystem=name, err = ?e, "subsystem exited with error"); + } else { + tracing::debug!(subsystem=name, "subsystem exited without an error"); + } let _ = tx.send(()); }); spawner.spawn(name, fut); let _ = streams.push(from_rx); - futures.push(Box::pin(rx.map(|e| { log::warn!("Dropping error {:?}", e); Ok(()) }))); + futures.push(Box::pin(rx.map(|e| { tracing::warn!(err = ?e, "dropping error"); Ok(()) }))); let instance = Some(SubsystemInstance { tx: to_tx, @@ -1629,9 
+1592,9 @@ fn spawn( mod tests { use std::sync::atomic; use std::collections::HashMap; - use futures::{executor, pin_mut, select, channel::mpsc, FutureExt}; + use futures::{executor, pin_mut, select, channel::mpsc, FutureExt, pending}; - use polkadot_primitives::v1::{BlockData, CollatorPair, PoV}; + use polkadot_primitives::v1::{BlockData, CollatorPair, PoV, CandidateHash}; use polkadot_subsystem::messages::RuntimeApiRequest; use polkadot_node_primitives::{Collation, CollationGenerationConfig}; use polkadot_node_network_protocol::{PeerId, ReputationChange, NetworkBridgeEvent}; @@ -1658,8 +1621,8 @@ mod tests { i += 1; continue; } - Ok(FromOverseer::Signal(OverseerSignal::Conclude)) => return, - Err(_) => return, + Ok(FromOverseer::Signal(OverseerSignal::Conclude)) => return Ok(()), + Err(_) => return Ok(()), _ => (), } } @@ -1693,7 +1656,7 @@ mod tests { tx, ) ) - ).await.unwrap(); + ).await; c += 1; continue; } @@ -1704,11 +1667,13 @@ mod tests { Ok(Some(_)) => { continue; } - Err(_) => return, + Err(_) => return Ok(()), _ => (), } pending!(); } + + Ok(()) }), } } @@ -1724,6 +1689,7 @@ mod tests { name: "test-subsystem-4", future: Box::pin(async move { // Do nothing and exit. + Ok(()) }), } } @@ -1758,13 +1724,13 @@ mod tests { loop { select! 
{ - a = overseer_fut => break, + _ = overseer_fut => break, s1_next = s1_rx.next() => { match s1_next { Some(msg) => { s1_results.push(msg); if s1_results.len() == 10 { - handler.stop().await.unwrap(); + handler.stop().await; } } None => break, @@ -1772,7 +1738,7 @@ mod tests { }, s2_next = s2_rx.next() => { match s2_next { - Some(msg) => s2_results.push(s2_next), + Some(_) => s2_results.push(s2_next), None => break, } }, @@ -1822,10 +1788,10 @@ mod tests { pin_mut!(overseer_fut); - handler.block_imported(second_block).await.unwrap(); - handler.block_imported(third_block).await.unwrap(); - handler.send_msg(AllMessages::CandidateValidation(test_candidate_validation_msg())).await.unwrap(); - handler.stop().await.unwrap(); + handler.block_imported(second_block).await; + handler.block_imported(third_block).await; + handler.send_msg(AllMessages::CandidateValidation(test_candidate_validation_msg())).await; + handler.stop().await; select! { res = overseer_fut => { @@ -1902,11 +1868,13 @@ mod tests { continue; }, Ok(Some(_)) => continue, - Err(_) => return, + Err(_) => break, _ => (), } pending!(); } + + Ok(()) }), } } @@ -1931,11 +1899,13 @@ mod tests { continue; }, Ok(Some(_)) => continue, - Err(_) => return, + Err(_) => break, _ => (), } pending!(); } + + Ok(()) }), } } @@ -1986,8 +1956,8 @@ mod tests { let mut ss5_results = Vec::new(); let mut ss6_results = Vec::new(); - handler.block_imported(second_block).await.unwrap(); - handler.block_imported(third_block).await.unwrap(); + handler.block_imported(second_block).await; + handler.block_imported(third_block).await; let expected_heartbeats = vec![ OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(first_block_hash)), @@ -2022,7 +1992,7 @@ mod tests { if ss5_results.len() == expected_heartbeats.len() && ss6_results.len() == expected_heartbeats.len() { - handler.stop().await.unwrap(); + handler.stop().await; } } @@ -2080,7 +2050,7 @@ mod tests { let mut ss6_results = Vec::new(); // this should stop work on both 
forks we started with earlier. - handler.block_finalized(third_block).await.unwrap(); + handler.block_finalized(third_block).await; let expected_heartbeats = vec![ OverseerSignal::ActiveLeaves(ActiveLeavesUpdate { @@ -2115,7 +2085,7 @@ mod tests { if ss5_results.len() == expected_heartbeats.len() && ss6_results.len() == expected_heartbeats.len() { - handler.stop().await.unwrap(); + handler.stop().await; } } @@ -2180,6 +2150,8 @@ mod tests { } pending!(); } + + Ok(()) }), } } @@ -2193,7 +2165,7 @@ mod tests { fn test_candidate_backing_msg() -> CandidateBackingMessage { let (sender, _) = oneshot::channel(); - CandidateBackingMessage::GetBackedCandidates(Default::default(), sender) + CandidateBackingMessage::GetBackedCandidates(Default::default(), Vec::new(), sender) } fn test_candidate_selection_msg() -> CandidateSelectionMessage { @@ -2260,7 +2232,7 @@ mod tests { fn test_availability_store_msg() -> AvailabilityStoreMessage { let (sender, _) = oneshot::channel(); - AvailabilityStoreMessage::QueryAvailableData(Default::default(), sender) + AvailabilityStoreMessage::QueryAvailableData(CandidateHash(Default::default()), sender) } fn test_network_bridge_msg() -> NetworkBridgeMessage { @@ -2315,28 +2287,28 @@ mod tests { hash: Default::default(), parent_hash: Default::default(), number: Default::default(), - }).await.unwrap(); + }).await; // send a msg to each subsystem // except for BitfieldSigning as the message is not instantiable - handler.send_msg(AllMessages::CandidateValidation(test_candidate_validation_msg())).await.unwrap(); - handler.send_msg(AllMessages::CandidateBacking(test_candidate_backing_msg())).await.unwrap(); - handler.send_msg(AllMessages::CandidateSelection(test_candidate_selection_msg())).await.unwrap(); - handler.send_msg(AllMessages::CollationGeneration(test_collator_generation_msg())).await.unwrap(); - handler.send_msg(AllMessages::CollatorProtocol(test_collator_protocol_msg())).await.unwrap(); - 
handler.send_msg(AllMessages::StatementDistribution(test_statement_distribution_msg())).await.unwrap(); - handler.send_msg(AllMessages::AvailabilityDistribution(test_availability_distribution_msg())).await.unwrap(); - // handler.send_msg(AllMessages::BitfieldSigning(test_bitfield_signing_msg())).await.unwrap(); - handler.send_msg(AllMessages::BitfieldDistribution(test_bitfield_distribution_msg())).await.unwrap(); - handler.send_msg(AllMessages::Provisioner(test_provisioner_msg())).await.unwrap(); - handler.send_msg(AllMessages::PoVDistribution(test_pov_distribution_msg())).await.unwrap(); - handler.send_msg(AllMessages::RuntimeApi(test_runtime_api_msg())).await.unwrap(); - handler.send_msg(AllMessages::AvailabilityStore(test_availability_store_msg())).await.unwrap(); - handler.send_msg(AllMessages::NetworkBridge(test_network_bridge_msg())).await.unwrap(); - handler.send_msg(AllMessages::ChainApi(test_chain_api_msg())).await.unwrap(); + handler.send_msg(AllMessages::CandidateValidation(test_candidate_validation_msg())).await; + handler.send_msg(AllMessages::CandidateBacking(test_candidate_backing_msg())).await; + handler.send_msg(AllMessages::CandidateSelection(test_candidate_selection_msg())).await; + handler.send_msg(AllMessages::CollationGeneration(test_collator_generation_msg())).await; + handler.send_msg(AllMessages::CollatorProtocol(test_collator_protocol_msg())).await; + handler.send_msg(AllMessages::StatementDistribution(test_statement_distribution_msg())).await; + handler.send_msg(AllMessages::AvailabilityDistribution(test_availability_distribution_msg())).await; + // handler.send_msg(AllMessages::BitfieldSigning(test_bitfield_signing_msg())).await; + handler.send_msg(AllMessages::BitfieldDistribution(test_bitfield_distribution_msg())).await; + handler.send_msg(AllMessages::Provisioner(test_provisioner_msg())).await; + handler.send_msg(AllMessages::PoVDistribution(test_pov_distribution_msg())).await; + 
handler.send_msg(AllMessages::RuntimeApi(test_runtime_api_msg())).await; + handler.send_msg(AllMessages::AvailabilityStore(test_availability_store_msg())).await; + handler.send_msg(AllMessages::NetworkBridge(test_network_bridge_msg())).await; + handler.send_msg(AllMessages::ChainApi(test_chain_api_msg())).await; // send a stop signal to each subsystems - handler.stop().await.unwrap(); + handler.stop().await; select! { res = overseer_fut => { diff --git a/node/primitives/Cargo.toml b/node/primitives/Cargo.toml index 81e2467b374fd0dbeac1715a6aea7a875e7969c4..bd82cf1037b2200089334a0ad9bb328d4022999b 100644 --- a/node/primitives/Cargo.toml +++ b/node/primitives/Cargo.toml @@ -6,9 +6,9 @@ edition = "2018" description = "Primitives types for the Node-side" [dependencies] -futures = "0.3.5" +futures = "0.3.8" polkadot-primitives = { path = "../../primitives" } polkadot-statement-table = { path = "../../statement-table" } -parity-scale-codec = { version = "1.3.4", default-features = false, features = ["derive"] } -runtime_primitives = { package = "sp-runtime", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +parity-scale-codec = { version = "1.3.5", default-features = false, features = ["derive"] } +runtime_primitives = { package = "sp-runtime", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } diff --git a/node/primitives/src/lib.rs b/node/primitives/src/lib.rs index ddcec114ebe2510f9d147742e82da1b101792acf..0ea2799daac40ba1d388f29147df287c7172a0a7 100644 --- a/node/primitives/src/lib.rs +++ b/node/primitives/src/lib.rs @@ -28,7 +28,7 @@ use polkadot_primitives::v1::{ Hash, CommittedCandidateReceipt, CandidateReceipt, CompactStatement, EncodeAs, Signed, SigningContext, ValidatorIndex, 
ValidatorId, UpwardMessage, ValidationCode, PersistedValidationData, ValidationData, - HeadData, PoV, CollatorPair, Id as ParaId, ValidationOutputs, + HeadData, PoV, CollatorPair, Id as ParaId, OutboundHrmpMessage, CandidateCommitments, CandidateHash, }; use polkadot_statement_table::{ generic::{ @@ -54,10 +54,10 @@ pub enum Statement { Seconded(CommittedCandidateReceipt), /// A statement that a validator has deemed a candidate valid. #[codec(index = "2")] - Valid(Hash), + Valid(CandidateHash), /// A statement that a validator has deemed a candidate invalid. #[codec(index = "3")] - Invalid(Hash), + Invalid(CandidateHash), } impl Statement { @@ -144,7 +144,7 @@ pub enum InvalidCandidate { pub enum ValidationResult { /// Candidate is valid. The validation process yields these outputs and the persisted validation /// data used to form inputs. - Valid(ValidationOutputs, PersistedValidationData), + Valid(CandidateCommitments, PersistedValidationData), /// Candidate is invalid. Invalid(InvalidCandidate), } @@ -252,9 +252,11 @@ impl std::convert::TryFrom for MisbehaviorReport { /// - does not contain the erasure root; that's computed at the Polkadot level, not at Cumulus /// - contains a proof of validity. #[derive(Clone, Encode, Decode)] -pub struct Collation { +pub struct Collation { /// Messages destined to be interpreted by the Relay chain itself. pub upward_messages: Vec, + /// The horizontal messages sent by the parachain. + pub horizontal_messages: Vec>, /// New validation code. pub new_validation_code: Option, /// The head-data produced as a result of execution. @@ -263,18 +265,27 @@ pub struct Collation { pub proof_of_validity: PoV, /// The number of messages processed from the DMQ. pub processed_downward_messages: u32, + /// The mark which specifies the block number up to which all inbound HRMP messages are processed. + pub hrmp_watermark: BlockNumber, } +/// Collation function. 
+/// +/// Will be called with the hash of the relay chain block the parachain +/// block should be build on and the [`ValidationData`] that provides +/// information about the state of the parachain on the relay chain. +pub type CollatorFn = Box< + dyn Fn(Hash, &ValidationData) -> Pin> + Send>> + + Send + + Sync, +>; + /// Configuration for the collation generator pub struct CollationGenerationConfig { /// Collator's authentication key, so it can sign things. pub key: CollatorPair, - /// Collation function. - /// - /// Will be called with the hash of the relay chain block the parachain - /// block should be build on and the [`ValidationData`] that provides - /// information about the state of the parachain on the relay chain. - pub collator: Box Pin> + Send>> + Send + Sync>, + /// Collation function. See [`CollatorFn`] for more details. + pub collator: CollatorFn, /// The parachain that this collator collates for pub para_id: ParaId, } diff --git a/node/service/Cargo.toml b/node/service/Cargo.toml index 78b25605c1ca11c59fd6bf61aa34c07299b6ad41..0f53151786d010a3df463b632ef448f93573c15e 100644 --- a/node/service/Cargo.toml +++ b/node/service/Cargo.toml @@ -6,59 +6,57 @@ edition = "2018" [dependencies] # Substrate Client -authority-discovery = { package = "sc-authority-discovery", git = "https://github.com/paritytech/substrate", branch = "master" } -babe = { package = "sc-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "master" } -grandpa = { package = "sc-finality-grandpa", git = "https://github.com/paritytech/substrate", branch = "master" } -sc-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-chain-spec = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-client-db = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-consensus = { git = 
"https://github.com/paritytech/substrate", branch = "master" } -sc-executor = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" } -service = { package = "sc-service", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -telemetry = { package = "sc-telemetry", git = "https://github.com/paritytech/substrate", branch = "master" } +sc-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +babe = { package = "sc-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +grandpa = { package = "sc-finality-grandpa", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sc-block-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sc-chain-spec = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sc-client-db = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sc-consensus-slots = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sc-executor = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sc-network = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sc-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +service = { package = "sc-service", git = "https://github.com/paritytech/substrate", branch 
= "polkadot-release-v0.8.27", default-features = false } +telemetry = { package = "sc-telemetry", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } # Substrate Primitives -authority-discovery-primitives = { package = "sp-authority-discovery", git = "https://github.com/paritytech/substrate", branch = "master" } -babe-primitives = { package = "sp-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "master" } -consensus_common = { package = "sp-consensus", git = "https://github.com/paritytech/substrate", branch = "master" } -grandpa_primitives = { package = "sp-finality-grandpa", git = "https://github.com/paritytech/substrate", branch = "master" } -inherents = { package = "sp-inherents", git = "https://github.com/paritytech/substrate", branch = "master" } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-offchain = { package = "sp-offchain", git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-session = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-storage = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } 
+babe-primitives = { package = "sp-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +consensus_common = { package = "sp-consensus", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +grandpa_primitives = { package = "sp-finality-grandpa", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +inherents = { package = "sp-inherents", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-offchain = { package = "sp-offchain", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-session = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-storage = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } # Substrate Pallets -pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-im-online = { git = "https://github.com/paritytech/substrate", branch = 
"master" } -pallet-staking = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master" } +pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +pallet-im-online = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +pallet-staking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } # Substrate Other -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" } -frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", git = "https://github.com/paritytech/substrate", branch = "master" } +frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } # External Crates -codec = { package = "parity-scale-codec", version = "1.3.4" } -futures = "0.3.4" -hex-literal = "0.2.1" -lazy_static = "1.4.0" -log = "0.4.8" -parking_lot = "0.9.0" -serde = { version = "1.0.102", features = ["derive"] } -slog = "2.5.2" +futures = "0.3.8" +hex-literal = "0.3.1" +tracing = "0.1.22" +tracing-futures = "0.2.4" +serde = { version = "1.0.118", features = ["derive"] } # Polkadot polkadot-node-core-proposer = { path = "../core/proposer" } @@ -68,12 +66,13 @@ polkadot-primitives = { path = "../../primitives" } polkadot-rpc = { path = "../../rpc" } 
polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../subsystem" } polkadot-node-subsystem-util = { path = "../subsystem-util" } +polkadot-runtime-parachains = { path = "../../runtime/parachains" } # Polkadot Runtimes polkadot-runtime = { path = "../../runtime/polkadot" } kusama-runtime = { path = "../../runtime/kusama" } westend-runtime = { path = "../../runtime/westend" } -rococo-runtime = { package = "rococo-v1-runtime", path = "../../runtime/rococo-v1" } +rococo-runtime = { path = "../../runtime/rococo" } # Polkadot Subsystems polkadot-availability-bitfield-distribution = { path = "../network/bitfield-distribution", optional = true } @@ -94,19 +93,21 @@ polkadot-statement-distribution = { path = "../network/statement-distribution", [dev-dependencies] polkadot-test-client = { path = "../test/client" } -env_logger = "0.8.1" +env_logger = "0.8.2" [features] default = ["db", "full-node"] db = ["service/db"] -runtime-benchmarks = ["polkadot-runtime/runtime-benchmarks", "kusama-runtime/runtime-benchmarks", "westend-runtime/runtime-benchmarks"] full-node = [ + "polkadot-node-core-av-store", +] +runtime-benchmarks = ["polkadot-runtime/runtime-benchmarks", "kusama-runtime/runtime-benchmarks", "westend-runtime/runtime-benchmarks"] +real-overseer = [ "polkadot-availability-bitfield-distribution", "polkadot-availability-distribution", "polkadot-collator-protocol", "polkadot-network-bridge", "polkadot-node-collation-generation", - "polkadot-node-core-av-store", "polkadot-node-core-backing", "polkadot-node-core-bitfield-signing", "polkadot-node-core-candidate-selection", diff --git a/node/service/res/kusama.json b/node/service/res/kusama.json index 34c2180184264a7635ef3d4a1aa5c18b99aa7a32..ba13740f7293ece5c62adef44581a5cb273eb7a2 100644 --- a/node/service/res/kusama.json +++ b/node/service/res/kusama.json @@ -8,6 +8,12 @@ "/dns/p2p.cc3-3.kusama.network/tcp/30100/p2p/12D3KooWEGHw84b4hfvXEfyq4XWEmWCbRGuHMHQMpby4BAtZ4xJf", 
"/dns/p2p.cc3-4.kusama.network/tcp/30100/p2p/12D3KooWF9KDPRMN8WpeyXhEeURZGP8Dmo7go1tDqi7hTYpxV9uW", "/dns/p2p.cc3-5.kusama.network/tcp/30100/p2p/12D3KooWDiwMeqzvgWNreS9sV1HW3pZv1PA7QGA7HUCo7FzN5gcA", + "/dns/p2p.0.kusama.network/tcp/30333/p2p/12D3KooWJDohybWd7FvRmyeGjgi56yy36mRWLHmgRprFdUadUt6b", + "/dns/p2p.1.kusama.network/tcp/30333/p2p/12D3KooWC7dnTvDY97afoLrvQSBrh7dDFEkWniTwyxAsBjfpaZk6", + "/dns/p2p.2.kusama.network/tcp/30333/p2p/12D3KooWGGK6Mj1pWF1bk4R1HjBQ4E7bgkfSJ5gmEfVRuwRZapT5", + "/dns/p2p.3.kusama.network/tcp/30333/p2p/12D3KooWRp4qgusMiUobJ9Uw1XAwtsokqx9YwgHDv5wQXjxqETji", + "/dns/p2p.4.kusama.network/tcp/30333/p2p/12D3KooWMVXPbqWR1erNKRSWDVPjcAQ9XtxqLTVzV4ccox9Y8KNL", + "/dns/p2p.5.kusama.network/tcp/30333/p2p/12D3KooWBsJKGJFuv83ixryzMsUS53A8JzEVeTA8PGi4U6T2dnif", "/dns/kusama-bootnode-0.paritytech.net/tcp/30333/p2p/12D3KooWSueCPH3puP2PcvqPJdNaDNF3jMZjtJtDiSy35pWrbt5h", "/dns/kusama-bootnode-0.paritytech.net/tcp/30334/ws/p2p/12D3KooWSueCPH3puP2PcvqPJdNaDNF3jMZjtJtDiSy35pWrbt5h", "/dns/kusama-bootnode-1.paritytech.net/tcp/30333/p2p/12D3KooWQKqane1SqWJNWMQkbia9qiMWXkcHtAdfW5eVF8hbwEDw" diff --git a/node/service/res/polkadot.json b/node/service/res/polkadot.json index d26d66badd37552eda52c42443976b33ea502810..fd8d989d7a3846fcc26922abb3fa3f44b2ef7cbf 100644 --- a/node/service/res/polkadot.json +++ b/node/service/res/polkadot.json @@ -9,6 +9,12 @@ "/dns/p2p.cc1-3.polkadot.network/tcp/30100/p2p/12D3KooWJ4eyPowiVcPU46pXuE2cDsiAmuBKXnFcFPapm4xKFdMJ", "/dns/p2p.cc1-4.polkadot.network/tcp/30100/p2p/12D3KooWNMUcqwSj38oEq1zHeGnWKmMvrCFnpMftw7JzjAtRj2rU", "/dns/p2p.cc1-5.polkadot.network/tcp/30100/p2p/12D3KooWDs6LnpmWDWgZyGtcLVr3E75CoBxzg1YZUPL5Bb1zz6fM", + "/dns/p2p.0.polkadot.network/tcp/30333/p2p/12D3KooWHsvEicXjWWraktbZ4MQBizuyADQtuEGr3NbDvtm5rFA5", + "/dns/p2p.1.polkadot.network/tcp/30333/p2p/12D3KooWQz2q2UWVCiy9cFX1hHYEmhSKQB2hjEZCccScHLGUPjcc", + "/dns/p2p.2.polkadot.network/tcp/30333/p2p/12D3KooWNHxjYbDLLbDNZ2tq1kXgif5MSiLTUWJKcDdedKu4KaG8", + 
"/dns/p2p.3.polkadot.network/tcp/30333/p2p/12D3KooWGJQysxrQcSvUWWNw88RkqYvJhH3ZcDpWJ8zrXKhLP5Vr", + "/dns/p2p.4.polkadot.network/tcp/30333/p2p/12D3KooWKer8bYqpYjwurVABu13mkELpX2X7mSpEicpjShLeg7D6", + "/dns/p2p.5.polkadot.network/tcp/30333/p2p/12D3KooWSRjL9LcEQd5u2fQTbyLxTEHq1tUFgQ6amXSp8Eu7TfKP", "/dns/cc1-0.parity.tech/tcp/30333/p2p/12D3KooWSz8r2WyCdsfWHgPyvD8GKQdJ1UAiRmrcrs8sQB3fe2KU", "/dns/cc1-1.parity.tech/tcp/30333/p2p/12D3KooWFN2mhgpkJsDBuNuE5427AcDrsib8EoqGMZmkxWwx3Md4" ], diff --git a/node/service/src/chain_spec.rs b/node/service/src/chain_spec.rs index cabed4105d60ba691852a2841828edaed93c10cc..0acc287feeaabf9d078c394abc745d6f9d29001f 100644 --- a/node/service/src/chain_spec.rs +++ b/node/service/src/chain_spec.rs @@ -16,7 +16,7 @@ //! Polkadot chain configurations. -use authority_discovery_primitives::AuthorityId as AuthorityDiscoveryId; +use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use babe_primitives::AuthorityId as BabeId; use grandpa::AuthorityId as GrandpaId; use hex_literal::hex; @@ -768,6 +768,24 @@ fn rococo_staging_testnet_config_genesis(wasm_binary: &[u8]) -> rococo_runtime:: keys: vec![], }), pallet_staking: Some(Default::default()), + pallet_sudo: Some(rococo_runtime::SudoConfig { + key: endowed_accounts[0].clone(), + }), + parachains_configuration: Some(rococo_runtime::ParachainsConfigurationConfig { + config: polkadot_runtime_parachains::configuration::HostConfiguration { + validation_upgrade_frequency: 600u32, + validation_upgrade_delay: 300, + acceptance_period: 1200, + max_code_size: 5 * 1024 * 1024, + max_pov_size: 50 * 1024 * 1024, + max_head_data_size: 32 * 1024, + group_rotation_frequency: 20, + chain_availability_period: 4, + thread_availability_period: 4, + no_show_slots: 10, + ..Default::default() + }, + }), } } @@ -1176,7 +1194,7 @@ pub fn westend_testnet_genesis( pub fn rococo_testnet_genesis( wasm_binary: &[u8], initial_authorities: Vec<(AccountId, AccountId, BabeId, GrandpaId, ImOnlineId, ValidatorId, 
AuthorityDiscoveryId)>, - _root_key: AccountId, + root_key: AccountId, endowed_accounts: Option>, ) -> rococo_runtime::GenesisConfig { let endowed_accounts: Vec = endowed_accounts.unwrap_or_else(testnet_accounts); @@ -1208,6 +1226,44 @@ pub fn rococo_testnet_genesis( keys: vec![], }), pallet_staking: Some(Default::default()), + pallet_sudo: Some(rococo_runtime::SudoConfig { key: root_key }), + parachains_configuration: Some(rococo_runtime::ParachainsConfigurationConfig { + config: polkadot_runtime_parachains::configuration::HostConfiguration { + validation_upgrade_frequency: 600u32, + validation_upgrade_delay: 300, + acceptance_period: 1200, + max_code_size: 5 * 1024 * 1024, + max_pov_size: 50 * 1024 * 1024, + max_head_data_size: 32 * 1024, + group_rotation_frequency: 20, + chain_availability_period: 4, + thread_availability_period: 4, + no_show_slots: 10, + max_upward_queue_count: 8, + max_upward_queue_size: 8 * 1024, + max_downward_message_size: 1024, + // this is approximatelly 4ms. + // + // Same as `4 * frame_support::weights::WEIGHT_PER_MILLIS`. We don't bother with + // an import since that's a made up number and should be replaced with a constant + // obtained by benchmarking anyway. 
+ preferred_dispatchable_upward_messages_step_weight: 4 * 1_000_000_000, + max_upward_message_size: 1024, + max_upward_message_num_per_candidate: 5, + hrmp_open_request_ttl: 5, + hrmp_sender_deposit: 0, + hrmp_recipient_deposit: 0, + hrmp_channel_max_capacity: 8, + hrmp_channel_max_total_size: 8 * 1024, + hrmp_max_parachain_inbound_channels: 4, + hrmp_max_parathread_inbound_channels: 4, + hrmp_channel_max_message_size: 1024, + hrmp_max_parachain_outbound_channels: 4, + hrmp_max_parathread_outbound_channels: 4, + hrmp_max_message_num_per_candidate: 5, + ..Default::default() + }, + }), } } diff --git a/node/service/src/client.rs b/node/service/src/client.rs index 3161be303dccda29d942b824cfbf86e41cdf42cd..0074803219cf88484befa21e228729bd54c0e2ee 100644 --- a/node/service/src/client.rs +++ b/node/service/src/client.rs @@ -40,7 +40,7 @@ pub trait RuntimeApiCollection: + sp_api::Metadata + sp_offchain::OffchainWorkerApi + sp_session::SessionKeys - + authority_discovery_primitives::AuthorityDiscoveryApi + + sp_authority_discovery::AuthorityDiscoveryApi where >::StateBackend: sp_api::StateBackend, {} @@ -58,7 +58,7 @@ where + sp_api::Metadata + sp_offchain::OffchainWorkerApi + sp_session::SessionKeys - + authority_discovery_primitives::AuthorityDiscoveryApi, + + sp_authority_discovery::AuthorityDiscoveryApi, >::StateBackend: sp_api::StateBackend, {} @@ -115,7 +115,7 @@ pub trait ExecuteWithClient { fn execute_with_client(self, client: Arc) -> Self::Output where >::StateBackend: sp_api::StateBackend, - Backend: sc_client_api::Backend, + Backend: sc_client_api::Backend + 'static, Backend::State: sp_api::StateBackend, Api: crate::RuntimeApiCollection, Client: AbstractClient + 'static; diff --git a/node/service/src/lib.rs b/node/service/src/lib.rs index 198b399e938b54562446e8906f2d077fbf553997..43f43657b8a6a183a91c3954e095b516c703b858 100644 --- a/node/service/src/lib.rs +++ b/node/service/src/lib.rs @@ -28,12 +28,12 @@ use { std::convert::TryInto, std::time::Duration, - 
log::info, + tracing::info, polkadot_node_core_av_store::Config as AvailabilityConfig, polkadot_node_core_proposer::ProposerFactory, polkadot_overseer::{AllSubsystems, BlockInfo, Overseer, OverseerHandler}, polkadot_primitives::v1::ParachainHost, - authority_discovery::Service as AuthorityDiscoveryService, + sc_authority_discovery::Service as AuthorityDiscoveryService, sp_blockchain::HeaderBackend, sp_core::traits::SpawnNamed, sp_keystore::SyncCryptoStorePtr, @@ -50,7 +50,7 @@ use service::RpcHandlers; pub use self::client::{AbstractClient, Client, ClientHandle, ExecuteWithClient, RuntimeApiCollection}; pub use chain_spec::{PolkadotChainSpec, KusamaChainSpec, WestendChainSpec, RococoChainSpec}; pub use consensus_common::{Proposal, SelectChain, BlockImport, RecordProof, block_validation::Chain}; -pub use polkadot_parachain::wasm_executor::run_worker as run_validation_worker; +pub use polkadot_parachain::wasm_executor::IsolationStrategy; pub use polkadot_primitives::v1::{Block, BlockId, CollatorId, Hash, Id as ParaId}; pub use sc_client_api::{Backend, ExecutionStrategy, CallExecutor}; pub use sc_consensus::LongestChain; @@ -162,10 +162,7 @@ fn new_partial(config: &mut Configuration) -> Result< grandpa::LinkHalf, FullSelectChain>, babe::BabeLink ), - ( - grandpa::SharedVoterState, - Arc>, - ), + grandpa::SharedVoterState, ) >, Error @@ -219,7 +216,6 @@ fn new_partial(config: &mut Configuration) -> Result< babe_link.clone(), block_import.clone(), Some(Box::new(justification_import)), - None, client.clone(), select_chain.clone(), inherent_data_providers.clone(), @@ -235,7 +231,7 @@ fn new_partial(config: &mut Configuration) -> Result< GrandpaFinalityProofProvider::new_for_service(backend.clone(), client.clone()); let import_setup = (block_import.clone(), grandpa_link, babe_link.clone()); - let rpc_setup = (shared_voter_state.clone(), finality_proof_provider.clone()); + let rpc_setup = shared_voter_state.clone(); let babe_config = babe_link.config().clone(); let 
shared_epoch_changes = babe_link.epoch_changes().clone(); @@ -285,7 +281,33 @@ fn new_partial(config: &mut Configuration) -> Result< }) } -#[cfg(feature="full-node")] +#[cfg(all(feature="full-node", not(feature = "real-overseer")))] +fn real_overseer( + leaves: impl IntoIterator, + _: SyncCryptoStorePtr, + _: Arc, + _: AvailabilityConfig, + _: Arc>, + _: AuthorityDiscoveryService, + registry: Option<&Registry>, + spawner: Spawner, + _: IsCollator, + _: IsolationStrategy, +) -> Result<(Overseer, OverseerHandler), Error> +where + RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend, + RuntimeClient::Api: ParachainHost, + Spawner: 'static + SpawnNamed + Clone + Unpin, +{ + Overseer::new( + leaves, + AllSubsystems::<()>::dummy(), + registry, + spawner, + ).map_err(|e| Error::Other(format!("Failed to create an Overseer: {:?}", e))) +} + +#[cfg(all(feature = "full-node", feature = "real-overseer"))] fn real_overseer( leaves: impl IntoIterator, keystore: SyncCryptoStorePtr, @@ -295,6 +317,8 @@ fn real_overseer( authority_discovery: AuthorityDiscoveryService, registry: Option<&Registry>, spawner: Spawner, + is_collator: IsCollator, + isolation_strategy: IsolationStrategy, ) -> Result<(Overseer, OverseerHandler), Error> where RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend, @@ -343,12 +367,13 @@ where ), candidate_selection: CandidateSelectionSubsystem::new( spawner.clone(), - (), + keystore.clone(), Metrics::register(registry)?, ), candidate_validation: CandidateValidationSubsystem::new( spawner.clone(), Metrics::register(registry)?, + isolation_strategy, ), chain_api: ChainApiSubsystem::new( runtime_client.clone(), @@ -357,9 +382,15 @@ where collation_generation: CollationGenerationSubsystem::new( Metrics::register(registry)?, ), - collator_protocol: CollatorProtocolSubsystem::new( - ProtocolSide::Validator(Metrics::register(registry)?), - ), + collator_protocol: { + let side = match is_collator { + IsCollator::Yes(id) => ProtocolSide::Collator(id, 
Metrics::register(registry)?), + IsCollator::No => ProtocolSide::Validator(Metrics::register(registry)?), + }; + CollatorProtocolSubsystem::new( + side, + ) + }, network_bridge: NetworkBridgeSubsystem::new( network_service, authority_discovery, @@ -375,6 +406,7 @@ where runtime_api: RuntimeApiSubsystem::new( runtime_client, Metrics::register(registry)?, + spawner.clone(), ), statement_distribution: StatementDistributionSubsystem::new( Metrics::register(registry)?, @@ -418,10 +450,10 @@ impl NewFull { /// Is this node a collator? #[cfg(feature = "full-node")] -#[derive(Debug, PartialEq, Eq, Clone, Copy)] +#[derive(Debug, PartialEq, Eq, Clone)] pub enum IsCollator { /// This node is a collator. - Yes, + Yes(CollatorId), /// This node is not a collator. No, } @@ -430,7 +462,7 @@ pub enum IsCollator { impl IsCollator { /// Is this a collator? fn is_collator(&self) -> bool { - *self == Self::Yes + matches!(self, Self::Yes(_)) } } @@ -441,9 +473,9 @@ impl IsCollator { #[cfg(feature = "full-node")] pub fn new_full( mut config: Configuration, - authority_discovery_disabled: bool, is_collator: IsCollator, grandpa_pause: Option<(u32, u32)>, + isolation_strategy: IsolationStrategy, ) -> Result>>, Error> where RuntimeApi: ConstructRuntimeApi> + Send + Sync + 'static, @@ -453,6 +485,8 @@ pub fn new_full( { let role = config.role.clone(); let force_authoring = config.force_authoring; + let backoff_authoring_blocks = + Some(sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging::default()); let disable_grandpa = config.disable_grandpa; let name = config.network.node_name.clone(); @@ -470,7 +504,11 @@ pub fn new_full( let prometheus_registry = config.prometheus_registry().cloned(); - let (shared_voter_state, finality_proof_provider) = rpc_setup; + let shared_voter_state = rpc_setup; + + #[cfg(feature = "real-overseer")] + config.network.notifications_protocols.extend(polkadot_network_bridge::notifications_protocol_info()); + 
config.network.notifications_protocols.push(grandpa::GRANDPA_PROTOCOL_NAME.into()); let (network, network_status_sinks, system_rpc_tx, network_starter) = service::build_network(service::BuildNetworkParams { @@ -481,8 +519,6 @@ pub fn new_full( import_queue, on_demand: None, block_announce_validator_builder: None, - finality_proof_request_builder: None, - finality_proof_provider: Some(finality_proof_provider.clone()), })?; if config.offchain_worker.enabled { @@ -535,24 +571,20 @@ pub fn new_full( use sc_network::Event; use futures::StreamExt; - if authority_discovery_disabled { - Err("Authority discovery is mandatory for a validator.")?; - } - let authority_discovery_role = if role.is_authority() { - authority_discovery::Role::PublishAndDiscover( + sc_authority_discovery::Role::PublishAndDiscover( keystore_container.keystore(), ) } else { // don't publish our addresses when we're only a collator - authority_discovery::Role::Discover + sc_authority_discovery::Role::Discover }; let dht_event_stream = network.event_stream("authority-discovery") .filter_map(|e| async move { match e { Event::Dht(e) => Some(e), _ => None, }}); - let (worker, service) = authority_discovery::new_worker_and_service( + let (worker, service) = sc_authority_discovery::new_worker_and_service( client.clone(), network.clone(), Box::pin(dht_event_stream), @@ -578,6 +610,8 @@ pub fn new_full( authority_discovery_service, prometheus_registry.as_ref(), spawner, + is_collator, + isolation_strategy, )?; let overseer_handler_clone = overseer_handler.clone(); @@ -623,6 +657,7 @@ pub fn new_full( sync_oracle: network.clone(), inherent_data_providers: inherent_data_providers.clone(), force_authoring, + backoff_authoring_blocks, babe_link, can_author_with, }; @@ -663,7 +698,10 @@ pub fn new_full( // given delay. 
let voting_rule = match grandpa_pause { Some((block, delay)) => { - info!("GRANDPA scheduled voting pause set for block #{} with a duration of {} blocks.", + info!( + block_number = %block, + delay = %delay, + "GRANDPA scheduled voting pause set for block #{} with a duration of {} blocks.", block, delay, ); @@ -689,8 +727,6 @@ pub fn new_full( "grandpa-voter", grandpa::run_grandpa_voter(grandpa_config)? ); - } else { - grandpa::setup_disabled_grandpa(network.clone())?; } network_starter.start_network(); @@ -730,16 +766,12 @@ fn new_light(mut config: Configuration) -> Result<(TaskManage on_demand.clone(), )); - let grandpa_block_import = grandpa::light_block_import( + let (grandpa_block_import, _) = grandpa::block_import( client.clone(), - backend.clone(), &(client.clone() as Arc<_>), - Arc::new(on_demand.checker().clone()), + select_chain.clone(), )?; - - let finality_proof_import = grandpa_block_import.clone(); - let finality_proof_request_builder = - finality_proof_import.create_finality_proof_request_builder(); + let justification_import = grandpa_block_import.clone(); let (babe_block_import, babe_link) = babe::block_import( babe::Config::get_or_compute(&*client)?, @@ -753,8 +785,7 @@ fn new_light(mut config: Configuration) -> Result<(TaskManage let import_queue = babe::import_queue( babe_link, babe_block_import, - None, - Some(Box::new(finality_proof_import)), + Some(Box::new(justification_import)), client.clone(), select_chain.clone(), inherent_data_providers.clone(), @@ -763,9 +794,6 @@ fn new_light(mut config: Configuration) -> Result<(TaskManage consensus_common::NeverCanAuthor, )?; - let finality_proof_provider = - GrandpaFinalityProofProvider::new_for_service(backend.clone(), client.clone()); - let (network, network_status_sinks, system_rpc_tx, network_starter) = service::build_network(service::BuildNetworkParams { config: &config, @@ -775,8 +803,6 @@ fn new_light(mut config: Configuration) -> Result<(TaskManage import_queue, on_demand: 
Some(on_demand.clone()), block_announce_validator_builder: None, - finality_proof_request_builder: Some(finality_proof_request_builder), - finality_proof_provider: Some(finality_proof_provider), })?; if config.offchain_worker.enabled { @@ -867,37 +893,36 @@ pub fn build_light(config: Configuration) -> Result<(TaskManager, RpcHandlers), #[cfg(feature = "full-node")] pub fn build_full( config: Configuration, - authority_discovery_disabled: bool, is_collator: IsCollator, grandpa_pause: Option<(u32, u32)>, ) -> Result, Error> { if config.chain_spec.is_rococo() { new_full::( config, - authority_discovery_disabled, is_collator, grandpa_pause, + Default::default(), ).map(|full| full.with_client(Client::Rococo)) } else if config.chain_spec.is_kusama() { new_full::( config, - authority_discovery_disabled, is_collator, grandpa_pause, + Default::default(), ).map(|full| full.with_client(Client::Kusama)) } else if config.chain_spec.is_westend() { new_full::( config, - authority_discovery_disabled, is_collator, grandpa_pause, + Default::default(), ).map(|full| full.with_client(Client::Westend)) } else { new_full::( config, - authority_discovery_disabled, is_collator, grandpa_pause, + Default::default(), ).map(|full| full.with_client(Client::Polkadot)) } } diff --git a/node/subsystem-test-helpers/Cargo.toml b/node/subsystem-test-helpers/Cargo.toml index 24f49ec3a1c7c40b6d3ab2b3e30915abeb8bfd72..bd3318419cb1fc255a661503fad8c5585664ed6f 100644 --- a/node/subsystem-test-helpers/Cargo.toml +++ b/node/subsystem-test-helpers/Cargo.toml @@ -6,21 +6,22 @@ edition = "2018" description = "Subsystem traits and message definitions" [dependencies] -async-trait = "0.1" -futures = "0.3.5" +async-trait = "0.1.42" +futures = "0.3.8" futures-timer = "3.0.2" -log = "0.4.8" -parity-scale-codec = "1.3.4" -parking_lot = "0.10.0" -pin-project = "0.4.23" +tracing = "0.1.22" +tracing-futures = "0.2.4" +parity-scale-codec = { version = "1.3.5", default-features = false, features = ["derive"] } 
+parking_lot = "0.11.1" +pin-project = "1.0.2" polkadot-node-primitives = { path = "../primitives" } polkadot-node-subsystem = { path = "../subsystem" } polkadot-node-subsystem-util = { path = "../subsystem-util" } polkadot-primitives = { path = "../../primitives" } polkadot-statement-table = { path = "../../statement-table" } -sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" } -smallvec = "1.4.1" -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-network = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +smallvec = "1.5.1" +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } [dev-dependencies] polkadot-overseer = { path = "../overseer" } diff --git a/node/subsystem-test-helpers/src/lib.rs b/node/subsystem-test-helpers/src/lib.rs index 0c9c8b05608b8431460a44a61159a8969fce2498..512d761afed17797f40622e5dd97583a5ffeeda3 100644 --- a/node/subsystem-test-helpers/src/lib.rs +++ b/node/subsystem-test-helpers/src/lib.rs @@ -191,15 +191,14 @@ impl SubsystemContext Ok(()) } - async fn send_message(&mut self, msg: AllMessages) -> SubsystemResult<()> { + async fn send_message(&mut self, msg: AllMessages) { self.tx .send(msg) .await .expect("test overseer no longer live"); - Ok(()) } - async fn send_messages(&mut self, msgs: T) -> SubsystemResult<()> + async fn send_messages(&mut self, msgs: T) where T: IntoIterator + Send, T::IntoIter: Send, @@ -209,8 +208,6 @@ impl SubsystemContext .send_all(&mut iter) .await .expect("test overseer no longer live"); - - Ok(()) } } @@ -303,11 +300,11 @@ impl, Msg: Send + 'static> Subsystem for F let future = Box::pin(async move { loop { match ctx.recv().await { - Ok(FromOverseer::Signal(OverseerSignal::Conclude)) => return, + Ok(FromOverseer::Signal(OverseerSignal::Conclude)) => return Ok(()), Ok(FromOverseer::Communication { msg }) => { let _ = self.0.send(msg).await; }, - Err(_) => 
return, + Err(_) => return Ok(()), _ => (), } } @@ -341,7 +338,7 @@ mod tests { spawner.spawn("overseer", overseer.run().then(|_| async { () }).boxed()); - block_on(handler.send_msg(CandidateSelectionMessage::Invalid(Default::default(), Default::default()))).unwrap(); + block_on(handler.send_msg(CandidateSelectionMessage::Invalid(Default::default(), Default::default()))); assert!(matches!(block_on(rx.into_future()).0.unwrap(), CandidateSelectionMessage::Invalid(_, _))); } } diff --git a/node/subsystem-util/Cargo.toml b/node/subsystem-util/Cargo.toml index f03cea13482786b70bdca87bee85def153f9b27d..592f979d93ab3a8be273fb155731ce1a0172d331 100644 --- a/node/subsystem-util/Cargo.toml +++ b/node/subsystem-util/Cargo.toml @@ -6,30 +6,32 @@ edition = "2018" description = "Subsystem traits and message definitions" [dependencies] -async-trait = "0.1" -futures = "0.3.5" +async-trait = "0.1.42" +futures = "0.3.8" futures-timer = "3.0.2" -log = "0.4.11" -thiserror = "1.0.21" -parity-scale-codec = "1.3.4" -parking_lot = { version = "0.10.0", optional = true } -pin-project = "0.4.22" +parity-scale-codec = { version = "1.3.5", default-features = false, features = ["derive"] } +parking_lot = { version = "0.11.1", optional = true } +pin-project = "1.0.2" streamunordered = "0.5.1" +thiserror = "1.0.22" +tracing = "0.1.22" +tracing-futures = "0.2.4" polkadot-node-primitives = { path = "../primitives" } polkadot-node-subsystem = { path = "../subsystem" } polkadot-primitives = { path = "../../primitives" } -sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } -substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-network = { git = 
"https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } [dev-dependencies] -assert_matches = "1.3.0" -async-trait = "0.1" -futures = { version = "0.3.5", features = ["thread-pool"] } -parking_lot = "0.10.0" +assert_matches = "1.4.0" +async-trait = "0.1.42" +env_logger = "0.8.2" +futures = { version = "0.3.8", features = ["thread-pool"] } +log = "0.4.11" +parking_lot = "0.11.1" polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } -env_logger = "0.7.1" diff --git a/node/subsystem-util/src/lib.rs b/node/subsystem-util/src/lib.rs index c642f08a3c0afe7a6e3a16cb6a1d0afe57206919..9cc68834be956b4ad939847a1c5582da60a861ed 100644 --- a/node/subsystem-util/src/lib.rs +++ b/node/subsystem-util/src/lib.rs @@ -22,31 +22,22 @@ //! //! This crate also reexports Prometheus metric types which are expected to be implemented by subsystems. 
-#![deny(unused_results)] -// #![deny(unused_crate_dependencies] causes false positives -// https://github.com/rust-lang/rust/issues/57274 #![warn(missing_docs)] use polkadot_node_subsystem::{ - errors::{ChainApiError, RuntimeApiError}, - messages::{AllMessages, RuntimeApiMessage, RuntimeApiRequest, RuntimeApiSender}, + errors::RuntimeApiError, + messages::{AllMessages, RuntimeApiMessage, RuntimeApiRequest, RuntimeApiSender, BoundToRelayParent}, FromOverseer, SpawnedSubsystem, Subsystem, SubsystemContext, SubsystemError, SubsystemResult, }; -use futures::{ - channel::{mpsc, oneshot}, - future::Either, - prelude::*, - select, - stream::Stream, - task, -}; +use futures::{channel::{mpsc, oneshot}, prelude::*, select, stream::Stream}; use futures_timer::Delay; use parity_scale_codec::Encode; -use pin_project::{pin_project, pinned_drop}; +use pin_project::pin_project; use polkadot_primitives::v1::{ CandidateEvent, CommittedCandidateReceipt, CoreState, EncodeAs, PersistedValidationData, GroupRotationInfo, Hash, Id as ParaId, ValidationData, OccupiedCoreAssumption, SessionIndex, Signed, SigningContext, ValidationCode, ValidatorId, ValidatorIndex, + SessionInfo, }; use sp_core::{ traits::SpawnNamed, @@ -59,12 +50,13 @@ use sp_keystore::{ Error as KeystoreError, }; use std::{ - collections::HashMap, + collections::{HashMap, hash_map::Entry}, convert::{TryFrom, TryInto}, marker::Unpin, pin::Pin, task::{Poll, Context}, time::Duration, + fmt, }; use streamunordered::{StreamUnordered, StreamYield}; use thiserror::Error; @@ -81,7 +73,6 @@ pub mod reexports { }; } - /// Duration a job will wait after sending a stop signal before hard-aborting. pub const JOB_GRACEFUL_STOP_DURATION: Duration = Duration::from_secs(1); /// Capacity of channels to and from individual jobs @@ -99,9 +90,6 @@ pub enum Error { /// A subsystem error #[error(transparent)] Subsystem(#[from] SubsystemError), - /// An error in the Chain API. 
- #[error(transparent)] - ChainApi(#[from] ChainApiError), /// An error in the Runtime API. #[error(transparent)] RuntimeApi(#[from] RuntimeApiError), @@ -114,9 +102,6 @@ pub enum Error { /// The local node is not a validator. #[error("Node is not a validator")] NotAValidator, - /// The desired job is not present in the jobs list. - #[error("Relay parent {0} not of interest")] - JobNotFound(Hash), /// Already forwarding errors to another sender #[error("AlreadyForwarding")] AlreadyForwarding, @@ -133,18 +118,11 @@ pub async fn request_from_runtime( ) -> Result, Error> where RequestBuilder: FnOnce(RuntimeApiSender) -> RuntimeApiRequest, - FromJob: TryFrom, - >::Error: std::fmt::Debug, + FromJob: From, { let (tx, rx) = oneshot::channel(); - sender - .send( - AllMessages::RuntimeApi(RuntimeApiMessage::Request(parent, request_builder(tx))) - .try_into() - .map_err(|err| Error::SenderConversion(format!("{:?}", err)))?, - ) - .await?; + sender.send(AllMessages::RuntimeApi(RuntimeApiMessage::Request(parent, request_builder(tx))).into()).await?; Ok(rx) } @@ -173,8 +151,7 @@ macro_rules! specialize_requests { sender: &mut mpsc::Sender, ) -> Result, Error> where - FromJob: TryFrom, - >::Error: std::fmt::Debug, + FromJob: From, { request_from_runtime(parent, sender, |tx| RuntimeApiRequest::$request_variant( $( $param_name, )* tx @@ -210,6 +187,7 @@ specialize_requests! { fn request_validation_code(para_id: ParaId, assumption: OccupiedCoreAssumption) -> Option; ValidationCode; fn request_candidate_pending_availability(para_id: ParaId) -> Option; CandidatePendingAvailability; fn request_candidate_events() -> Vec; CandidateEvents; + fn request_session_info(index: SessionIndex) -> Option; SessionInfo; } /// Request some data from the `RuntimeApi` via a SubsystemContext. 
@@ -224,13 +202,11 @@ where { let (tx, rx) = oneshot::channel(); - ctx - .send_message( - AllMessages::RuntimeApi(RuntimeApiMessage::Request(parent, request_builder(tx))) - .try_into() - .map_err(|err| Error::SenderConversion(format!("{:?}", err)))?, - ) - .await?; + ctx.send_message( + AllMessages::RuntimeApi(RuntimeApiMessage::Request(parent, request_builder(tx))) + .try_into() + .map_err(|err| Error::SenderConversion(format!("{:?}", err)))?, + ).await; Ok(rx) } @@ -293,6 +269,7 @@ specialize_requests_ctx! { fn request_validation_code_ctx(para_id: ParaId, assumption: OccupiedCoreAssumption) -> Option; ValidationCode; fn request_candidate_pending_availability_ctx(para_id: ParaId) -> Option; CandidatePendingAvailability; fn request_candidate_events_ctx() -> Vec; CandidateEvents; + fn request_session_info_ctx(index: SessionIndex) -> Option; SessionInfo; } /// From the given set of validators, find the first key we can sign with, if any. @@ -309,6 +286,7 @@ pub async fn signing_key(validators: &[ValidatorId], keystore: SyncCryptoStorePt /// /// It can be created if the local node is a validator in the context of a particular /// relay chain block. +#[derive(Debug)] pub struct Validator { signing_context: SigningContext, key: ValidatorId, @@ -323,8 +301,7 @@ impl Validator { mut sender: mpsc::Sender, ) -> Result where - FromJob: TryFrom, - >::Error: std::fmt::Debug, + FromJob: From, { // Note: request_validators and request_session_index_for_child do not and cannot // run concurrently: they both have a mutable handle to the same sender. @@ -407,23 +384,18 @@ impl Validator { } } -/// ToJob is expected to be an enum declaring the set of messages of interest to a particular job. -/// -/// Normally, this will be some subset of `Allmessages`, and a `Stop` variant. -pub trait ToJobTrait: TryFrom { - /// The `Stop` variant of the ToJob enum. 
- const STOP: Self; +struct AbortOnDrop(future::AbortHandle); - /// If the message variant contains its relay parent, return it here - fn relay_parent(&self) -> Option; +impl Drop for AbortOnDrop { + fn drop(&mut self) { + self.0.abort(); + } } /// A JobHandle manages a particular job for a subsystem. struct JobHandle { - abort_handle: future::AbortHandle, + _abort_handle: AbortOnDrop, to_job: mpsc::Sender, - finished: oneshot::Receiver<()>, - outgoing_msgs_handle: usize, } impl JobHandle { @@ -433,34 +405,12 @@ impl JobHandle { } } -impl JobHandle { - /// Stop this job gracefully. - /// - /// If it hasn't shut itself down after `JOB_GRACEFUL_STOP_DURATION`, abort it. - async fn stop(mut self) { - // we don't actually care if the message couldn't be sent - if let Err(_) = self.to_job.send(ToJob::STOP).await { - // no need to wait further here: the job is either stalled or - // disconnected, and in either case, we can just abort it immediately - self.abort_handle.abort(); - return; - } - let stop_timer = Delay::new(JOB_GRACEFUL_STOP_DURATION); - - match future::select(stop_timer, self.finished).await { - Either::Left((_, _)) => {} - Either::Right((_, _)) => { - self.abort_handle.abort(); - } - } - } -} - /// This module reexports Prometheus types and defines the [`Metrics`] trait. pub mod metrics { - /// Reexport Prometheus types. + /// Reexport Substrate Prometheus types. pub use substrate_prometheus_endpoint as prometheus; + /// Subsystem- or job-specific Prometheus metrics. /// /// Usually implemented as a wrapper for `Option` @@ -490,16 +440,39 @@ pub mod metrics { } } +/// Commands from a job to the broader subsystem. +pub enum FromJobCommand { + /// Send a message to another subsystem. + SendMessage(AllMessages), + /// Spawn a child task on the executor. + Spawn(&'static str, Pin + Send>>), + /// Spawn a blocking child task on the executor's dedicated thread pool. 
+ SpawnBlocking(&'static str, Pin + Send>>), +} + +impl fmt::Debug for FromJobCommand { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::SendMessage(msg) => write!(fmt, "FromJobCommand::SendMessage({:?})", msg), + Self::Spawn(name, _) => write!(fmt, "FromJobCommand::Spawn({})", name), + Self::SpawnBlocking(name, _) => write!(fmt, "FromJobCommand::SpawnBlocking({})", name), + } + } +} + +impl From for FromJobCommand { + fn from(msg: AllMessages) -> Self { + Self::SendMessage(msg) + } +} + /// This trait governs jobs. /// /// Jobs are instantiated and killed automatically on appropriate overseer messages. -/// Other messages are passed along to and from the job via the overseer to other -/// subsystems. +/// Other messages are passed along to and from the job via the overseer to other subsystems. pub trait JobTrait: Unpin { - /// Message type to the job. Typically a subset of AllMessages. - type ToJob: 'static + ToJobTrait + Send; - /// Message type from the job. Typically a subset of AllMessages. - type FromJob: 'static + Into + Send; + /// Message type used to send messages to the job. + type ToJob: 'static + BoundToRelayParent + Send; /// Job runtime error. type Error: 'static + std::error::Error + Send; /// Extra arguments this job needs to run properly. @@ -516,28 +489,16 @@ pub trait JobTrait: Unpin { /// Name of the job, i.e. `CandidateBackingJob` const NAME: &'static str; - /// Run a job for the parent block indicated + /// Run a job for the given relay `parent`. + /// + /// The job should be ended when `receiver` returns `None`. fn run( parent: Hash, run_args: Self::RunArgs, metrics: Self::Metrics, receiver: mpsc::Receiver, - sender: mpsc::Sender, + sender: mpsc::Sender, ) -> Pin> + Send>>; - - /// Handle a message which has no relay parent, and therefore can't be dispatched to a particular job - /// - /// By default, this is implemented with a NOP function. 
However, if - /// ToJob occasionally has messages which do not correspond to a particular - /// parent relay hash, then this function will be spawned as a one-off - /// task to handle those messages. - // TODO: the API here is likely not precisely what we want; figure it out more - // once we're implementing a subsystem which actually needs this feature. - // In particular, we're quite likely to want this to return a future instead of - // interrupting the active thread for the duration of the handler. - fn handle_unanchored_msg(_msg: Self::ToJob) -> Result<(), Self::Error> { - Ok(()) - } } /// Error which can be returned by the jobs manager @@ -560,12 +521,12 @@ pub enum JobsError { /// - Dispatches messages to the appropriate job for a given relay-parent. /// - When dropped, aborts all remaining jobs. /// - implements `Stream`, collecting all messages from subordinate jobs. -#[pin_project(PinnedDrop)] +#[pin_project] pub struct Jobs { spawner: Spawner, running: HashMap>, + outgoing_msgs: StreamUnordered>, #[pin] - outgoing_msgs: StreamUnordered>, job: std::marker::PhantomData, errors: Option, JobsError)>>, } @@ -603,118 +564,92 @@ impl Jobs { fn spawn_job(&mut self, parent_hash: Hash, run_args: Job::RunArgs, metrics: Job::Metrics) -> Result<(), Error> { let (to_job_tx, to_job_rx) = mpsc::channel(JOB_CHANNEL_CAPACITY); let (from_job_tx, from_job_rx) = mpsc::channel(JOB_CHANNEL_CAPACITY); - let (finished_tx, finished) = oneshot::channel(); - // clone the error transmitter to move into the future let err_tx = self.errors.clone(); let (future, abort_handle) = future::abortable(async move { if let Err(e) = Job::run(parent_hash, run_args, metrics, to_job_rx, from_job_tx).await { - log::error!( - "{}({}) finished with an error {:?}", - Job::NAME, - parent_hash, - e, + tracing::error!( + job = Job::NAME, + parent_hash = %parent_hash, + err = ?e, + "job finished with an error", ); if let Some(mut err_tx) = err_tx { // if we can't send the notification of error on the 
error channel, then // there's no point trying to propagate this error onto the channel too - // all we can do is warn that error propagatio has failed + // all we can do is warn that error propagation has failed if let Err(e) = err_tx.send((Some(parent_hash), JobsError::Job(e))).await { - log::warn!("failed to forward error: {:?}", e); + tracing::warn!(err = ?e, "failed to forward error"); } } } }); - // the spawn mechanism requires that the spawned future has no output - let future = async move { - // job errors are already handled within the future, meaning - // that any errors here are due to the abortable mechanism. - // failure to abort isn't of interest. - let _ = future.await; - // transmission failure here is only possible if the receiver is closed, - // which means the handle is dropped, which means we don't care anymore - let _ = finished_tx.send(()); - }; - self.spawner.spawn(Job::NAME, future.boxed()); + self.spawner.spawn(Job::NAME, future.map(drop).boxed()); - // this handle lets us remove the appropriate receiver from self.outgoing_msgs - // when it's time to stop the job. - let outgoing_msgs_handle = self.outgoing_msgs.push(from_job_rx); + self.outgoing_msgs.push(from_job_rx); let handle = JobHandle { - abort_handle, + _abort_handle: AbortOnDrop(abort_handle), to_job: to_job_tx, - finished, - outgoing_msgs_handle, }; - let _ = self.running.insert(parent_hash, handle); + self.running.insert(parent_hash, handle); Ok(()) } /// Stop the job associated with this `parent_hash`. - pub async fn stop_job(&mut self, parent_hash: Hash) -> Result<(), Error> { - match self.running.remove(&parent_hash) { - Some(handle) => { - let _ = Pin::new(&mut self.outgoing_msgs).remove(handle.outgoing_msgs_handle); - handle.stop().await; - Ok(()) - } - None => Err(Error::JobNotFound(parent_hash)), - } + pub async fn stop_job(&mut self, parent_hash: Hash) { + self.running.remove(&parent_hash); } /// Send a message to the appropriate job for this `parent_hash`. 
- /// Will not return an error if the job is not running. - async fn send_msg(&mut self, parent_hash: Hash, msg: Job::ToJob) -> Result<(), Error> { - match self.running.get_mut(&parent_hash) { - Some(job) => job.send_msg(msg).await?, - None => { - // don't bring down the subsystem, this can happen to due a race condition - }, + async fn send_msg(&mut self, parent_hash: Hash, msg: Job::ToJob) { + if let Entry::Occupied(mut job) = self.running.entry(parent_hash) { + if job.get_mut().send_msg(msg).await.is_err() { + job.remove(); + } } - Ok(()) } } -// Note that on drop, we don't have the chance to gracefully spin down each of the remaining handles; -// we just abort them all. Still better than letting them dangle. -#[pinned_drop] -impl PinnedDrop for Jobs { - fn drop(self: Pin<&mut Self>) { - for job_handle in self.running.values() { - job_handle.abort_handle.abort(); +impl Stream for Jobs +where + Spawner: SpawnNamed, + Job: JobTrait, +{ + type Item = FromJobCommand; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + loop { + match Pin::new(&mut self.outgoing_msgs).poll_next(cx) { + Poll::Pending => return Poll::Pending, + Poll::Ready(r) => match r.map(|v| v.0) { + Some(StreamYield::Item(msg)) => return Poll::Ready(Some(msg)), + // If a job is finished, rerun the loop + Some(StreamYield::Finished(_)) => continue, + // Don't end if there are no jobs running + None => return Poll::Pending, + } + } } } } -impl Stream for Jobs +impl stream::FusedStream for Jobs where Spawner: SpawnNamed, Job: JobTrait, { - type Item = Job::FromJob; - - fn poll_next(self: Pin<&mut Self>, cx: &mut task::Context) -> task::Poll> { - // pin-project the outgoing messages - let result = self.project().outgoing_msgs.poll_next(cx).map(|opt| { - opt.and_then(|(stream_yield, _)| match stream_yield { - StreamYield::Item(msg) => Some(msg), - StreamYield::Finished(_) => None, - }) - }); - // we don't want the stream to end if the jobs are empty at some point - match result { - 
task::Poll::Ready(None) => task::Poll::Pending, - otherwise => otherwise, - } + fn is_terminated(&self) -> bool { + false } } + /// A basic implementation of a subsystem. /// /// This struct is responsible for handling message traffic between @@ -736,7 +671,7 @@ where Context: SubsystemContext, Job: 'static + JobTrait, Job::RunArgs: Clone, - Job::ToJob: TryFrom + TryFrom<::Message> + Sync, + Job::ToJob: From<::Message> + Sync, { /// Creates a new `Subsystem`. pub fn new(spawner: Spawner, run_args: Job::RunArgs, metrics: Job::Metrics) -> Self { @@ -793,14 +728,27 @@ where loop { select! { - incoming = ctx.recv().fuse() => if Self::handle_incoming(incoming, &mut jobs, &run_args, &metrics, &mut err_tx).await { break }, - outgoing = jobs.next().fuse() => Self::handle_outgoing(outgoing, &mut ctx, &mut err_tx).await, + incoming = ctx.recv().fuse() => + if Self::handle_incoming( + incoming, + &mut jobs, + &run_args, + &metrics, + &mut err_tx, + ).await { + break + }, + outgoing = jobs.next() => { + if let Err(e) = Self::handle_from_job(outgoing, &mut ctx).await { + tracing::warn!(err = ?e, "failed to handle command from job"); + } + } complete => break, } } } - // if we have a channel on which to forward errors, do so + /// Forward a given error to the higher context using the given error channel. async fn fwd_err( hash: Option, err: JobsError, @@ -810,12 +758,14 @@ where // if we can't send on the error transmission channel, we can't do anything useful about it // still, we can at least log the failure if let Err(e) = err_tx.send((hash, err)).await { - log::warn!("failed to forward error: {:?}", e); + tracing::warn!(err = ?e, "failed to forward error"); } } } - // handle an incoming message. return true if we should break afterwards. + /// Handle an incoming message. + /// + /// Returns `true` when this job manager should shutdown. 
async fn handle_incoming( incoming: SubsystemResult>, jobs: &mut Jobs, @@ -835,92 +785,55 @@ where for hash in activated { let metrics = metrics.clone(); if let Err(e) = jobs.spawn_job(hash, run_args.clone(), metrics) { - log::error!("Failed to spawn a job: {:?}", e); - let e = JobsError::Utility(e); - Self::fwd_err(Some(hash), e, err_tx).await; + tracing::error!( + job = Job::NAME, + err = ?e, + "failed to spawn a job", + ); + Self::fwd_err(Some(hash), JobsError::Utility(e), err_tx).await; return true; } } for hash in deactivated { - if let Err(e) = jobs.stop_job(hash).await { - log::error!("Failed to stop a job: {:?}", e); - let e = JobsError::Utility(e); - Self::fwd_err(Some(hash), e, err_tx).await; - return true; - } + jobs.stop_job(hash).await; } } Ok(Signal(Conclude)) => { - // Breaking the loop ends fn run, which drops `jobs`, which immediately drops all ongoing work. - // We can afford to wait a little while to shut them all down properly before doing that. - // - // Forwarding the stream to a drain means we wait until all of the items in the stream - // have completed. Contrast with `into_future`, which turns it into a future of `(head, rest_stream)`. 
- use futures::sink::drain; - use futures::stream::FuturesUnordered; - use futures::stream::StreamExt; - - if let Err(e) = jobs - .running - .drain() - .map(|(_, handle)| handle.stop()) - .collect::>() - .map(Ok) - .forward(drain()) - .await - { - log::error!("failed to stop all jobs on conclude signal: {:?}", e); - let e = Error::from(e); - Self::fwd_err(None, JobsError::Utility(e), err_tx).await; - } - + jobs.running.clear(); return true; } Ok(Communication { msg }) => { if let Ok(to_job) = ::try_from(msg) { - match to_job.relay_parent() { - Some(hash) => { - if let Err(err) = jobs.send_msg(hash, to_job).await { - log::error!("Failed to send a message to a job: {:?}", err); - let e = JobsError::Utility(err); - Self::fwd_err(Some(hash), e, err_tx).await; - return true; - } - } - None => { - if let Err(err) = Job::handle_unanchored_msg(to_job) { - log::error!("Failed to handle unhashed message: {:?}", err); - let e = JobsError::Job(err); - Self::fwd_err(None, e, err_tx).await; - return true; - } - } - } + jobs.send_msg(to_job.relay_parent(), to_job).await; } } Ok(Signal(BlockFinalized(_))) => {} Err(err) => { - log::error!("error receiving message from subsystem context: {:?}", err); - let e = JobsError::Utility(Error::from(err)); - Self::fwd_err(None, e, err_tx).await; + tracing::error!( + job = Job::NAME, + err = ?err, + "error receiving message from subsystem context for job", + ); + Self::fwd_err(None, JobsError::Utility(Error::from(err)), err_tx).await; return true; } } false } - // handle an outgoing message. - async fn handle_outgoing( - outgoing: Option, + // handle a command from a job. 
+ async fn handle_from_job( + outgoing: Option, ctx: &mut Context, - err_tx: &mut Option, JobsError)>>, - ) { - let msg = outgoing.expect("the Jobs stream never ends; qed"); - if let Err(e) = ctx.send_message(msg.into()).await { - let e = JobsError::Utility(e.into()); - Self::fwd_err(None, e, err_tx).await; + ) -> SubsystemResult<()> { + match outgoing.expect("the Jobs stream never ends; qed") { + FromJobCommand::SendMessage(msg) => ctx.send_message(msg).await, + FromJobCommand::Spawn(name, task) => ctx.spawn(name, task).await?, + FromJobCommand::SpawnBlocking(name, task) => ctx.spawn_blocking(name, task).await?, } + + Ok(()) } } @@ -928,10 +841,9 @@ impl Subsystem for JobManager::Message: Into, Job: 'static + JobTrait + Send, Job::RunArgs: Clone + Sync, - Job::ToJob: TryFrom + Sync, + Job::ToJob: From<::Message> + Sync, Job::Metrics: Sync, { fn start(self, ctx: Context) -> SpawnedSubsystem { @@ -942,6 +854,7 @@ where let future = Box::pin(async move { Self::run(ctx, run_args, metrics, spawner, errors).await; + Ok(()) }); SpawnedSubsystem { @@ -1005,7 +918,7 @@ macro_rules! delegated_subsystem { where Spawner: Clone + $crate::reexports::SpawnNamed + Send + Unpin, Context: $crate::reexports::SubsystemContext, - ::Message: Into<$to_job>, + $to_job: From<::Message>, { #[doc = "Creates a new "] #[doc = $subsystem_name] @@ -1016,6 +929,7 @@ macro_rules! delegated_subsystem { } /// Run this subsystem + #[tracing::instrument(skip(ctx, run_args, metrics, spawner), fields(subsystem = $subsystem_name))] pub async fn run(ctx: Context, run_args: $run_args, metrics: $metrics, spawner: Spawner) { >::run(ctx, run_args, metrics, spawner, None).await } @@ -1025,7 +939,7 @@ macro_rules! 
delegated_subsystem { where Spawner: $crate::reexports::SpawnNamed + Send + Clone + Unpin + 'static, Context: $crate::reexports::SubsystemContext, - ::Message: Into<$to_job>, + $to_job: From<::Message>, { fn start(self, ctx: Context) -> $crate::reexports::SpawnedSubsystem { self.manager.start(ctx) @@ -1080,22 +994,17 @@ impl Future for Timeout { #[cfg(test)] mod tests { - use super::{Error as UtilError, JobManager, JobTrait, JobsError, TimeoutExt, ToJobTrait}; + use super::*; use thiserror::Error; use polkadot_node_subsystem::{ - messages::{AllMessages, CandidateSelectionMessage}, - ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, Subsystem, + messages::{AllMessages, CandidateSelectionMessage}, ActiveLeavesUpdate, FromOverseer, OverseerSignal, + SpawnedSubsystem, }; use assert_matches::assert_matches; - use futures::{ - channel::mpsc, - executor, - stream::{self, StreamExt}, - future, Future, FutureExt, SinkExt, - }; + use futures::{channel::mpsc, executor, StreamExt, future, Future, FutureExt, SinkExt}; use polkadot_primitives::v1::Hash; use polkadot_node_subsystem_test_helpers::{self as test_helpers, make_subsystem_context}; - use std::{collections::HashMap, convert::TryFrom, pin::Pin, time::Duration}; + use std::{pin::Pin, time::Duration}; // basic usage: in a nutshell, when you want to define a subsystem, just focus on what its jobs do; // you can leave the subsystem itself to the job manager. 
@@ -1106,67 +1015,7 @@ mod tests { // job structs are constructed within JobTrait::run // most will want to retain the sender and receiver, as well as whatever other data they like struct FakeCandidateSelectionJob { - receiver: mpsc::Receiver, - } - - // ToJob implementations require the following properties: - // - // - have a Stop variant (to impl ToJobTrait) - // - impl ToJobTrait - // - impl TryFrom - // - impl From (from SubsystemContext::Message) - // - // Mostly, they are just a type-safe subset of AllMessages that this job is prepared to receive - enum ToJob { - CandidateSelection(CandidateSelectionMessage), - Stop, - } - - impl ToJobTrait for ToJob { - const STOP: Self = ToJob::Stop; - - fn relay_parent(&self) -> Option { - match self { - Self::CandidateSelection(csm) => csm.relay_parent(), - Self::Stop => None, - } - } - } - - impl TryFrom for ToJob { - type Error = (); - - fn try_from(msg: AllMessages) -> Result { - match msg { - AllMessages::CandidateSelection(csm) => Ok(ToJob::CandidateSelection(csm)), - _ => Err(()), - } - } - } - - impl From for ToJob { - fn from(csm: CandidateSelectionMessage) -> ToJob { - ToJob::CandidateSelection(csm) - } - } - - // FromJob must be infallibly convertable into AllMessages. - // - // It exists to be a type-safe subset of AllMessages that this job is specified to send. 
- // - // Note: the Clone impl here is not generally required; it's just ueful for this test context because - // we include it in the RunArgs - #[derive(Clone)] - enum FromJob { - Test, - } - - impl From for AllMessages { - fn from(from_job: FromJob) -> AllMessages { - match from_job { - FromJob::Test => AllMessages::CandidateSelection(CandidateSelectionMessage::default()), - } - } + receiver: mpsc::Receiver, } // Error will mostly be a wrapper to make the try operator more convenient; @@ -1179,17 +1028,9 @@ mod tests { } impl JobTrait for FakeCandidateSelectionJob { - type ToJob = ToJob; - type FromJob = FromJob; + type ToJob = CandidateSelectionMessage; type Error = Error; - // RunArgs can be anything that a particular job needs supplied from its external context - // in order to create the Job. In this case, they're a hashmap of parents to the mock outputs - // expected from that job. - // - // Note that it's not recommended to use something as heavy as a hashmap in production: the - // RunArgs get cloned so that each job gets its own owned copy. If you need that, wrap it in - // an Arc. Within a testing context, that efficiency is less important. - type RunArgs = HashMap>; + type RunArgs = bool; type Metrics = (); const NAME: &'static str = "FakeCandidateSelectionJob"; @@ -1198,21 +1039,23 @@ mod tests { // // this function is in charge of creating and executing the job's main loop fn run( - parent: Hash, - mut run_args: Self::RunArgs, + _: Hash, + run_args: Self::RunArgs, _metrics: Self::Metrics, - receiver: mpsc::Receiver, - mut sender: mpsc::Sender, + receiver: mpsc::Receiver, + mut sender: mpsc::Sender, ) -> Pin> + Send>> { async move { let job = FakeCandidateSelectionJob { receiver }; - // most jobs will have a request-response cycle at the heart of their run loop. 
- // however, in this case, we never receive valid messages, so we may as well - // just send all of our (mock) output messages now - let mock_output = run_args.remove(&parent).unwrap_or_default(); - let mut stream = stream::iter(mock_output.into_iter().map(Ok)); - sender.send_all(&mut stream).await?; + if run_args { + sender.send(FromJobCommand::SendMessage( + CandidateSelectionMessage::Invalid( + Default::default(), + Default::default(), + ).into(), + )).await?; + } // it isn't necessary to break run_loop into its own function, // but it's convenient to separate the concerns in this way @@ -1224,12 +1067,12 @@ mod tests { impl FakeCandidateSelectionJob { async fn run_loop(mut self) -> Result<(), Error> { - while let Some(msg) = self.receiver.next().await { - match msg { - ToJob::CandidateSelection(_csm) => { + loop { + match self.receiver.next().await { + Some(_csm) => { unimplemented!("we'd report the collator to the peer set manager here, but that's not implemented yet"); } - ToJob::Stop => break, + None => break, } } @@ -1245,7 +1088,7 @@ mod tests { type OverseerHandle = test_helpers::TestSubsystemContextHandle; fn test_harness>( - run_args: HashMap>, + run_args: bool, test: impl FnOnce(OverseerHandle, mpsc::Receiver<(Option, JobsError)>) -> T, ) { let _ = env_logger::builder() @@ -1276,13 +1119,8 @@ mod tests { #[test] fn starting_and_stopping_job_works() { let relay_parent: Hash = [0; 32].into(); - let mut run_args = HashMap::new(); - let _ = run_args.insert( - relay_parent.clone(), - vec![FromJob::Test], - ); - test_harness(run_args, |mut overseer_handle, err_rx| async move { + test_harness(true, |mut overseer_handle, err_rx| async move { overseer_handle .send(FromOverseer::Signal(OverseerSignal::ActiveLeaves( ActiveLeavesUpdate::start_work(relay_parent), @@ -1307,38 +1145,11 @@ mod tests { }); } - #[test] - fn stopping_non_running_job_fails() { - let relay_parent: Hash = [0; 32].into(); - let run_args = HashMap::new(); - - test_harness(run_args, |mut 
overseer_handle, err_rx| async move { - overseer_handle - .send(FromOverseer::Signal(OverseerSignal::ActiveLeaves( - ActiveLeavesUpdate::stop_work(relay_parent), - ))) - .await; - - let errs: Vec<_> = err_rx.collect().await; - assert_eq!(errs.len(), 1); - assert_eq!(errs[0].0, Some(relay_parent)); - assert_matches!( - errs[0].1, - JobsError::Utility(UtilError::JobNotFound(match_relay_parent)) if relay_parent == match_relay_parent - ); - }); - } - #[test] fn sending_to_a_non_running_job_do_not_stop_the_subsystem() { let relay_parent = Hash::repeat_byte(0x01); - let mut run_args = HashMap::new(); - let _ = run_args.insert( - relay_parent.clone(), - vec![FromJob::Test], - ); - test_harness(run_args, |mut overseer_handle, err_rx| async move { + test_harness(true, |mut overseer_handle, err_rx| async move { overseer_handle .send(FromOverseer::Signal(OverseerSignal::ActiveLeaves( ActiveLeavesUpdate::start_work(relay_parent), @@ -1373,7 +1184,7 @@ mod tests { let (context, _) = make_subsystem_context::(pool.clone()); let SpawnedSubsystem { name, .. 
} = - FakeCandidateSelectionSubsystem::new(pool, HashMap::new(), ()).start(context); + FakeCandidateSelectionSubsystem::new(pool, false, ()).start(context); assert_eq!(name, "FakeCandidateSelection"); } } diff --git a/node/subsystem-util/src/validator_discovery.rs b/node/subsystem-util/src/validator_discovery.rs index ac5bf1d47066147e3c41876fefb23b40ecd19f57..9472d44d40cf917af9935def3f1f202757e6758e 100644 --- a/node/subsystem-util/src/validator_discovery.rs +++ b/node/subsystem-util/src/validator_discovery.rs @@ -20,33 +20,21 @@ use std::collections::HashMap; use std::pin::Pin; use futures::{ - channel::{mpsc, oneshot}, + channel::mpsc, task::{Poll, self}, stream, + StreamExt, }; -use thiserror::Error; +use streamunordered::{StreamUnordered, StreamYield}; use polkadot_node_subsystem::{ - errors::RuntimeApiError, SubsystemError, - messages::{AllMessages, RuntimeApiMessage, RuntimeApiRequest, NetworkBridgeMessage}, + errors::RuntimeApiError, + messages::{AllMessages, NetworkBridgeMessage}, SubsystemContext, }; -use polkadot_primitives::v1::{Hash, ValidatorId, AuthorityDiscoveryId}; +use polkadot_primitives::v1::{Hash, ValidatorId, AuthorityDiscoveryId, SessionIndex}; use sc_network::PeerId; - -/// Error when making a request to connect to validators. -#[derive(Debug, Error)] -pub enum Error { - /// Attempted to send or receive on a oneshot channel which had been canceled - #[error(transparent)] - Oneshot(#[from] oneshot::Canceled), - /// A subsystem error. - #[error(transparent)] - Subsystem(#[from] SubsystemError), - /// An error in the Runtime API. - #[error(transparent)] - RuntimeApi(#[from] RuntimeApiError), -} +use crate::Error; /// Utility function to make it easier to connect to validators. 
pub async fn connect_to_validators( @@ -54,17 +42,42 @@ pub async fn connect_to_validators( relay_parent: Hash, validators: Vec, ) -> Result { - // ValidatorId -> AuthorityDiscoveryId - let (tx, rx) = oneshot::channel(); + let current_index = crate::request_session_index_for_child_ctx(relay_parent, ctx).await?.await??; + connect_to_past_session_validators(ctx, relay_parent, validators, current_index).await +} - ctx.send_message(AllMessages::RuntimeApi( - RuntimeApiMessage::Request( - relay_parent, - RuntimeApiRequest::ValidatorDiscovery(validators.clone(), tx), - ) - )).await?; +/// Utility function to make it easier to connect to validators in the past sessions. +pub async fn connect_to_past_session_validators( + ctx: &mut Context, + relay_parent: Hash, + validators: Vec, + session_index: SessionIndex, +) -> Result { + let session_info = crate::request_session_info_ctx( + relay_parent, + session_index, + ctx, + ).await?.await??; + + let (session_validators, discovery_keys) = match session_info { + Some(info) => (info.validators, info.discovery_keys), + None => return Err(RuntimeApiError::from( + format!("No SessionInfo found for the index {}", session_index) + ).into()), + }; + + let id_to_index = session_validators.iter() + .zip(0usize..) 
+ .collect::>(); + + // We assume the same ordering in authorities as in validators so we can do an index search + let maybe_authorities: Vec<_> = validators.iter() + .map(|id| { + let validator_index = id_to_index.get(&id); + validator_index.and_then(|i| discovery_keys.get(*i).cloned()) + }) + .collect(); - let maybe_authorities = rx.await??; let authorities: Vec<_> = maybe_authorities.iter() .cloned() .filter_map(|id| id) @@ -75,48 +88,131 @@ pub async fn connect_to_validators( .filter_map(|(k, v)| v.map(|v| (v, k))) .collect::>(); - let (connections, revoke) = connect_to_authorities(ctx, authorities).await?; + let connections = connect_to_authorities(ctx, authorities).await; Ok(ConnectionRequest { validator_map, connections, - revoke, }) } async fn connect_to_authorities( ctx: &mut Context, validator_ids: Vec, -) -> Result<(mpsc::Receiver<(AuthorityDiscoveryId, PeerId)>, oneshot::Sender<()>), Error> { +) -> mpsc::Receiver<(AuthorityDiscoveryId, PeerId)> { const PEERS_CAPACITY: usize = 8; - let (revoke_tx, revoke) = oneshot::channel(); let (connected, connected_rx) = mpsc::channel(PEERS_CAPACITY); ctx.send_message(AllMessages::NetworkBridge( NetworkBridgeMessage::ConnectToValidators { validator_ids, connected, - revoke, } - )).await?; + )).await; + + connected_rx +} + +/// Represents a discovered validator. +/// +/// Result of [`ConnectionRequests::next`]. +#[derive(Debug, PartialEq)] +pub struct DiscoveredValidator { + /// The relay parent associated with the connection request that returned a result. + pub relay_parent: Hash, + /// The [`ValidatorId`] that was resolved. + pub validator_id: ValidatorId, + /// The [`PeerId`] associated to the validator id. + pub peer_id: PeerId, +} + +/// Used by [`ConnectionRequests::requests`] to map a [`ConnectionRequest`] item to a [`DiscoveredValidator`]. 
+struct ConnectionRequestForRelayParent { + request: ConnectionRequest, + relay_parent: Hash, +} + +impl stream::Stream for ConnectionRequestForRelayParent { + type Item = DiscoveredValidator; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut task::Context) -> Poll> { + self.request + .poll_next_unpin(cx) + .map(|r| r.map(|(validator_id, peer_id)| DiscoveredValidator { + validator_id, + peer_id, + relay_parent: self.relay_parent, + })) + } +} + +/// A struct that assists performing multiple concurrent connection requests. +/// +/// This allows concurrent connections to validator sets at different `relay_parents`. +/// Use [`ConnectionRequests::next`] to wait for results of the added connection requests. +#[derive(Default)] +pub struct ConnectionRequests { + /// Connection requests relay_parent -> StreamUnordered token + id_map: HashMap, + + /// Connection requests themselves. + requests: StreamUnordered, +} + +impl ConnectionRequests { + /// Insert a new connection request. + /// + /// If a `ConnectionRequest` under a given `relay_parent` already exists it will + /// be revoked and substituted with the given one. + pub fn put(&mut self, relay_parent: Hash, request: ConnectionRequest) { + self.remove(&relay_parent); + let token = self.requests.push(ConnectionRequestForRelayParent { relay_parent, request }); + + self.id_map.insert(relay_parent, token); + } + + /// Remove a connection request by a given `relay_parent`. + pub fn remove(&mut self, relay_parent: &Hash) { + if let Some(token) = self.id_map.remove(relay_parent) { + Pin::new(&mut self.requests).remove(token); + } + } + + /// Is a connection at this relay parent already present in the request + pub fn contains_request(&self, relay_parent: &Hash) -> bool { + self.id_map.contains_key(relay_parent) + } - Ok((connected_rx, revoke_tx)) + /// Returns the next available connection request result. 
+ /// + /// # Note + /// + /// When there are no active requests this will wait indefinitely, like an always pending future. + pub async fn next(&mut self) -> DiscoveredValidator { + loop { + match self.requests.next().await { + Some((StreamYield::Item(item), _)) => { + return item + }, + // Ignore finished requests, they are required to be removed. + Some((StreamYield::Finished(_), _)) => (), + None => futures::pending!(), + } + } + } } /// A pending connection request to validators. /// This struct implements `Stream` to allow for asynchronous /// discovery of validator addresses. /// -/// NOTE: you should call `revoke` on this struct -/// when you're no longer interested in the requested validators. +/// NOTE: the request will be revoked on drop. #[must_use = "dropping a request will result in its immediate revokation"] pub struct ConnectionRequest { validator_map: HashMap, #[must_use = "streams do nothing unless polled"] connections: mpsc::Receiver<(AuthorityDiscoveryId, PeerId)>, - #[must_use = "a request should be revoked at some point"] - revoke: oneshot::Sender<()>, } impl stream::Stream for ConnectionRequest { @@ -141,18 +237,186 @@ impl stream::Stream for ConnectionRequest { } } -impl ConnectionRequest { - /// By revoking the request the caller allows the network to - /// free some peer slots thus freeing the resources. - /// It doesn't necessarily lead to peers disconnection though. - /// The revokation is enacted on in the next connection request. - /// - /// This can be done either by calling this function or dropping the request. 
- pub fn revoke(self) { - if let Err(_) = self.revoke.send(()) { - log::warn!( - "Failed to revoke a validator connection request", +#[cfg(test)] +mod tests { + use super::*; + use polkadot_primitives::v1::ValidatorPair; + use sp_core::{Pair, Public}; + + use futures::{executor, poll, SinkExt}; + + async fn check_next_is_pending(connection_requests: &mut ConnectionRequests) { + let next = connection_requests.next(); + futures::pin_mut!(next); + assert_eq!(poll!(next), Poll::Pending); + } + + #[test] + fn adding_a_connection_request_works() { + let mut connection_requests = ConnectionRequests::default(); + + executor::block_on(async move { + check_next_is_pending(&mut connection_requests).await; + + let validator_1 = ValidatorPair::generate().0.public(); + let validator_2 = ValidatorPair::generate().0.public(); + + let auth_1 = AuthorityDiscoveryId::from_slice(&[1; 32]); + let auth_2 = AuthorityDiscoveryId::from_slice(&[2; 32]); + + let mut validator_map = HashMap::new(); + validator_map.insert(auth_1.clone(), validator_1.clone()); + validator_map.insert(auth_2.clone(), validator_2.clone()); + + let (mut rq1_tx, rq1_rx) = mpsc::channel(8); + + let peer_id_1 = PeerId::random(); + let peer_id_2 = PeerId::random(); + + let connection_request_1 = ConnectionRequest { + validator_map, + connections: rq1_rx, + }; + + let relay_parent_1 = Hash::repeat_byte(1); + + connection_requests.put(relay_parent_1.clone(), connection_request_1); + + rq1_tx.send((auth_1, peer_id_1.clone())).await.unwrap(); + rq1_tx.send((auth_2, peer_id_2.clone())).await.unwrap(); + + let res = connection_requests.next().await; + assert_eq!( + res, + DiscoveredValidator { relay_parent: relay_parent_1, validator_id: validator_1, peer_id: peer_id_1 }, ); - } + + let res = connection_requests.next().await; + assert_eq!( + res, + DiscoveredValidator { relay_parent: relay_parent_1, validator_id: validator_2, peer_id: peer_id_2 }, + ); + + check_next_is_pending(&mut connection_requests).await; + }); + } + + 
#[test] + fn adding_two_connection_requests_works() { + let mut connection_requests = ConnectionRequests::default(); + + executor::block_on(async move { + check_next_is_pending(&mut connection_requests).await; + + let validator_1 = ValidatorPair::generate().0.public(); + let validator_2 = ValidatorPair::generate().0.public(); + + let auth_1 = AuthorityDiscoveryId::from_slice(&[1; 32]); + let auth_2 = AuthorityDiscoveryId::from_slice(&[2; 32]); + + let mut validator_map_1 = HashMap::new(); + let mut validator_map_2 = HashMap::new(); + + validator_map_1.insert(auth_1.clone(), validator_1.clone()); + validator_map_2.insert(auth_2.clone(), validator_2.clone()); + + let (mut rq1_tx, rq1_rx) = mpsc::channel(8); + + let (mut rq2_tx, rq2_rx) = mpsc::channel(8); + + let peer_id_1 = PeerId::random(); + let peer_id_2 = PeerId::random(); + + let connection_request_1 = ConnectionRequest { + validator_map: validator_map_1, + connections: rq1_rx, + }; + + let connection_request_2 = ConnectionRequest { + validator_map: validator_map_2, + connections: rq2_rx, + }; + + let relay_parent_1 = Hash::repeat_byte(1); + let relay_parent_2 = Hash::repeat_byte(2); + + connection_requests.put(relay_parent_1.clone(), connection_request_1); + connection_requests.put(relay_parent_2.clone(), connection_request_2); + + rq1_tx.send((auth_1, peer_id_1.clone())).await.unwrap(); + rq2_tx.send((auth_2, peer_id_2.clone())).await.unwrap(); + + let res = connection_requests.next().await; + assert_eq!( + res, + DiscoveredValidator { relay_parent: relay_parent_1, validator_id: validator_1, peer_id: peer_id_1 }, + ); + + let res = connection_requests.next().await; + assert_eq!( + res, + DiscoveredValidator { relay_parent: relay_parent_2, validator_id: validator_2, peer_id: peer_id_2 }, + ); + + check_next_is_pending(&mut connection_requests).await; + }); + } + + #[test] + fn replacing_a_connection_request_works() { + let mut connection_requests = ConnectionRequests::default(); + + executor::block_on(async 
move { + check_next_is_pending(&mut connection_requests).await; + + let validator_1 = ValidatorPair::generate().0.public(); + let validator_2 = ValidatorPair::generate().0.public(); + + let auth_1 = AuthorityDiscoveryId::from_slice(&[1; 32]); + let auth_2 = AuthorityDiscoveryId::from_slice(&[2; 32]); + + let mut validator_map_1 = HashMap::new(); + let mut validator_map_2 = HashMap::new(); + + validator_map_1.insert(auth_1.clone(), validator_1.clone()); + validator_map_2.insert(auth_2.clone(), validator_2.clone()); + + let (mut rq1_tx, rq1_rx) = mpsc::channel(8); + + let (mut rq2_tx, rq2_rx) = mpsc::channel(8); + + let peer_id_1 = PeerId::random(); + let peer_id_2 = PeerId::random(); + + let connection_request_1 = ConnectionRequest { + validator_map: validator_map_1, + connections: rq1_rx, + }; + + let connection_request_2 = ConnectionRequest { + validator_map: validator_map_2, + connections: rq2_rx, + }; + + let relay_parent = Hash::repeat_byte(3); + + connection_requests.put(relay_parent.clone(), connection_request_1); + + rq1_tx.send((auth_1.clone(), peer_id_1.clone())).await.unwrap(); + + let res = connection_requests.next().await; + assert_eq!(res, DiscoveredValidator { relay_parent, validator_id: validator_1, peer_id: peer_id_1.clone() }); + + connection_requests.put(relay_parent.clone(), connection_request_2); + + assert!(rq1_tx.send((auth_1, peer_id_1.clone())).await.is_err()); + + rq2_tx.send((auth_2, peer_id_2.clone())).await.unwrap(); + + let res = connection_requests.next().await; + assert_eq!(res, DiscoveredValidator { relay_parent, validator_id: validator_2, peer_id: peer_id_2 }); + + check_next_is_pending(&mut connection_requests).await; + }); } } diff --git a/node/subsystem/Cargo.toml b/node/subsystem/Cargo.toml index f578d98633b6e5557a4ecdbd382a5951a3d1f64d..0fb2eaaff66983bd9efe87c6da5739e07f6148b6 100644 --- a/node/subsystem/Cargo.toml +++ b/node/subsystem/Cargo.toml @@ -6,27 +6,28 @@ edition = "2018" description = "Subsystem traits and message 
definitions" [dependencies] -async-trait = "0.1" +async-trait = "0.1.42" derive_more = "0.99.11" -futures = "0.3.5" +futures = "0.3.8" futures-timer = "3.0.2" -log = "0.4.8" -parity-scale-codec = "1.3.4" -parking_lot = { version = "0.10.0", optional = true } -pin-project = "0.4.22" +tracing = "0.1.22" +tracing-futures = "0.2.4" +parity-scale-codec = { version = "1.3.5", default-features = false, features = ["derive"] } +parking_lot = { version = "0.11.1", optional = true } +pin-project = "1.0.2" polkadot-node-primitives = { path = "../primitives" } polkadot-node-network-protocol = { path = "../network/protocol" } polkadot-primitives = { path = "../../primitives" } polkadot-statement-table = { path = "../../statement-table" } -sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" } -smallvec = "1.4.1" -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate", branch = "master" } -thiserror = "1.0.21" +sc-network = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +smallvec = "1.5.1" +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +substrate-prometheus-endpoint = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +thiserror = "1.0.22" [dev-dependencies] -assert_matches = "1.3.0" -async-trait = "0.1" -futures = { version = "0.3.5", features = ["thread-pool"] } -parking_lot = "0.10.0" +assert_matches = "1.4.0" +async-trait = "0.1.42" +futures = { version = "0.3.8", features = ["thread-pool"] } +parking_lot = "0.11.1" polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } diff --git a/node/subsystem/src/lib.rs b/node/subsystem/src/lib.rs index ce06310458400151048a0dafa43b357759a972b7..143c35d2c07c6ce5a8baef887f8c64d925b5a02c 100644 --- a/node/subsystem/src/lib.rs +++ 
b/node/subsystem/src/lib.rs @@ -58,12 +58,17 @@ pub struct ActiveLeavesUpdate { impl ActiveLeavesUpdate { /// Create a ActiveLeavesUpdate with a single activated hash pub fn start_work(hash: Hash) -> Self { - Self { activated: [hash].as_ref().into(), ..Default::default() } + Self { activated: [hash][..].into(), ..Default::default() } } /// Create a ActiveLeavesUpdate with a single deactivated hash pub fn stop_work(hash: Hash) -> Self { - Self { deactivated: [hash].as_ref().into(), ..Default::default() } + Self { deactivated: [hash][..].into(), ..Default::default() } + } + + /// Is this update empty and doesn't contain any information? + pub fn is_empty(&self) -> bool { + self.activated.is_empty() && self.deactivated.is_empty() } } @@ -72,9 +77,9 @@ impl PartialEq for ActiveLeavesUpdate { /// /// Instead, it means equality when `activated` and `deactivated` are considered as sets. fn eq(&self, other: &Self) -> bool { - use std::collections::HashSet; - self.activated.iter().collect::>() == other.activated.iter().collect::>() && - self.deactivated.iter().collect::>() == other.deactivated.iter().collect::>() + self.activated.len() == other.activated.len() && self.deactivated.len() == other.deactivated.len() + && self.activated.iter().all(|a| other.activated.contains(a)) + && self.deactivated.iter().all(|a| other.deactivated.contains(a)) } } @@ -164,7 +169,7 @@ pub struct SpawnedSubsystem { /// Name of the subsystem being spawned. pub name: &'static str, /// The task of the subsystem being spawned. - pub future: BoxFuture<'static, ()>, + pub future: BoxFuture<'static, SubsystemResult<()>>, } /// A `Result` type that wraps [`SubsystemError`]. @@ -204,10 +209,10 @@ pub trait SubsystemContext: Send + 'static { ) -> SubsystemResult<()>; /// Send a direct message to some other `Subsystem`, routed based on message type. 
- async fn send_message(&mut self, msg: AllMessages) -> SubsystemResult<()>; + async fn send_message(&mut self, msg: AllMessages); /// Send multiple direct messages to other `Subsystem`s, routed based on message type. - async fn send_messages(&mut self, msgs: T) -> SubsystemResult<()> + async fn send_messages(&mut self, msgs: T) where T: IntoIterator + Send, T::IntoIter: Send; } @@ -228,14 +233,24 @@ pub trait Subsystem { /// types of messages. Used for tests or as a placeholder. pub struct DummySubsystem; -impl Subsystem for DummySubsystem { +impl Subsystem for DummySubsystem +where + C::Message: std::fmt::Debug +{ fn start(self, mut ctx: C) -> SpawnedSubsystem { let future = Box::pin(async move { loop { match ctx.recv().await { - Ok(FromOverseer::Signal(OverseerSignal::Conclude)) => return, - Err(_) => return, - _ => continue, + Err(_) => return Ok(()), + Ok(FromOverseer::Signal(OverseerSignal::Conclude)) => return Ok(()), + Ok(overseer_msg) => { + tracing::debug!( + target: "dummy-subsystem", + "Discarding a message sent from overseer {:?}", + overseer_msg + ); + continue; + } } } }); diff --git a/node/subsystem/src/messages.rs b/node/subsystem/src/messages.rs index 425debfd122cdab0c4fc139dd636efeaab1cbac0..3b7ee32ddabb48635f9c64f2bf95e336c1118f7c 100644 --- a/node/subsystem/src/messages.rs +++ b/node/subsystem/src/messages.rs @@ -31,19 +31,21 @@ use polkadot_node_primitives::{ CollationGenerationConfig, MisbehaviorReport, SignedFullStatement, ValidationResult, }; use polkadot_primitives::v1::{ - AuthorityDiscoveryId, AvailableData, BackedCandidate, BlockNumber, + AuthorityDiscoveryId, AvailableData, BackedCandidate, BlockNumber, SessionInfo, Header as BlockHeader, CandidateDescriptor, CandidateEvent, CandidateReceipt, CollatorId, CommittedCandidateReceipt, CoreState, ErasureChunk, GroupRotationInfo, Hash, Id as ParaId, OccupiedCoreAssumption, PersistedValidationData, PoV, SessionIndex, SignedAvailabilityBitfield, - ValidationCode, ValidatorId, ValidationData, - 
ValidatorIndex, ValidatorSignature, InboundDownwardMessage, + ValidationCode, ValidatorId, ValidationData, CandidateHash, + ValidatorIndex, ValidatorSignature, InboundDownwardMessage, InboundHrmpMessage, }; -use std::sync::Arc; +use std::{sync::Arc, collections::btree_map::BTreeMap}; -/// A notification of a new backed candidate. -#[derive(Debug)] -pub struct NewBackedCandidate(pub BackedCandidate); +/// Subsystem messages where each message is always bound to a relay parent. +pub trait BoundToRelayParent { + /// Returns the relay parent this message is bound to. + fn relay_parent(&self) -> Hash; +} /// Messages received by the Candidate Selection subsystem. #[derive(Debug)] @@ -55,12 +57,11 @@ pub enum CandidateSelectionMessage { Invalid(Hash, CandidateReceipt), } -impl CandidateSelectionMessage { - /// If the current variant contains the relay parent hash, return it. - pub fn relay_parent(&self) -> Option { +impl BoundToRelayParent for CandidateSelectionMessage { + fn relay_parent(&self) -> Hash { match self { - Self::Collation(hash, ..) => Some(*hash), - Self::Invalid(hash, _) => Some(*hash), + Self::Collation(hash, ..) => *hash, + Self::Invalid(hash, _) => *hash, } } } @@ -76,7 +77,7 @@ impl Default for CandidateSelectionMessage { pub enum CandidateBackingMessage { /// Requests a set of backable candidates that could be backed in a child of the given /// relay-parent, referenced by its hash. - GetBackedCandidates(Hash, oneshot::Sender>), + GetBackedCandidates(Hash, Vec, oneshot::Sender>), /// Note that the Candidate Backing subsystem should second the given candidate in the context of the /// given relay-parent (ref. by hash). This candidate must be validated. Second(Hash, CandidateReceipt, PoV), @@ -85,13 +86,12 @@ pub enum CandidateBackingMessage { Statement(Hash, SignedFullStatement), } -impl CandidateBackingMessage { - /// If the current variant contains the relay parent hash, return it. 
- pub fn relay_parent(&self) -> Option { +impl BoundToRelayParent for CandidateBackingMessage { + fn relay_parent(&self) -> Hash { match self { - Self::GetBackedCandidates(hash, _) => Some(*hash), - Self::Second(hash, _, _) => Some(*hash), - Self::Statement(hash, _) => Some(*hash), + Self::GetBackedCandidates(hash, _, _) => *hash, + Self::Second(hash, _, _) => *hash, + Self::Statement(hash, _) => *hash, } } } @@ -207,6 +207,7 @@ pub enum NetworkBridgeMessage { /// /// Also ask the network to stay connected to these peers at least /// until the request is revoked. + /// This can be done by dropping the receiver. ConnectToValidators { /// Ids of the validators to connect to. validator_ids: Vec, @@ -214,13 +215,6 @@ pub enum NetworkBridgeMessage { /// the validators as they are connected. /// The response is sent immediately for already connected peers. connected: mpsc::Sender<(AuthorityDiscoveryId, PeerId)>, - /// By revoking the request the caller allows the network to - /// free some peer slots thus freeing the resources. - /// It doesn't necessarily lead to peers disconnection though. - /// The revokation is enacted on in the next connection request. - /// - /// This can be done by sending to the channel or dropping the sender. - revoke: oneshot::Receiver<()>, }, } @@ -278,10 +272,9 @@ impl BitfieldDistributionMessage { #[derive(Debug)] pub enum BitfieldSigningMessage {} -impl BitfieldSigningMessage { - /// If the current variant contains the relay parent hash, return it. - pub fn relay_parent(&self) -> Option { - None +impl BoundToRelayParent for BitfieldSigningMessage { + fn relay_parent(&self) -> Hash { + match *self {} } } @@ -289,31 +282,31 @@ impl BitfieldSigningMessage { #[derive(Debug)] pub enum AvailabilityStoreMessage { /// Query a `AvailableData` from the AV store. - QueryAvailableData(Hash, oneshot::Sender>), + QueryAvailableData(CandidateHash, oneshot::Sender>), /// Query whether a `AvailableData` exists within the AV Store. 
/// /// This is useful in cases when existence /// matters, but we don't want to necessarily pass around multiple /// megabytes of data to get a single bit of information. - QueryDataAvailability(Hash, oneshot::Sender), + QueryDataAvailability(CandidateHash, oneshot::Sender), /// Query an `ErasureChunk` from the AV store by the candidate hash and validator index. - QueryChunk(Hash, ValidatorIndex, oneshot::Sender>), + QueryChunk(CandidateHash, ValidatorIndex, oneshot::Sender>), /// Query whether an `ErasureChunk` exists within the AV Store. /// /// This is useful in cases like bitfield signing, when existence /// matters, but we don't want to necessarily pass around large /// quantities of data to get a single bit of information. - QueryChunkAvailability(Hash, ValidatorIndex, oneshot::Sender), + QueryChunkAvailability(CandidateHash, ValidatorIndex, oneshot::Sender), /// Store an `ErasureChunk` in the AV store. /// /// Return `Ok(())` if the store operation succeeded, `Err(())` if it failed. StoreChunk { /// A hash of the candidate this chunk belongs to. - candidate_hash: Hash, + candidate_hash: CandidateHash, /// A relevant relay parent. relay_parent: Hash, /// The index of the validator this chunk belongs to. @@ -328,7 +321,7 @@ pub enum AvailabilityStoreMessage { /// If `ValidatorIndex` is present store corresponding chunk also. /// /// Return `Ok(())` if the store operation succeeded, `Err(())` if it failed. - StoreAvailableData(Hash, Option, u32, AvailableData, oneshot::Sender>), + StoreAvailableData(CandidateHash, Option, u32, AvailableData, oneshot::Sender>), } impl AvailabilityStoreMessage { @@ -411,7 +404,7 @@ pub enum RuntimeApiRequest { /// Sends back `true` if the validation outputs pass all acceptance criteria checks. CheckValidationOutputs( ParaId, - polkadot_primitives::v1::ValidationOutputs, + polkadot_primitives::v1::CandidateCommitments, RuntimeApiSender, ), /// Get the session index that a child of the block will have. 
@@ -424,24 +417,34 @@ pub enum RuntimeApiRequest { OccupiedCoreAssumption, RuntimeApiSender>, ), + /// Fetch the historical validation code used by a para for candidates executed in the + /// context of a given block height in the current chain. + /// + /// `context_height` may be no greater than the height of the block in whose + /// state the runtime API is executed. Otherwise `None` is returned. + HistoricalValidationCode( + ParaId, + BlockNumber, + RuntimeApiSender>, + ), /// Get a the candidate pending availability for a particular parachain by parachain / core index CandidatePendingAvailability(ParaId, RuntimeApiSender>), /// Get all events concerning candidates (backing, inclusion, time-out) in the parent of /// the block in whose state this request is executed. CandidateEvents(RuntimeApiSender>), - /// Get the `AuthorityDiscoveryId`s corresponding to the given `ValidatorId`s. - /// Currently this request is limited to validators in the current session. - /// - /// Returns `None` for validators not found in the current session. - ValidatorDiscovery( - Vec, - RuntimeApiSender>>, - ), + /// Get the session info for the given session, if stored. + SessionInfo(SessionIndex, RuntimeApiSender>), /// Get all the pending inbound messages in the downward message queue for a para. DmqContents( ParaId, RuntimeApiSender>>, ), + /// Get the contents of all channels addressed to the given recipient. Channels that have no + /// messages in them are also included. + InboundHrmpChannelsContents( + ParaId, + RuntimeApiSender>>>, + ), } /// A message to the Runtime API subsystem. @@ -490,7 +493,7 @@ pub enum ProvisionableData { /// This bitfield indicates the availability of various candidate blocks. Bitfield(Hash, SignedAvailabilityBitfield), /// The Candidate Backing subsystem believes that this candidate is valid, pending availability. 
- BackedCandidate(BackedCandidate), + BackedCandidate(CandidateReceipt), /// Misbehavior reports are self-contained proofs of validator misbehavior. MisbehaviorReport(Hash, MisbehaviorReport), /// Disputes trigger a broad dispute resolution process. @@ -517,16 +520,15 @@ pub enum ProvisionerMessage { /// where it can be assembled into the InclusionInherent. RequestInherentData(Hash, oneshot::Sender), /// This data should become part of a relay chain block - ProvisionableData(ProvisionableData), + ProvisionableData(Hash, ProvisionableData), } -impl ProvisionerMessage { - /// If the current variant contains the relay parent hash, return it. - pub fn relay_parent(&self) -> Option { +impl BoundToRelayParent for ProvisionerMessage { + fn relay_parent(&self) -> Hash { match self { - Self::RequestBlockAuthorshipData(hash, _) => Some(*hash), - Self::RequestInherentData(hash, _) => Some(*hash), - Self::ProvisionableData(_) => None, + Self::RequestBlockAuthorshipData(hash, _) => *hash, + Self::RequestInherentData(hash, _) => *hash, + Self::ProvisionableData(hash, _) => *hash, } } } diff --git a/node/test/client/Cargo.toml b/node/test/client/Cargo.toml index 794dc2ea08db7acbca8b6664b32577d9ceeb5180..e64c958b07ce9e1e876303df4f494874adad67dd 100644 --- a/node/test/client/Cargo.toml +++ b/node/test/client/Cargo.toml @@ -1,30 +1,31 @@ [package] name = "polkadot-test-client" -version = "0.8.26" +version = "0.8.27" authors = ["Parity Technologies "] edition = "2018" [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "1.3.5", default-features = false, features = ["derive"] } # Polkadot dependencies polkadot-test-runtime = { path = "../../../runtime/test-runtime" } polkadot-test-service = { path = "../service" } polkadot-primitives = { path = "../../../primitives" } +polkadot-node-subsystem = { path = "../../subsystem" } # Substrate dependencies -substrate-test-client = { git 
= "https://github.com/paritytech/substrate", branch = "master" } -sc-service = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "master" } +substrate-test-client = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sc-service = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sc-block-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-inherents = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-timestamp = { git = 
"https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } [dev-dependencies] -sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } diff --git a/node/test/client/src/block_builder.rs b/node/test/client/src/block_builder.rs index f377bf6c1603e9041e89a914751018fd9bc8b73c..de49ae48867593e44be4301f4ac2c1fb5237592d 100644 --- a/node/test/client/src/block_builder.rs +++ b/node/test/client/src/block_builder.rs @@ -21,7 +21,7 @@ use sp_runtime::generic::BlockId; use sp_api::ProvideRuntimeApi; use sc_block_builder::{BlockBuilderProvider, BlockBuilder}; use sp_state_machine::BasicExternalities; -use codec::{Encode, Decode}; +use parity_scale_codec::{Encode, Decode}; /// An extension for the test client to init a Polkadot specific block builder. 
pub trait InitPolkadotBlockBuilder { @@ -69,7 +69,14 @@ impl InitPolkadotBlockBuilder for Client { inherent_data .put_data(sp_timestamp::INHERENT_IDENTIFIER, ×tamp) - .expect("Put timestamp failed"); + .expect("Put timestamp inherent data"); + + inherent_data + .put_data( + polkadot_primitives::v1::INCLUSION_INHERENT_IDENTIFIER, + &polkadot_node_subsystem::messages::ProvisionerInherentData::default(), + ) + .expect("Put inclusion inherent data"); let inherents = block_builder.create_inherents(inherent_data).expect("Creates inherents"); diff --git a/node/test/service/Cargo.toml b/node/test/service/Cargo.toml index c7da84ece60da09ac8d3114c40f057114be85760..577094d520401c33467d8c4d151bb11e82e5925c 100644 --- a/node/test/service/Cargo.toml +++ b/node/test/service/Cargo.toml @@ -1,58 +1,62 @@ [package] name = "polkadot-test-service" -version = "0.8.26" +version = "0.8.27" authors = ["Parity Technologies "] edition = "2018" [dependencies] -futures = "0.3.4" +futures = "0.3.8" futures01 = { package = "futures", version = "0.1.29" } -hex = "0.4" -log = "0.4.8" +hex = "0.4.2" +tracing = "0.1.22" +tracing-futures = "0.2.4" rand = "0.7.3" tempfile = "3.1.0" # Polkadot dependencies polkadot-overseer = { path = "../../overseer" } polkadot-primitives = { path = "../../../primitives" } +polkadot-parachain = { path = "../../../parachain" } polkadot-rpc = { path = "../../../rpc" } polkadot-runtime-common = { path = "../../../runtime/common" } polkadot-service = { path = "../../service" } +polkadot-node-subsystem = { path = "../../subsystem" } +polkadot-node-primitives = { path = "../../primitives" } polkadot-test-runtime = { path = "../../../runtime/test-runtime" } polkadot-runtime-parachains = { path = "../../../runtime/parachains" } # Substrate dependencies -authority-discovery = { package = "sc-authority-discovery", git = "https://github.com/paritytech/substrate", branch = "master" } -babe = { package = "sc-consensus-babe", git = "https://github.com/paritytech/substrate", branch 
= "master" } -babe-primitives = { package = "sp-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "master" } -consensus_common = { package = "sp-consensus", git = "https://github.com/paritytech/substrate", branch = "master" } -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master" } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master" } -grandpa = { package = "sc-finality-grandpa", git = "https://github.com/paritytech/substrate", branch = "master" } -grandpa_primitives = { package = "sp-finality-grandpa", git = "https://github.com/paritytech/substrate", branch = "master" } -inherents = { package = "sp-inherents", git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-staking = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-chain-spec = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-cli = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-executor = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-informant = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "master" } -service = { package = "sc-service", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-arithmetic = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-blockchain = { 
git = "https://github.com/paritytech/substrate", branch = "master" } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "master" } -substrate-test-client = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sc-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +babe = { package = "sc-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +babe-primitives = { package = "sp-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +consensus_common = { package = "sp-consensus", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +grandpa = { package = "sc-finality-grandpa", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +grandpa_primitives = { package = "sp-finality-grandpa", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +inherents = { package = "sp-inherents", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +pallet-staking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } 
+pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sc-chain-spec = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sc-cli = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sc-consensus = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sc-executor = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sc-network = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sc-transaction-pool = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +service = { package = "sc-service", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-arithmetic = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-state-machine = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +substrate-test-client = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } [dev-dependencies] -pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -serde_json = "1.0" -substrate-test-utils = { git = "https://github.com/paritytech/substrate", branch = "master" } 
+pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +serde_json = "1.0.60" +substrate-test-utils = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } tokio = { version = "0.2", features = ["macros"] } diff --git a/node/test/service/src/chain_spec.rs b/node/test/service/src/chain_spec.rs index e677d0b9ac4c86e3aa3faa9fae3d25b0640a69d4..546467ad8cc089b427af9ef852b83797c0916c00 100644 --- a/node/test/service/src/chain_spec.rs +++ b/node/test/service/src/chain_spec.rs @@ -16,10 +16,11 @@ //! Chain specifications for the test runtime. +use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use babe_primitives::AuthorityId as BabeId; use grandpa::AuthorityId as GrandpaId; use pallet_staking::Forcing; -use polkadot_primitives::v0::{ValidatorId, AccountId}; +use polkadot_primitives::v1::{ValidatorId, AccountId}; use polkadot_service::chain_spec::{get_account_id_from_seed, get_from_seed, Extensions}; use polkadot_test_runtime::constants::currency::DOTS; use sc_chain_spec::{ChainSpec, ChainType}; @@ -53,7 +54,6 @@ pub fn polkadot_local_testnet_genesis() -> polkadot_test_runtime::GenesisConfig vec![ get_authority_keys_from_seed("Alice"), get_authority_keys_from_seed("Bob"), - get_authority_keys_from_seed("Charlie"), ], get_account_id_from_seed::("Alice"), None, @@ -63,13 +63,14 @@ pub fn polkadot_local_testnet_genesis() -> polkadot_test_runtime::GenesisConfig /// Helper function to generate stash, controller and session key from seed fn get_authority_keys_from_seed( seed: &str, -) -> (AccountId, AccountId, BabeId, GrandpaId, ValidatorId) { +) -> (AccountId, AccountId, BabeId, GrandpaId, ValidatorId, AuthorityDiscoveryId) { ( get_account_id_from_seed::(&format!("{}//stash", seed)), get_account_id_from_seed::(seed), get_from_seed::(seed), get_from_seed::(seed), get_from_seed::(seed), + get_from_seed::(seed), ) } @@ -92,46 +93,47 @@ fn 
testnet_accounts() -> Vec { /// Helper function to create polkadot GenesisConfig for testing fn polkadot_testnet_genesis( - initial_authorities: Vec<(AccountId, AccountId, BabeId, GrandpaId, ValidatorId)>, + initial_authorities: Vec<(AccountId, AccountId, BabeId, GrandpaId, ValidatorId, AuthorityDiscoveryId)>, root_key: AccountId, endowed_accounts: Option>, ) -> polkadot_test_runtime::GenesisConfig { - use polkadot_test_runtime as polkadot; + use polkadot_test_runtime as runtime; let endowed_accounts: Vec = endowed_accounts.unwrap_or_else(testnet_accounts); const ENDOWMENT: u128 = 1_000_000 * DOTS; const STASH: u128 = 100 * DOTS; - polkadot::GenesisConfig { - frame_system: Some(polkadot::SystemConfig { - code: polkadot::WASM_BINARY.expect("Wasm binary must be built for testing").to_vec(), + runtime::GenesisConfig { + frame_system: Some(runtime::SystemConfig { + code: runtime::WASM_BINARY.expect("Wasm binary must be built for testing").to_vec(), ..Default::default() }), - pallet_indices: Some(polkadot::IndicesConfig { indices: vec![] }), - pallet_balances: Some(polkadot::BalancesConfig { + pallet_indices: Some(runtime::IndicesConfig { indices: vec![] }), + pallet_balances: Some(runtime::BalancesConfig { balances: endowed_accounts .iter() .map(|k| (k.clone(), ENDOWMENT)) .collect(), }), - pallet_session: Some(polkadot::SessionConfig { + pallet_session: Some(runtime::SessionConfig { keys: initial_authorities .iter() .map(|x| { ( x.0.clone(), x.0.clone(), - polkadot_test_runtime::SessionKeys { + runtime::SessionKeys { babe: x.2.clone(), grandpa: x.3.clone(), parachain_validator: x.4.clone(), + authority_discovery: x.5.clone(), }, ) }) .collect::>(), }), - pallet_staking: Some(polkadot::StakingConfig { + pallet_staking: Some(runtime::StakingConfig { minimum_validator_count: 1, validator_count: 2, stakers: initial_authorities @@ -141,7 +143,7 @@ fn polkadot_testnet_genesis( x.0.clone(), x.1.clone(), STASH, - polkadot::StakerStatus::Validator, + 
runtime::StakerStatus::Validator, ) }) .collect(), @@ -152,13 +154,28 @@ fn polkadot_testnet_genesis( }), pallet_babe: Some(Default::default()), pallet_grandpa: Some(Default::default()), - pallet_authority_discovery: Some(polkadot::AuthorityDiscoveryConfig { keys: vec![] }), - claims: Some(polkadot::ClaimsConfig { + pallet_authority_discovery: Some(runtime::AuthorityDiscoveryConfig { keys: vec![] }), + claims: Some(runtime::ClaimsConfig { claims: vec![], vesting: vec![], }), - pallet_vesting: Some(polkadot::VestingConfig { vesting: vec![] }), - pallet_sudo: Some(polkadot::SudoConfig { key: root_key }), + pallet_vesting: Some(runtime::VestingConfig { vesting: vec![] }), + pallet_sudo: Some(runtime::SudoConfig { key: root_key }), + parachains_configuration: Some(runtime::ParachainsConfigurationConfig { + config: polkadot_runtime_parachains::configuration::HostConfiguration { + validation_upgrade_frequency: 10u32, + validation_upgrade_delay: 5, + acceptance_period: 1200, + max_code_size: 5 * 1024 * 1024, + max_pov_size: 50 * 1024 * 1024, + max_head_data_size: 32 * 1024, + group_rotation_frequency: 20, + chain_availability_period: 4, + thread_availability_period: 4, + no_show_slots: 10, + ..Default::default() + }, + }), } } diff --git a/node/test/service/src/lib.rs b/node/test/service/src/lib.rs index 4a7785cd99607d9dd5ed19df9c68ecbdbce8b4b2..4a4677a0ef1108023fd88d4cfff3153ef0ab915f 100644 --- a/node/test/service/src/lib.rs +++ b/node/test/service/src/lib.rs @@ -23,17 +23,22 @@ pub mod chain_spec; pub use chain_spec::*; use futures::future::Future; use polkadot_overseer::OverseerHandler; -use polkadot_primitives::v1::{Id as ParaId, HeadData, ValidationCode, Balance}; +use polkadot_primitives::v1::{ + Id as ParaId, HeadData, ValidationCode, Balance, CollatorPair, CollatorId, +}; use polkadot_runtime_common::BlockHashCount; use polkadot_service::{ - new_full, NewFull, FullClient, ClientHandle, ExecuteWithClient, IsCollator, + NewFull, FullClient, ClientHandle, 
ExecuteWithClient, IsCollator, +}; +use polkadot_node_subsystem::messages::{CollatorProtocolMessage, CollationGenerationMessage}; +use polkadot_test_runtime::{ + Runtime, SignedExtra, SignedPayload, VERSION, ParasSudoWrapperCall, SudoCall, UncheckedExtrinsic, }; -use polkadot_test_runtime::{Runtime, SignedExtra, SignedPayload, VERSION, ParasSudoWrapperCall, UncheckedExtrinsic}; +use polkadot_node_primitives::{CollatorFn, CollationGenerationConfig}; use polkadot_runtime_parachains::paras::ParaGenesisArgs; use sc_chain_spec::ChainSpec; use sc_client_api::execution_extensions::ExecutionStrategies; use sc_executor::native_executor_instance; -use sc_informant::OutputFormat; use sc_network::{ config::{NetworkConfiguration, TransportConfig}, multiaddr, @@ -64,20 +69,20 @@ pub type Client = FullClient Result< NewFull>, ServiceError, > { - new_full::( + polkadot_service::new_full::( config, - authority_discovery_disabled, - IsCollator::No, + is_collator, None, + polkadot_parachain::wasm_executor::IsolationStrategy::InProcess, ).map_err(Into::into) } @@ -102,11 +107,14 @@ pub fn node_config( task_executor: TaskExecutor, key: Sr25519Keyring, boot_nodes: Vec, + is_validator: bool, ) -> Configuration { let base_path = BasePath::new_temp_dir().expect("could not create temporary directory"); let root = base_path.path(); - let role = Role::Authority { - sentry_nodes: Vec::new(), + let role = if is_validator { + Role::Authority { sentry_nodes: Vec::new() } + } else { + Role::Full }; let key_seed = key.to_seed(); let mut spec = polkadot_local_testnet_config(); @@ -124,17 +132,19 @@ pub fn node_config( Default::default(), None, ); - let informant_output_format = OutputFormat { - enable_color: false, - }; network_config.boot_nodes = boot_nodes; network_config.allow_non_globals_in_dht = true; + let addr: multiaddr::Multiaddr = multiaddr::Protocol::Memory(rand::random()).into(); network_config .listen_addresses - .push(multiaddr::Protocol::Memory(rand::random()).into()); + 
.push(addr.clone()); + + network_config + .public_addresses + .push(addr); network_config.transport = TransportConfig::MemoryOnly; @@ -145,10 +155,8 @@ pub fn node_config( task_executor, transaction_pool: Default::default(), network: network_config, - keystore: KeystoreConfig::Path { - path: root.join("key"), - password: None, - }, + keystore: KeystoreConfig::InMemory, + keystore_remote: Default::default(), database: DatabaseConfig::RocksDb { path: root.join("db"), cache_size: 128, @@ -186,29 +194,65 @@ pub fn node_config( max_runtime_instances: 8, announce_block: true, base_path: Some(base_path), - informant_output_format, + informant_output_format: Default::default(), + disable_log_reloading: false, + } +} + +/// Run a test validator node that uses the test runtime. +/// +/// The node will be using an in-memory socket, therefore you need to provide boot nodes if you +/// want it to be connected to other nodes. +/// +/// The `storage_update_func` function will be executed in an externalities provided environment +/// and can be used to make adjustements to the runtime genesis storage. +pub fn run_validator_node( + task_executor: TaskExecutor, + key: Sr25519Keyring, + storage_update_func: impl Fn(), + boot_nodes: Vec, +) -> PolkadotTestNode { + let config = node_config(storage_update_func, task_executor, key, boot_nodes, true); + let multiaddr = config.network.listen_addresses[0].clone(); + let NewFull { task_manager, client, network, rpc_handlers, overseer_handler, .. } = + new_full(config, IsCollator::No).expect("could not create Polkadot test service"); + + let overseer_handler = overseer_handler.expect("test node must have an overseer handler"); + let peer_id = network.local_peer_id().clone(); + let addr = MultiaddrWithPeerId { multiaddr, peer_id }; + + PolkadotTestNode { + task_manager, + client, + overseer_handler, + addr, + rpc_handlers, } } -/// Run a Polkadot test node using the Polkadot test runtime. 
+/// Run a test collator node that uses the test runtime. /// /// The node will be using an in-memory socket, therefore you need to provide boot nodes if you /// want it to be connected to other nodes. /// /// The `storage_update_func` function will be executed in an externalities provided environment /// and can be used to make adjustements to the runtime genesis storage. -pub fn run_test_node( +/// +/// # Note +/// +/// The collator functionionality still needs to be registered at the node! This can be done using +/// [`PolkadotTestNode::register_collator`]. +pub fn run_collator_node( task_executor: TaskExecutor, key: Sr25519Keyring, storage_update_func: impl Fn(), boot_nodes: Vec, + collator_id: CollatorId, ) -> PolkadotTestNode { - let config = node_config(storage_update_func, task_executor, key, boot_nodes); + let config = node_config(storage_update_func, task_executor, key, boot_nodes, false); let multiaddr = config.network.listen_addresses[0].clone(); - let authority_discovery_disabled = false; - let NewFull {task_manager, client, network, rpc_handlers, overseer_handler, ..} = - polkadot_test_new_full(config, authority_discovery_disabled) - .expect("could not create Polkadot test service"); + let NewFull { task_manager, client, network, rpc_handlers, overseer_handler, .. 
} = + new_full(config, IsCollator::Yes(collator_id)).expect("could not create Polkadot test service"); let overseer_handler = overseer_handler.expect("test node must have an overseer handler"); let peer_id = network.local_peer_id().clone(); @@ -253,19 +297,19 @@ impl PolkadotTestNode { pub async fn register_parachain( &self, id: ParaId, - validation_code: ValidationCode, - genesis_head: HeadData, + validation_code: impl Into, + genesis_head: impl Into, ) -> Result<(), RpcTransactionError> { let call = ParasSudoWrapperCall::sudo_schedule_para_initialize( id, ParaGenesisArgs { - genesis_head, - validation_code, + genesis_head: genesis_head.into(), + validation_code: validation_code.into(), parachain: true, }, ); - self.send_extrinsic(call, Sr25519Keyring::Alice).await.map(drop) + self.send_extrinsic(SudoCall::sudo(Box::new(call.into())), Sr25519Keyring::Alice).await.map(drop) } /// Wait for `count` blocks to be imported in the node and then exit. This function will not return if no blocks @@ -273,6 +317,28 @@ impl PolkadotTestNode { pub fn wait_for_blocks(&self, count: usize) -> impl Future { self.client.wait_for_blocks(count) } + + /// Register the collator functionality in the overseer of this node. + pub async fn register_collator( + &mut self, + collator_key: CollatorPair, + para_id: ParaId, + collator: CollatorFn, + ) { + let config = CollationGenerationConfig { + key: collator_key, + collator, + para_id, + }; + + self.overseer_handler + .send_msg(CollationGenerationMessage::Initialize(config)) + .await; + + self.overseer_handler + .send_msg(CollatorProtocolMessage::CollateOn(para_id)) + .await; + } } /// Construct an extrinsic that can be applied to the test runtime. 
diff --git a/node/test/service/tests/build-blocks.rs b/node/test/service/tests/build-blocks.rs index b809f188aafc9fff50e5cbfbdb521efe54c54cc8..bad22e7f0ff7808f479fe6de5b18899cf407bd96 100644 --- a/node/test/service/tests/build-blocks.rs +++ b/node/test/service/tests/build-blocks.rs @@ -21,13 +21,15 @@ use sp_keyring::Sr25519Keyring; #[substrate_test_utils::test] async fn ensure_test_service_build_blocks(task_executor: TaskExecutor) { - let mut alice = run_test_node( + sc_cli::init_logger("", Default::default(), None, false).expect("Sets up logger"); + + let mut alice = run_validator_node( task_executor.clone(), Sr25519Keyring::Alice, || {}, Vec::new(), ); - let mut bob = run_test_node( + let mut bob = run_validator_node( task_executor.clone(), Sr25519Keyring::Bob, || {}, diff --git a/node/test/service/tests/call-function.rs b/node/test/service/tests/call-function.rs index 184755627f87cdfc8db5c930835240694c8e4b50..c6802234c9c81fee65500e73fd1ff9f248ed8866 100644 --- a/node/test/service/tests/call-function.rs +++ b/node/test/service/tests/call-function.rs @@ -20,7 +20,7 @@ use sp_keyring::Sr25519Keyring::{Alice, Bob}; #[substrate_test_utils::test] async fn call_function_actually_work(task_executor: TaskExecutor) { - let alice = run_test_node(task_executor, Alice, || {}, Vec::new()); + let alice = run_validator_node(task_executor, Alice, || {}, Vec::new()); let function = polkadot_test_runtime::Call::Balances(pallet_balances::Call::transfer( Default::default(), @@ -37,7 +37,7 @@ async fn call_function_actually_work(task_executor: TaskExecutor) { assert_eq!( result.as_str().map(|x| x.starts_with("0x")), Some(true), - "result starts with 0x" + "result starts with 0x", ); alice.task_manager.clean_shutdown().await; diff --git a/parachain/Cargo.toml b/parachain/Cargo.toml index ed9c3c74c909407484eb7b4db14ee88637900795..235a7b2fefe74162c8391a018812d672b54cf325 100644 --- a/parachain/Cargo.toml +++ b/parachain/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "polkadot-parachain" 
-version = "0.8.26" +version = "0.8.27" authors = ["Parity Technologies "] description = "Types and utilities for creating and working with parachains" edition = "2018" @@ -9,22 +9,23 @@ edition = "2018" # note: special care is taken to avoid inclusion of `sp-io` externals when compiling # this crate for WASM. This is critical to avoid forcing all parachain WASM into implementing # various unnecessary Substrate-specific endpoints. -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = [ "derive" ] } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-wasm-interface = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +parity-scale-codec = { version = "1.3.5", default-features = false, features = [ "derive" ] } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-wasm-interface = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } polkadot-core-primitives = { path = "../core-primitives", default-features = false } +derive_more = "0.99.11" # all optional crates. 
-derive_more = { version = "0.99.11", optional = true } -serde = { version = "1.0.102", default-features = false, features = [ "derive" ], optional = true } -sp-externalities = { git = "https://github.com/paritytech/substrate", branch = "master", optional = true } -sc-executor = { git = "https://github.com/paritytech/substrate", branch = "master", optional = true } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", optional = true } -parking_lot = { version = "0.10.0", optional = true } -log = { version = "0.4.8", optional = true } -futures = { version = "0.3.4", optional = true } +thiserror = { version = "1.0.22", optional = true } +serde = { version = "1.0.117", default-features = false, features = [ "derive" ], optional = true } +sp-externalities = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", optional = true } +sc-executor = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", optional = true } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", optional = true } +parking_lot = { version = "0.11.0", optional = true } +log = { version = "0.4.11", optional = true } +futures = { version = "0.3.8", optional = true } [target.'cfg(not(any(target_os = "android", target_os = "unknown")))'.dependencies] shared_memory = { version = "0.10.0", optional = true } @@ -33,8 +34,8 @@ shared_memory = { version = "0.10.0", optional = true } default = ["std"] wasm-api = [] std = [ - "codec/std", - "derive_more", + "parity-scale-codec/std", + "thiserror", "serde/std", "sp-std/std", "sp-runtime/std", diff --git a/parachain/src/primitives.rs b/parachain/src/primitives.rs index 83edc4e0f2a688c9001e8563491d7be73a16c3d4..e3c97620bfd6625c4157a9947465b9bf1e302b26 100644 --- a/parachain/src/primitives.rs +++ b/parachain/src/primitives.rs @@ -19,7 +19,7 @@ use sp_std::vec::Vec; -use codec::{Encode, Decode, CompactAs}; +use 
parity_scale_codec::{Encode, Decode, CompactAs}; use sp_core::{RuntimeDebug, TypeId}; #[cfg(feature = "std")] @@ -28,37 +28,25 @@ use serde::{Serialize, Deserialize}; #[cfg(feature = "std")] use sp_core::bytes; -use polkadot_core_primitives::Hash; +use polkadot_core_primitives::{Hash, OutboundHrmpMessage}; /// Block number type used by the relay chain. pub use polkadot_core_primitives::BlockNumber as RelayChainBlockNumber; /// Parachain head data included in the chain. -#[derive(PartialEq, Eq, Clone, PartialOrd, Ord, Encode, Decode, RuntimeDebug)] +#[derive(PartialEq, Eq, Clone, PartialOrd, Ord, Encode, Decode, RuntimeDebug, derive_more::From)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize, Default, Hash))] pub struct HeadData(#[cfg_attr(feature = "std", serde(with="bytes"))] pub Vec); -impl From> for HeadData { - fn from(head: Vec) -> Self { - HeadData(head) - } -} - /// Parachain validation code. -#[derive(Default, PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] +#[derive(Default, PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, derive_more::From)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash))] pub struct ValidationCode(#[cfg_attr(feature = "std", serde(with="bytes"))] pub Vec); -impl From> for ValidationCode { - fn from(code: Vec) -> Self { - ValidationCode(code) - } -} - /// Parachain block data. /// /// Contains everything required to validate para-block, may contain block and witness data. 
-#[derive(PartialEq, Eq, Clone, Encode, Decode)] +#[derive(PartialEq, Eq, Clone, Encode, Decode, derive_more::From)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize, Debug))] pub struct BlockData(#[cfg_attr(feature = "std", serde(with="bytes"))] pub Vec); @@ -147,6 +135,45 @@ impl sp_std::ops::Add for Id { } } +#[derive(Clone, Copy, Default, Encode, Decode, Eq, PartialEq, Ord, PartialOrd, RuntimeDebug)] +pub struct Sibling(pub Id); + +impl From for Sibling { + fn from(i: Id) -> Self { + Self(i) + } +} + +impl From for Id { + fn from(i: Sibling) -> Self { + i.0 + } +} + +impl AsRef for Sibling { + fn as_ref(&self) -> &Id { + &self.0 + } +} + +impl TypeId for Sibling { + const TYPE_ID: [u8; 4] = *b"sibl"; +} + +impl From for u32 { + fn from(x: Sibling) -> Self { x.0.into() } +} + +impl From for Sibling { + fn from(x: u32) -> Self { Sibling(x.into()) } +} + +impl IsSystem for Sibling { + fn is_system(&self) -> bool { + IsSystem::is_system(&self.0) + } +} + /// This type can be converted into and possibly from an AccountId (which itself is generic). pub trait AccountIdConversion: Sized { /// Convert into an account ID. This is infallible. @@ -159,12 +186,12 @@ pub trait AccountIdConversion: Sized { // TODO: Remove all of this, move sp-runtime::AccountIdConversion to own crate and and use that. // #360 struct TrailingZeroInput<'a>(&'a [u8]); -impl<'a> codec::Input for TrailingZeroInput<'a> { - fn remaining_len(&mut self) -> Result, codec::Error> { +impl<'a> parity_scale_codec::Input for TrailingZeroInput<'a> { + fn remaining_len(&mut self) -> Result, parity_scale_codec::Error> { Ok(None) } - fn read(&mut self, into: &mut [u8]) -> Result<(), codec::Error> { + fn read(&mut self, into: &mut [u8]) -> Result<(), parity_scale_codec::Error> { let len = into.len().min(self.0.len()); into[..len].copy_from_slice(&self.0[..len]); for i in &mut into[len..] 
{ @@ -198,44 +225,23 @@ impl AccountIdConversion for Id { } } -/// Which origin a parachain's message to the relay chain should be dispatched from. -#[derive(Clone, PartialEq, Eq, Encode, Decode)] -#[cfg_attr(feature = "std", derive(Debug, Hash))] -#[repr(u8)] -pub enum ParachainDispatchOrigin { -    /// As a simple `Origin::Signed`, using `ParaId::account_id` as its value. This is good when -    /// interacting with standard modules such as `balances`. -    Signed, -    /// As the special `Origin::Parachain(ParaId)`. This is good when interacting with parachain- -    /// aware modules which need to succinctly verify that the origin is a parachain. -    Parachain, -    /// As the simple, superuser `Origin::Root`. This can only be done on specially permissioned -    /// parachains. -    Root, -} - -impl sp_std::convert::TryFrom for ParachainDispatchOrigin { -    type Error = (); -    fn try_from(x: u8) -> core::result::Result { -        const SIGNED: u8 = ParachainDispatchOrigin::Signed as u8; -        const PARACHAIN: u8 = ParachainDispatchOrigin::Parachain as u8; -        Ok(match x { -            SIGNED => ParachainDispatchOrigin::Signed, -            PARACHAIN => ParachainDispatchOrigin::Parachain, -            _ => return Err(()), -        }) -    } +/// A type that uniquely identifies an HRMP channel. An HRMP channel is established between two paras. +/// In text, we use the notation `(A, B)` to specify a channel between A and B. The channels are +/// unidirectional, meaning that `(A, B)` and `(B, A)` refer to different channels. The convention is +/// that we use the first item of the tuple for the sender and the second for the recipient. Only one channel +/// is allowed between two participants in one direction, i.e. there cannot be 2 different channels +/// identified by `(A, B)`. +#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] +#[cfg_attr(feature = "std", derive(Hash))] +pub struct HrmpChannelId { +    /// The para that acts as the sender in this channel. +    pub sender: Id, +    /// The para that acts as the recipient in this channel.
+ pub recipient: Id, } /// A message from a parachain to its Relay Chain. -#[derive(Clone, PartialEq, Eq, Encode, Decode)] -#[cfg_attr(feature = "std", derive(Debug, Hash))] -pub struct UpwardMessage { - /// The origin for the message to be sent from. - pub origin: ParachainDispatchOrigin, - /// The message data. - pub data: Vec, -} +pub type UpwardMessage = Vec; /// Validation parameters for evaluating the parachain validity function. // TODO: balance downloads (https://github.com/paritytech/polkadot/issues/220) @@ -260,7 +266,7 @@ pub struct ValidationParams { } /// The result of parachain validation. -// TODO: egress and balance uploads (https://github.com/paritytech/polkadot/issues/220) +// TODO: balance uploads (https://github.com/paritytech/polkadot/issues/220) #[derive(PartialEq, Eq, Encode)] #[cfg_attr(feature = "std", derive(Debug, Decode))] pub struct ValidationResult { @@ -270,8 +276,12 @@ pub struct ValidationResult { pub new_validation_code: Option, /// Upward messages send by the Parachain. pub upward_messages: Vec, + /// Outbound horizontal messages sent by the parachain. + pub horizontal_messages: Vec>, /// Number of downward messages that were processed by the Parachain. /// /// It is expected that the Parachain processes them from first to last. pub processed_downward_messages: u32, + /// The mark which specifies the block number up to which all inbound HRMP messages are processed. 
+ pub hrmp_watermark: RelayChainBlockNumber, } diff --git a/parachain/src/wasm_api.rs b/parachain/src/wasm_api.rs index 9c7eac25f1e57516c31c62a117bfe48f632e0d5f..99bed554147b76b5fe2ed4f4627dc6318bb3b416 100644 --- a/parachain/src/wasm_api.rs +++ b/parachain/src/wasm_api.rs @@ -26,7 +26,7 @@ pub unsafe fn load_params(params: *const u8, len: usize) { let mut slice = sp_std::slice::from_raw_parts(params, len); - codec::Decode::decode(&mut slice).expect("Invalid input data") + parity_scale_codec::Decode::decode(&mut slice).expect("Invalid input data") } /// Allocate the validation result in memory, getting the return-pointer back. diff --git a/parachain/src/wasm_executor/mod.rs b/parachain/src/wasm_executor/mod.rs index f578039362cc0dc156ba5b5c3a909d6eec85bc39..2ab2e9064cc86b3f3a740ac8808e19872d927ee6 100644 --- a/parachain/src/wasm_executor/mod.rs +++ b/parachain/src/wasm_executor/mod.rs @@ -22,7 +22,7 @@ use std::{any::{TypeId, Any}, path::PathBuf}; use crate::primitives::{ValidationParams, ValidationResult}; -use codec::{Decode, Encode}; +use parity_scale_codec::{Decode, Encode}; use sp_core::{storage::{ChildInfo, TrackedStorageKey}, traits::{CallInWasm, SpawnNamed}}; use sp_externalities::Extensions; use sp_wasm_interface::HostFunctions as _; @@ -37,38 +37,49 @@ const MAX_RUNTIME_MEM: usize = 1024 * 1024 * 1024; // 1 GiB const MAX_CODE_MEM: usize = 16 * 1024 * 1024; // 16 MiB const MAX_VALIDATION_RESULT_HEADER_MEM: usize = MAX_CODE_MEM + 1024; // 16.001 MiB -/// A stub validation-pool defined when compiling for Android or WASM. -#[cfg(any(target_os = "android", target_os = "unknown"))] -#[derive(Clone)] -pub struct ValidationPool { - _inner: (), // private field means not publicly-instantiable -} - -#[cfg(any(target_os = "android", target_os = "unknown"))] -impl ValidationPool { - /// Create a new `ValidationPool`. - pub fn new() -> Self { - ValidationPool { _inner: () } - } -} - -/// A stub function defined when compiling for Android or WASM. 
-#[cfg(any(target_os = "android", target_os = "unknown"))] -pub fn run_worker(_: &str) -> Result<(), String> { -    Err("Cannot run validation worker on this platform".to_string()) -} - -/// The execution mode for the `ValidationPool`. -#[derive(Clone)] -#[cfg_attr(not(any(target_os = "android", target_os = "unknown")), derive(Debug))] -pub enum ExecutionMode { +/// The strategy we employ for isolating execution of wasm parachain validation function (PVF). +/// +/// For a typical validator an external process is the default way to run PVF. The rationale is based +/// on the following observations: +/// +/// (a) PVF is completely under control of parachain developers who may or may not be malicious. +/// (b) Collators are in charge of providing PoV who also may or may not be malicious. +/// (c) PVF is executed by a wasm engine based on optimizing compiler which is a very complex piece +/// of machinery. +/// +/// (a) and (b) may lead to a situation where due to a combination of PVF and PoV the validation work +/// can get stuck in an infinite loop, which can open up resource exhaustion or DoS attack vectors. +/// +/// While some execution engines provide functionality to interrupt execution of wasm module from +/// another thread, there are also some caveats to that: there is no clean way to interrupt execution +/// if the control flow is in the host side and at the moment we haven't rigorously vetted that all +/// host functions terminate or, at least, return in a short amount of time. Additionally, we want +/// some freedom on choosing wasm execution environment. +/// +/// On top of that, execution in a separate process helps to minimize impact of (c) if exploited. +/// It's not only the risk of miscompilation, but it also includes risk of JIT-bombs, i.e. cases +/// of specially crafted code that take enormous amounts of time and memory to compile.
+/// +/// At the same time, since PVF validates self-contained candidates, validation workers don't require +/// extensive communication with polkadot host, therefore there should be no observable performance penalty +/// coming from inter process communication. +/// +/// All of the above should give a sense why isolation is crucial for a typical use-case. +/// +/// However, in some cases, e.g. when running PVF validation on android (for whatever reason), we +/// cannot afford the luxury of process isolation and thus there is an option to run validation in +/// process. Also, running in process is convenient for testing. +#[derive(Clone, Debug)] +pub enum IsolationStrategy { /// The validation worker is ran in a thread inside the same process. InProcess, /// The validation worker is ran using the process' executable and the subcommand `validation-worker` is passed /// following by the address of the shared memory. + #[cfg(not(any(target_os = "android", target_os = "unknown")))] ExternalProcessSelfHost(ValidationPool), /// The validation worker is ran using the command provided and the argument provided. The address of the shared /// memory is added at the end of the arguments. + #[cfg(not(any(target_os = "android", target_os = "unknown")))] ExternalProcessCustomHost { /// Validation pool. pool: ValidationPool, @@ -80,64 +91,75 @@ pub enum ExecutionMode { }, } +impl Default for IsolationStrategy { + fn default() -> Self { + #[cfg(not(any(target_os = "android", target_os = "unknown")))] + { + Self::ExternalProcessSelfHost(ValidationPool::new()) + } -#[derive(Debug, derive_more::Display, derive_more::From)] + #[cfg(any(target_os = "android", target_os = "unknown"))] + { + Self::InProcess + } + } +} + +#[derive(Debug, thiserror::Error)] /// Candidate validation error. pub enum ValidationError { /// Validation failed due to internal reasons. The candidate might still be valid. 
- Internal(InternalError), + #[error(transparent)] + Internal(#[from] InternalError), /// Candidate is invalid. - InvalidCandidate(InvalidCandidate), + #[error(transparent)] + InvalidCandidate(#[from] InvalidCandidate), } /// Error type that indicates invalid candidate. -#[derive(Debug, derive_more::Display, derive_more::From)] +#[derive(Debug, thiserror::Error)] pub enum InvalidCandidate { /// Wasm executor error. - #[display(fmt = "WASM executor error: {:?}", _0)] - WasmExecutor(sc_executor::error::Error), + #[error("WASM executor error")] + WasmExecutor(#[from] sc_executor::error::Error), /// Call data is too large. - #[display(fmt = "Validation parameters are {} bytes, max allowed is {}", _0, MAX_RUNTIME_MEM)] - #[from(ignore)] + #[error("Validation parameters are {0} bytes, max allowed is {}", MAX_RUNTIME_MEM)] ParamsTooLarge(usize), /// Code size it too large. - #[display(fmt = "WASM code is {} bytes, max allowed is {}", _0, MAX_CODE_MEM)] + #[error("WASM code is {0} bytes, max allowed is {}", MAX_CODE_MEM)] CodeTooLarge(usize), /// Error decoding returned data. - #[display(fmt = "Validation function returned invalid data.")] + #[error("Validation function returned invalid data.")] BadReturn, - #[display(fmt = "Validation function timeout.")] + #[error("Validation function timeout.")] Timeout, - #[display(fmt = "External WASM execution error: {}", _0)] + #[error("External WASM execution error: {0}")] ExternalWasmExecutor(String), } +impl core::convert::From for InvalidCandidate { + fn from(s: String) -> Self { + Self::ExternalWasmExecutor(s) + } +} + /// Host error during candidate validation. This does not indicate an invalid candidate. 
-#[derive(Debug, derive_more::Display, derive_more::From)] +#[derive(Debug, thiserror::Error)] pub enum InternalError { - #[display(fmt = "IO error: {}", _0)] - Io(std::io::Error), - #[display(fmt = "System error: {}", _0)] - System(Box), - #[display(fmt = "Shared memory error: {}", _0)] + #[error("IO error: {0}")] + Io(#[from] std::io::Error), + + #[error("System error: {0}")] + System(#[from] Box), + #[cfg(not(any(target_os = "android", target_os = "unknown")))] - SharedMem(shared_memory::SharedMemError), - #[display(fmt = "WASM worker error: {}", _0)] + #[error("Shared memory error: {0}")] + SharedMem(#[from] shared_memory::SharedMemError), + + #[error("WASM worker error: {0}")] WasmWorker(String), } -impl std::error::Error for ValidationError { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - ValidationError::Internal(InternalError::Io(ref err)) => Some(err), - ValidationError::Internal(InternalError::System(ref err)) => Some(&**err), - #[cfg(not(any(target_os = "android", target_os = "unknown")))] - ValidationError::Internal(InternalError::SharedMem(ref err)) => Some(err), - ValidationError::InvalidCandidate(InvalidCandidate::WasmExecutor(ref err)) => Some(err), - _ => None, - } - } -} /// Validate a candidate under the given validation code. 
/// @@ -145,29 +167,22 @@ impl std::error::Error for ValidationError { pub fn validate_candidate( validation_code: &[u8], params: ValidationParams, - execution_mode: &ExecutionMode, + isolation_strategy: &IsolationStrategy, spawner: impl SpawnNamed + 'static, ) -> Result { - match execution_mode { - ExecutionMode::InProcess => { + match isolation_strategy { + IsolationStrategy::InProcess => { validate_candidate_internal(validation_code, ¶ms.encode(), spawner) }, #[cfg(not(any(target_os = "android", target_os = "unknown")))] - ExecutionMode::ExternalProcessSelfHost(pool) => { + IsolationStrategy::ExternalProcessSelfHost(pool) => { pool.validate_candidate(validation_code, params) }, #[cfg(not(any(target_os = "android", target_os = "unknown")))] - ExecutionMode::ExternalProcessCustomHost { pool, binary, args } => { + IsolationStrategy::ExternalProcessCustomHost { pool, binary, args } => { let args: Vec<&str> = args.iter().map(|x| x.as_str()).collect(); pool.validate_candidate_custom(validation_code, params, binary, &args) }, - #[cfg(any(target_os = "android", target_os = "unknown"))] - ExecutionMode::ExternalProcessSelfHost(_) | ExecutionMode::ExternalProcessCustomHost { .. 
} => - Err(ValidationError::Internal(InternalError::System( - Box::::from( - "Remote validator not available".to_string() - ) as Box<_> - ))), } } @@ -230,7 +245,7 @@ impl sp_externalities::Externalities for ValidationExternalities { panic!("child_storage: unsupported feature for parachain validation") } - fn kill_child_storage(&mut self, _: &ChildInfo) { + fn kill_child_storage(&mut self, _: &ChildInfo, _: Option) -> bool { panic!("kill_child_storage: unsupported feature for parachain validation") } diff --git a/parachain/src/wasm_executor/validation_host.rs b/parachain/src/wasm_executor/validation_host.rs index 07367d3318fed093ccc720ab2635bf4222778a64..d49f82b6bd77e63e0a9bd35f602da575260883ae 100644 --- a/parachain/src/wasm_executor/validation_host.rs +++ b/parachain/src/wasm_executor/validation_host.rs @@ -17,7 +17,7 @@ #![cfg(not(any(target_os = "android", target_os = "unknown")))] use std::{process, env, sync::Arc, sync::atomic, path::PathBuf}; -use codec::{Decode, Encode}; +use parity_scale_codec::{Decode, Encode}; use crate::primitives::{ValidationParams, ValidationResult}; use super::{ validate_candidate_internal, ValidationError, InvalidCandidate, InternalError, diff --git a/parachain/test-parachains/Cargo.toml b/parachain/test-parachains/Cargo.toml index d391c1b2314dc70e256b796ad127adb4235cf376..36b0e8bb47a857d12d4a6c0cb957a641ec965a20 100644 --- a/parachain/test-parachains/Cargo.toml +++ b/parachain/test-parachains/Cargo.toml @@ -6,15 +6,15 @@ description = "Integration tests using the test-parachains" edition = "2018" [dependencies] -tiny-keccak = "1.5.0" -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } +tiny-keccak = "2.0.2" +parity-scale-codec = { version = "1.3.5", default-features = false, features = ["derive"] } parachain = { package = "polkadot-parachain", path = ".." 
} adder = { package = "test-parachain-adder", path = "adder" } halt = { package = "test-parachain-halt", path = "halt" } [dev-dependencies] -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } [features] default = [ "std" ] diff --git a/parachain/test-parachains/adder/Cargo.toml b/parachain/test-parachains/adder/Cargo.toml index e18ba41872f99650435c71f301cb1610916d3165..e007e2465f805ef4631475a1eedfc0f2d23e3078 100644 --- a/parachain/test-parachains/adder/Cargo.toml +++ b/parachain/test-parachains/adder/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "test-parachain-adder" -version = "0.8.26" +version = "0.8.27" authors = ["Parity Technologies "] description = "Test parachain which adds to a number as its state transition" edition = "2018" @@ -8,16 +8,16 @@ build = "build.rs" [dependencies] parachain = { package = "polkadot-parachain", path = "../../", default-features = false, features = [ "wasm-api" ] } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -tiny-keccak = "1.5.0" -dlmalloc = { version = "0.1.3", features = [ "global" ] } +parity-scale-codec = { version = "1.3.5", default-features = false, features = ["derive"] } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +tiny-keccak = { version = "2.0.2", features = ["keccak"] } +dlmalloc = { version = "0.2.1", features = [ "global" ] } # We need to make sure the global allocator is disabled until we have support of full substrate externalities -runtime-io = { package = "sp-io", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, features = [ "disable_allocator" ] } +sp-io = { git = 
"https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false, features = [ "disable_allocator" ] } [build-dependencies] -wasm-builder-runner = { package = "substrate-wasm-builder-runner", version = "2.0.0" } +substrate-wasm-builder = "3.0.0" [features] default = [ "std" ] diff --git a/parachain/test-parachains/adder/build.rs b/parachain/test-parachains/adder/build.rs index 6ed2a43655794c07f4c875c848dd93be24f6dc29..ac1ce327cf9086d6305b3bfb1da3b547df7c28e9 100644 --- a/parachain/test-parachains/adder/build.rs +++ b/parachain/test-parachains/adder/build.rs @@ -14,12 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use wasm_builder_runner::WasmBuilder; +use substrate_wasm_builder::WasmBuilder; fn main() { WasmBuilder::new() .with_current_project() - .with_wasm_builder_from_crates("2.0.1") .export_heap_base() .build() } diff --git a/parachain/test-parachains/adder/collator/Cargo.toml b/parachain/test-parachains/adder/collator/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..3f0b0413e1fbd77e63be5d1f29252fe8ea6ddb10 --- /dev/null +++ b/parachain/test-parachains/adder/collator/Cargo.toml @@ -0,0 +1,42 @@ +[package] +name = "test-parachain-adder-collator" +version = "0.7.26" +authors = ["Parity Technologies "] +description = "Collator for the adder test parachain" +edition = "2018" + +[[bin]] +name = "adder-collator" +path = "src/main.rs" + +[dependencies] +parity-scale-codec = { version = "1.3.5", default-features = false, features = ["derive"] } +futures = "0.3.8" +futures-timer = "3.0.2" +log = "0.4.11" +structopt = "0.3.21" + +test-parachain-adder = { path = ".." 
} +polkadot-primitives = { path = "../../../../primitives" } +polkadot-cli = { path = "../../../../cli" } +polkadot-service = { path = "../../../../node/service" } +polkadot-node-primitives = { path = "../../../../node/primitives" } +polkadot-node-subsystem = { path = "../../../../node/subsystem" } + +sc-cli = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sc-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sc-service = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } + +[dev-dependencies] +polkadot-parachain = { path = "../../.." } +polkadot-test-service = { path = "../../../../node/test/service" } + +substrate-test-utils = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sc-service = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } + +tokio = { version = "0.2", features = ["macros"] } + +[features] +real-overseer = [ "polkadot-service/real-overseer" ] diff --git a/parachain/test-parachains/adder/collator/README.md b/parachain/test-parachains/adder/collator/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e3181a84dd5076afb989683d33ee8d9e2a893065 --- /dev/null +++ b/parachain/test-parachains/adder/collator/README.md @@ -0,0 +1,17 @@ +# How to run this collator + +First start two validators that will run for the relay chain: +```sh +cargo run --features=real-overseer --release -- -d alice --chain rococo-local --validator --alice --port 50551 +cargo run --features=real-overseer --release -- -d bob --chain rococo-local --validator --bob --port 50552 +``` + +Next start the collator that will collate for the adder 
parachain: +```sh +cargo run --features=real-overseer --release -p test-parachain-adder-collator -- --tmp --chain rococo-local --port 50553 +``` + +The last step is to register the parachain using polkadot-js. The parachain id is +100. The genesis state and the validation code are printed at startup by the collator. + +To do this automatically, run `scripts/adder-collator.sh`. diff --git a/parachain/test-parachains/adder/collator/src/cli.rs b/parachain/test-parachains/adder/collator/src/cli.rs new file mode 100644 index 0000000000000000000000000000000000000000..ae6e9ef9008f29e92c777b2f34acf2c047810e41 --- /dev/null +++ b/parachain/test-parachains/adder/collator/src/cli.rs @@ -0,0 +1,115 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Polkadot CLI library. + +use sc_cli::{RuntimeVersion, SubstrateCli}; +use structopt::StructOpt; + +/// Sub-commands supported by the collator. +#[derive(Debug, StructOpt)] +pub enum Subcommand { + /// Export the genesis state of the parachain. + #[structopt(name = "export-genesis-state")] + ExportGenesisState(ExportGenesisStateCommand), + + /// Export the genesis wasm of the parachain. 
+ #[structopt(name = "export-genesis-wasm")] + ExportGenesisWasm(ExportGenesisWasmCommand), +} + +/// Command for exporting the genesis state of the parachain +#[derive(Debug, StructOpt)] +pub struct ExportGenesisStateCommand {} + +/// Command for exporting the genesis wasm file. +#[derive(Debug, StructOpt)] +pub struct ExportGenesisWasmCommand {} + +#[allow(missing_docs)] +#[derive(Debug, StructOpt)] +pub struct RunCmd { + #[allow(missing_docs)] + #[structopt(flatten)] + pub base: sc_cli::RunCmd, + + /// Id of the parachain this collator collates for. + #[structopt(long)] + pub parachain_id: Option, +} + +#[allow(missing_docs)] +#[derive(Debug, StructOpt)] +pub struct Cli { + #[structopt(subcommand)] + pub subcommand: Option, + + #[structopt(flatten)] + pub run: RunCmd, +} + +impl SubstrateCli for Cli { + fn impl_name() -> String { + "Parity Polkadot".into() + } + + fn impl_version() -> String { + "0.0.0".into() + } + + fn description() -> String { + env!("CARGO_PKG_DESCRIPTION").into() + } + + fn author() -> String { + env!("CARGO_PKG_AUTHORS").into() + } + + fn support_url() -> String { + "https://github.com/paritytech/polkadot/issues/new".into() + } + + fn copyright_start_year() -> i32 { + 2017 + } + + fn executable_name() -> String { + "polkadot".into() + } + + fn load_spec(&self, id: &str) -> std::result::Result, String> { + let id = if id.is_empty() { "rococo" } else { id }; + Ok(match id { + "rococo-staging" => { + Box::new(polkadot_service::chain_spec::rococo_staging_testnet_config()?) + } + "rococo-local" => { + Box::new(polkadot_service::chain_spec::rococo_local_testnet_config()?) + } + "rococo" => Box::new(polkadot_service::chain_spec::rococo_config()?), + path => { + let path = std::path::PathBuf::from(path); + Box::new(polkadot_service::RococoChainSpec::from_json_file(path)?) 
+ } + }) + } + + fn native_runtime_version( + _spec: &Box, + ) -> &'static RuntimeVersion { + &polkadot_service::rococo_runtime::VERSION + } +} diff --git a/parachain/test-parachains/adder/collator/src/lib.rs b/parachain/test-parachains/adder/collator/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..ac50a4f69396b9f043853475307941ab2377daa5 --- /dev/null +++ b/parachain/test-parachains/adder/collator/src/lib.rs @@ -0,0 +1,278 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Collator for the adder test parachain. + +use futures_timer::Delay; +use polkadot_node_primitives::{Collation, CollatorFn}; +use polkadot_primitives::v1::{CollatorId, CollatorPair, PoV}; +use parity_scale_codec::{Encode, Decode}; +use sp_core::Pair; +use std::{ + collections::HashMap, + sync::{Arc, Mutex}, + time::Duration, +}; +use test_parachain_adder::{execute, hash_state, BlockData, HeadData}; + +/// The amount we add when producing a new block. +/// +/// This is a constant to make tests easily reproducible. +const ADD: u64 = 2; + +/// Calculates the head and state for the block with the given `number`. 
+fn calculate_head_and_state_for_number(number: u64) -> (HeadData, u64) { + let mut head = HeadData { + number: 0, + parent_hash: Default::default(), + post_state: hash_state(0), + }; + + let mut state = 0u64; + + while head.number < number { + let block = BlockData { state, add: ADD }; + head = execute(head.hash(), head.clone(), &block).expect("Produces valid block"); + state = state.wrapping_add(ADD); + } + + (head, state) +} + +/// The state of the adder parachain. +struct State { + head_to_state: HashMap, u64>, + number_to_head: HashMap>, + /// Block number of the best block. + best_block: u64, +} + +impl State { + /// Init the genesis state. + fn genesis() -> Self { + let genesis_state = Arc::new(calculate_head_and_state_for_number(0).0); + + Self { + head_to_state: vec![(genesis_state.clone(), 0)].into_iter().collect(), + number_to_head: vec![(0, genesis_state)].into_iter().collect(), + best_block: 0, + } + } + + /// Advance the state and produce a new block based on the given `parent_head`. + /// + /// Returns the new [`BlockData`] and the new [`HeadData`]. + fn advance(&mut self, parent_head: HeadData) -> (BlockData, HeadData) { + self.best_block = parent_head.number; + + let block = BlockData { + state: self + .head_to_state + .get(&parent_head) + .copied() + .unwrap_or_else(|| calculate_head_and_state_for_number(parent_head.number).1), + add: ADD, + }; + + let new_head = execute(parent_head.hash(), parent_head, &block).expect("Produces valid block"); + + let new_head_arc = Arc::new(new_head.clone()); + self.head_to_state + .insert(new_head_arc.clone(), block.state.wrapping_add(ADD)); + self.number_to_head.insert(new_head.number, new_head_arc); + + (block, new_head) + } +} + +/// The collator of the adder parachain. +pub struct Collator { + state: Arc>, + key: CollatorPair, +} + +impl Collator { + /// Create a new collator instance with the state initialized as genesis. 
+ pub fn new() -> Self { + Self { + state: Arc::new(Mutex::new(State::genesis())), + key: CollatorPair::generate().0, + } + } + + /// Get the SCALE encoded genesis head of the adder parachain. + pub fn genesis_head(&self) -> Vec { + self.state + .lock() + .unwrap() + .number_to_head + .get(&0) + .expect("Genesis header exists") + .encode() + } + + /// Get the validation code of the adder parachain. + pub fn validation_code(&self) -> &[u8] { + test_parachain_adder::wasm_binary_unwrap() + } + + /// Get the collator key. + pub fn collator_key(&self) -> CollatorPair { + self.key.clone() + } + + /// Get the collator id. + pub fn collator_id(&self) -> CollatorId { + self.key.public() + } + + /// Create the collation function. + /// + /// This collation function can be plugged into the overseer to generate collations for the adder parachain. + pub fn create_collation_function(&self) -> CollatorFn { + use futures::FutureExt as _; + + let state = self.state.clone(); + + Box::new(move |relay_parent, validation_data| { + let parent = HeadData::decode(&mut &validation_data.persisted.parent_head.0[..]) + .expect("Decodes parent head"); + + let (block_data, head_data) = state.lock().unwrap().advance(parent); + + log::info!( + "created a new collation on relay-parent({}): {:?}", + relay_parent, + block_data, + ); + + let collation = Collation { + upward_messages: Vec::new(), + horizontal_messages: Vec::new(), + new_validation_code: None, + head_data: head_data.encode().into(), + proof_of_validity: PoV { + block_data: block_data.encode().into(), + }, + processed_downward_messages: 0, + hrmp_watermark: validation_data.persisted.block_number, + }; + + async move { Some(collation) }.boxed() + }) + } + + /// Wait until `blocks` are built and enacted. 
+ pub async fn wait_for_blocks(&self, blocks: u64) { + let start_block = self.state.lock().unwrap().best_block; + loop { + Delay::new(Duration::from_secs(1)).await; + + let current_block = self.state.lock().unwrap().best_block; + + if start_block + blocks <= current_block { + return; + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use futures::executor::block_on; + use polkadot_parachain::{primitives::ValidationParams, wasm_executor::IsolationStrategy}; + use polkadot_primitives::v1::{PersistedValidationData, ValidationData}; + + #[test] + fn collator_works() { + let collator = Collator::new(); + let collation_function = collator.create_collation_function(); + + for i in 0..5 { + let parent_head = collator + .state + .lock() + .unwrap() + .number_to_head + .get(&i) + .unwrap() + .clone(); + + let validation_data = ValidationData { + persisted: PersistedValidationData { + parent_head: parent_head.encode().into(), + ..Default::default() + }, + ..Default::default() + }; + + let collation = + block_on(collation_function(Default::default(), &validation_data)).unwrap(); + validate_collation(&collator, (*parent_head).clone(), collation); + } + } + + fn validate_collation(collator: &Collator, parent_head: HeadData, collation: Collation) { + let ret = polkadot_parachain::wasm_executor::validate_candidate( + collator.validation_code(), + ValidationParams { + parent_head: parent_head.encode().into(), + block_data: collation.proof_of_validity.block_data, + relay_chain_height: 1, + hrmp_mqc_heads: Vec::new(), + dmq_mqc_head: Default::default(), + }, + &IsolationStrategy::InProcess, + sp_core::testing::TaskExecutor::new(), + ) + .unwrap(); + + let new_head = HeadData::decode(&mut &ret.head_data.0[..]).unwrap(); + assert_eq!( + **collator + .state + .lock() + .unwrap() + .number_to_head + .get(&(parent_head.number + 1)) + .unwrap(), + new_head + ); + } + + #[test] + fn advance_to_state_when_parent_head_is_missing() { + let collator = Collator::new(); + + let mut 
head = calculate_head_and_state_for_number(10).0; + + for i in 1..10 { + head = collator.state.lock().unwrap().advance(head).1; + assert_eq!(10 + i, head.number); + } + + let collator = Collator::new(); + let mut second_head = collator.state.lock().unwrap().number_to_head.get(&0).cloned().unwrap().as_ref().clone(); + + for _ in 1..20 { + second_head = collator.state.lock().unwrap().advance(second_head.clone()).1; + } + + assert_eq!(second_head, head); + } +} diff --git a/parachain/test-parachains/adder/collator/src/main.rs b/parachain/test-parachains/adder/collator/src/main.rs new file mode 100644 index 0000000000000000000000000000000000000000..03b63989cc75b8cac14343ebb56f0588020c5ad0 --- /dev/null +++ b/parachain/test-parachains/adder/collator/src/main.rs @@ -0,0 +1,98 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Collator for the adder test parachain. + +use polkadot_node_primitives::CollationGenerationConfig; +use polkadot_node_subsystem::messages::{CollationGenerationMessage, CollatorProtocolMessage}; +use polkadot_primitives::v1::Id as ParaId; +use sc_cli::{Result, Role, SubstrateCli}; +use sp_core::hexdisplay::HexDisplay; +use test_parachain_adder_collator::Collator; + +/// The parachain ID to collate for in case it wasn't set explicitly through CLI. 
+const DEFAULT_PARA_ID: ParaId = ParaId::new(100); + +mod cli; +use cli::Cli; + +fn main() -> Result<()> { + let cli = Cli::from_args(); + + match cli.subcommand { + Some(cli::Subcommand::ExportGenesisState(_params)) => { + let collator = Collator::new(); + println!("0x{:?}", HexDisplay::from(&collator.genesis_head())); + + Ok(()) + } + Some(cli::Subcommand::ExportGenesisWasm(_params)) => { + let collator = Collator::new(); + println!("0x{:?}", HexDisplay::from(&collator.validation_code())); + + Ok(()) + } + None => { + let runner = cli.create_runner(&cli.run.base)?; + + runner.run_node_until_exit(|config| async move { + let role = config.role.clone(); + + match role { + Role::Light => Err("Light client not supported".into()), + _ => { + let collator = Collator::new(); + + let full_node = polkadot_service::build_full( + config, + polkadot_service::IsCollator::Yes(collator.collator_id()), + None, + )?; + let mut overseer_handler = full_node + .overseer_handler + .expect("Overseer handler should be initialized for collators"); + + let genesis_head_hex = + format!("0x{:?}", HexDisplay::from(&collator.genesis_head())); + let validation_code_hex = + format!("0x{:?}", HexDisplay::from(&collator.validation_code())); + + let para_id = cli.run.parachain_id.map(ParaId::from).unwrap_or(DEFAULT_PARA_ID); + + log::info!("Running adder collator for parachain id: {}", para_id); + log::info!("Genesis state: {}", genesis_head_hex); + log::info!("Validation code: {}", validation_code_hex); + + let config = CollationGenerationConfig { + key: collator.collator_key(), + collator: collator.create_collation_function(), + para_id, + }; + overseer_handler + .send_msg(CollationGenerationMessage::Initialize(config)) + .await; + + overseer_handler + .send_msg(CollatorProtocolMessage::CollateOn(para_id)) + .await; + + Ok(full_node.task_manager) + } + } + }) + } + } +} diff --git a/parachain/test-parachains/adder/collator/tests/integration.rs 
b/parachain/test-parachains/adder/collator/tests/integration.rs new file mode 100644 index 0000000000000000000000000000000000000000..6754c6a43734a679ebd6da677fe82186a1bb0659 --- /dev/null +++ b/parachain/test-parachains/adder/collator/tests/integration.rs @@ -0,0 +1,74 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Integration test that ensures that we can build and include parachain +//! blocks of the adder parachain. + +// If this test is failing, make sure to run all tests with the `real-overseer` feature being enabled. 
+#[substrate_test_utils::test] +#[cfg(feature = "real-overseer")] +async fn collating_using_adder_collator(task_executor: sc_service::TaskExecutor) { + use sp_keyring::AccountKeyring::*; + use futures::join; + use polkadot_primitives::v1::Id as ParaId; + + sc_cli::init_logger("", Default::default(), None, false).expect("Sets up logger"); + + let para_id = ParaId::from(100); + + // start alice + let alice = polkadot_test_service::run_validator_node(task_executor.clone(), Alice, || {}, vec![]); + + // start bob + let bob = polkadot_test_service::run_validator_node( + task_executor.clone(), + Bob, + || {}, + vec![alice.addr.clone()], + ); + + let collator = test_parachain_adder_collator::Collator::new(); + + // register parachain + alice + .register_parachain( + para_id, + collator.validation_code().to_vec(), + collator.genesis_head(), + ) + .await + .unwrap(); + + // run the collator node + let mut charlie = polkadot_test_service::run_collator_node( + task_executor.clone(), + Charlie, + || {}, + vec![alice.addr.clone(), bob.addr.clone()], + collator.collator_id(), + ); + + charlie.register_collator(collator.collator_key(), para_id, collator.create_collation_function()).await; + + // Wait until the parachain has 4 blocks produced. 
+ collator.wait_for_blocks(4).await; + + join!( + alice.task_manager.clean_shutdown(), + bob.task_manager.clean_shutdown(), + charlie.task_manager.clean_shutdown(), + ); +} diff --git a/parachain/test-parachains/adder/src/lib.rs b/parachain/test-parachains/adder/src/lib.rs index 7ccba8400efbb07eaadddfcd284ab0b60e94858b..37208efbca1b78ec19c5ce60955c2ad652944abd 100644 --- a/parachain/test-parachains/adder/src/lib.rs +++ b/parachain/test-parachains/adder/src/lib.rs @@ -20,7 +20,8 @@ #![cfg_attr(not(feature = "std"), feature(core_intrinsics, lang_items, core_panic_info, alloc_error_handler))] -use codec::{Encode, Decode}; +use parity_scale_codec::{Encode, Decode}; +use tiny_keccak::{Hasher as _, Keccak}; #[cfg(not(feature = "std"))] mod wasm_validation; @@ -33,15 +34,23 @@ static ALLOC: dlmalloc::GlobalDlmalloc = dlmalloc::GlobalDlmalloc; #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); -#[cfg(feature = "std")] +fn keccak256(input: &[u8]) -> [u8; 32] { + let mut out = [0u8; 32]; + let mut keccak256 = Keccak::v256(); + keccak256.update(input); + keccak256.finalize(&mut out); + out +} + /// Wasm binary unwrapped. If built with `BUILD_DUMMY_WASM_BINARY`, the function panics. +#[cfg(feature = "std")] pub fn wasm_binary_unwrap() -> &'static [u8] { WASM_BINARY.expect("Development wasm binary is not available. Testing is only \ supported with the flag disabled.") } /// Head data for this parachain. -#[derive(Default, Clone, Hash, Eq, PartialEq, Encode, Decode)] +#[derive(Default, Clone, Hash, Eq, PartialEq, Encode, Decode, Debug)] pub struct HeadData { /// Block number pub number: u64, @@ -53,21 +62,21 @@ pub struct HeadData { impl HeadData { pub fn hash(&self) -> [u8; 32] { - tiny_keccak::keccak256(&self.encode()) + keccak256(&self.encode()) } } /// Block data for this parachain. -#[derive(Default, Clone, Encode, Decode)] +#[derive(Default, Clone, Encode, Decode, Debug)] pub struct BlockData { /// State to begin from. 
 	pub state: u64,
-	/// Amount to add (overflowing)
+	/// Amount to add (wrapping)
 	pub add: u64,
 }
 
 pub fn hash_state(state: u64) -> [u8; 32] {
-	tiny_keccak::keccak256(state.encode().as_slice())
+	keccak256(state.encode().as_slice())
 }
 
 /// Start state mismatched with parent header's state hash.
@@ -81,13 +90,13 @@ pub fn execute(
 	parent_head: HeadData,
 	block_data: &BlockData,
 ) -> Result<HeadData, StateMismatch> {
-	debug_assert_eq!(parent_hash, parent_head.hash());
+	assert_eq!(parent_hash, parent_head.hash());
 
 	if hash_state(block_data.state) != parent_head.post_state {
 		return Err(StateMismatch);
 	}
 
-	let new_state = block_data.state.overflowing_add(block_data.add).0;
+	let new_state = block_data.state.wrapping_add(block_data.add);
 
 	Ok(HeadData {
 		number: parent_head.number + 1,
diff --git a/parachain/test-parachains/adder/src/wasm_validation.rs b/parachain/test-parachains/adder/src/wasm_validation.rs
index c0f3b56dc8e49bfa588a7f75099c952e62eff07b..f7e46cad391bc16fcc04e04f0072ece369ffbb6b 100644
--- a/parachain/test-parachains/adder/src/wasm_validation.rs
+++ b/parachain/test-parachains/adder/src/wasm_validation.rs
@@ -17,12 +17,13 @@
 //! WASM validation for adder parachain.
use crate::{HeadData, BlockData}; -use core::{intrinsics, panic}; +use core::panic; +use sp_std::vec::Vec; use parachain::primitives::{ValidationResult, HeadData as GenericHeadData}; -use codec::{Encode, Decode}; +use parity_scale_codec::{Encode, Decode}; #[no_mangle] -pub extern fn validate_block(params: *const u8, len: usize) -> u64 { +pub extern "C" fn validate_block(params: *const u8, len: usize) -> u64 { let params = unsafe { parachain::load_params(params, len) }; let parent_head = HeadData::decode(&mut ¶ms.parent_head.0[..]) .expect("invalid parent head format."); @@ -30,17 +31,17 @@ pub extern fn validate_block(params: *const u8, len: usize) -> u64 { let block_data = BlockData::decode(&mut ¶ms.block_data.0[..]) .expect("invalid block data format."); - let parent_hash = tiny_keccak::keccak256(¶ms.parent_head.0[..]); - - match crate::execute(parent_hash, parent_head, &block_data) { - Ok(new_head) => parachain::write_result( - &ValidationResult { - head_data: GenericHeadData(new_head.encode()), - new_validation_code: None, - upward_messages: sp_std::vec::Vec::new(), - processed_downward_messages: 0, - } - ), - Err(_) => panic!("execution failure"), - } + let parent_hash = crate::keccak256(¶ms.parent_head.0[..]); + + let new_head = crate::execute(parent_hash, parent_head, &block_data).expect("Executes block"); + parachain::write_result( + &ValidationResult { + head_data: GenericHeadData(new_head.encode()), + new_validation_code: None, + upward_messages: sp_std::vec::Vec::new(), + horizontal_messages: sp_std::vec::Vec::new(), + processed_downward_messages: 0, + hrmp_watermark: params.relay_chain_height, + } + ) } diff --git a/parachain/test-parachains/adder/wasm/Cargo.toml b/parachain/test-parachains/adder/wasm/Cargo.toml deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/parachain/test-parachains/halt/Cargo.toml b/parachain/test-parachains/halt/Cargo.toml index 
f91e1d66a5b558a292c233cd7505137e9a103060..d9fff249bc9926fb1f3acc123bc77c1abf978dcb 100644 --- a/parachain/test-parachains/halt/Cargo.toml +++ b/parachain/test-parachains/halt/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "test-parachain-halt" -version = "0.8.26" +version = "0.8.27" authors = ["Parity Technologies "] description = "Test parachain which executes forever" edition = "2018" @@ -9,7 +9,7 @@ build = "build.rs" [dependencies] [build-dependencies] -wasm-builder-runner = { package = "substrate-wasm-builder-runner", version = "2.0.0" } +substrate-wasm-builder = "3.0.0" [features] default = [ "std" ] diff --git a/parachain/test-parachains/halt/build.rs b/parachain/test-parachains/halt/build.rs index 6ed2a43655794c07f4c875c848dd93be24f6dc29..ac1ce327cf9086d6305b3bfb1da3b547df7c28e9 100644 --- a/parachain/test-parachains/halt/build.rs +++ b/parachain/test-parachains/halt/build.rs @@ -14,12 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use wasm_builder_runner::WasmBuilder; +use substrate_wasm_builder::WasmBuilder; fn main() { WasmBuilder::new() .with_current_project() - .with_wasm_builder_from_crates("2.0.1") .export_heap_base() .build() } diff --git a/parachain/test-parachains/tests/adder/mod.rs b/parachain/test-parachains/tests/adder/mod.rs index b0d905988c19dffa2a51b1702b7e58e6e8da43e7..8666cf365a350a5830f2ae7fcf63549dc26280fa 100644 --- a/parachain/test-parachains/tests/adder/mod.rs +++ b/parachain/test-parachains/tests/adder/mod.rs @@ -25,40 +25,13 @@ use parachain::{ HeadData as GenericHeadData, ValidationParams, }, - wasm_executor::{ValidationPool, ExecutionMode} + wasm_executor::{ValidationPool, IsolationStrategy} }; -use codec::{Decode, Encode}; - -/// Head data for this parachain. -#[derive(Default, Clone, Encode, Decode)] -struct HeadData { - /// Block number - number: u64, - /// parent block keccak256 - parent_hash: [u8; 32], - /// hash of post-execution state. 
- post_state: [u8; 32], -} - -/// Block data for this parachain. -#[derive(Default, Clone, Encode, Decode)] -struct BlockData { - /// State to begin from. - state: u64, - /// Amount to add (overflowing) - add: u64, -} - -fn hash_state(state: u64) -> [u8; 32] { - tiny_keccak::keccak256(state.encode().as_slice()) -} - -fn hash_head(head: &HeadData) -> [u8; 32] { - tiny_keccak::keccak256(head.encode().as_slice()) -} +use parity_scale_codec::{Decode, Encode}; +use adder::{HeadData, BlockData, hash_state}; -fn execution_mode() -> ExecutionMode { - ExecutionMode::ExternalProcessCustomHost { +fn isolation_strategy() -> IsolationStrategy { + IsolationStrategy::ExternalProcessCustomHost { pool: ValidationPool::new(), binary: std::env::current_exe().unwrap(), args: WORKER_ARGS_TEST.iter().map(|x| x.to_string()).collect(), @@ -67,17 +40,17 @@ fn execution_mode() -> ExecutionMode { #[test] fn execute_good_on_parent_with_inprocess_validation() { - let execution_mode = ExecutionMode::InProcess; - execute_good_on_parent(execution_mode); + let isolation_strategy = IsolationStrategy::InProcess; + execute_good_on_parent(isolation_strategy); } #[test] pub fn execute_good_on_parent_with_external_process_validation() { - let execution_mode = execution_mode(); - execute_good_on_parent(execution_mode); + let isolation_strategy = isolation_strategy(); + execute_good_on_parent(isolation_strategy); } -fn execute_good_on_parent(execution_mode: ExecutionMode) { +fn execute_good_on_parent(isolation_strategy: IsolationStrategy) { let parent_head = HeadData { number: 0, parent_hash: [0; 32], @@ -89,7 +62,6 @@ fn execute_good_on_parent(execution_mode: ExecutionMode) { add: 512, }; - let ret = parachain::wasm_executor::validate_candidate( adder::wasm_binary_unwrap(), ValidationParams { @@ -99,14 +71,14 @@ fn execute_good_on_parent(execution_mode: ExecutionMode) { hrmp_mqc_heads: Vec::new(), dmq_mqc_head: Default::default(), }, - &execution_mode, + &isolation_strategy, 
sp_core::testing::TaskExecutor::new(), ).unwrap(); let new_head = HeadData::decode(&mut &ret.head_data.0[..]).unwrap(); assert_eq!(new_head.number, 1); - assert_eq!(new_head.parent_hash, hash_head(&parent_head)); + assert_eq!(new_head.parent_hash, parent_head.hash()); assert_eq!(new_head.post_state, hash_state(512)); } @@ -115,7 +87,7 @@ fn execute_good_chain_on_parent() { let mut number = 0; let mut parent_hash = [0; 32]; let mut last_state = 0; - let execution_mode = execution_mode(); + let isolation_strategy = isolation_strategy(); for add in 0..10 { let parent_head = HeadData { @@ -138,25 +110,25 @@ fn execute_good_chain_on_parent() { hrmp_mqc_heads: Vec::new(), dmq_mqc_head: Default::default(), }, - &execution_mode, + &isolation_strategy, sp_core::testing::TaskExecutor::new(), ).unwrap(); let new_head = HeadData::decode(&mut &ret.head_data.0[..]).unwrap(); assert_eq!(new_head.number, number + 1); - assert_eq!(new_head.parent_hash, hash_head(&parent_head)); + assert_eq!(new_head.parent_hash, parent_head.hash()); assert_eq!(new_head.post_state, hash_state(last_state + add)); number += 1; - parent_hash = hash_head(&new_head); + parent_hash = new_head.hash(); last_state += add; } } #[test] fn execute_bad_on_parent() { - let execution_mode = execution_mode(); + let isolation_strategy = isolation_strategy(); let parent_head = HeadData { number: 0, @@ -178,7 +150,7 @@ fn execute_bad_on_parent() { hrmp_mqc_heads: Vec::new(), dmq_mqc_head: Default::default(), }, - &execution_mode, + &isolation_strategy, sp_core::testing::TaskExecutor::new(), ).unwrap_err(); } diff --git a/parachain/test-parachains/tests/wasm_executor/mod.rs b/parachain/test-parachains/tests/wasm_executor/mod.rs index ad60e4e04e320ced72dcd337f7c98c75920e861f..e092adc2f47ee7fb78f796c0e0fa8323d391380e 100644 --- a/parachain/test-parachains/tests/wasm_executor/mod.rs +++ b/parachain/test-parachains/tests/wasm_executor/mod.rs @@ -21,11 +21,11 @@ const WORKER_ARGS_TEST: &[&'static str] = &["--nocapture", 
"validation_worker"]; use crate::adder; use parachain::{ primitives::{BlockData, ValidationParams}, - wasm_executor::{ValidationError, InvalidCandidate, EXECUTION_TIMEOUT_SEC, ExecutionMode, ValidationPool}, + wasm_executor::{ValidationError, InvalidCandidate, EXECUTION_TIMEOUT_SEC, IsolationStrategy, ValidationPool}, }; -fn execution_mode() -> ExecutionMode { - ExecutionMode::ExternalProcessCustomHost { +fn isolation_strategy() -> IsolationStrategy { + IsolationStrategy::ExternalProcessCustomHost { pool: ValidationPool::new(), binary: std::env::current_exe().unwrap(), args: WORKER_ARGS_TEST.iter().map(|x| x.to_string()).collect(), @@ -34,7 +34,7 @@ fn execution_mode() -> ExecutionMode { #[test] fn terminates_on_timeout() { - let execution_mode = execution_mode(); + let isolation_strategy = isolation_strategy(); let result = parachain::wasm_executor::validate_candidate( halt::wasm_binary_unwrap(), @@ -45,7 +45,7 @@ fn terminates_on_timeout() { hrmp_mqc_heads: Vec::new(), dmq_mqc_head: Default::default(), }, - &execution_mode, + &isolation_strategy, sp_core::testing::TaskExecutor::new(), ); match result { @@ -59,11 +59,10 @@ fn terminates_on_timeout() { #[test] fn parallel_execution() { - let execution_mode = execution_mode(); + let isolation_strategy = isolation_strategy(); + let isolation_strategy_clone = isolation_strategy.clone(); let start = std::time::Instant::now(); - - let execution_mode2 = execution_mode.clone(); let thread = std::thread::spawn(move || parachain::wasm_executor::validate_candidate( halt::wasm_binary_unwrap(), @@ -74,7 +73,7 @@ fn parallel_execution() { hrmp_mqc_heads: Vec::new(), dmq_mqc_head: Default::default(), }, - &execution_mode, + &isolation_strategy, sp_core::testing::TaskExecutor::new(), ).ok()); let _ = parachain::wasm_executor::validate_candidate( @@ -86,7 +85,7 @@ fn parallel_execution() { hrmp_mqc_heads: Vec::new(), dmq_mqc_head: Default::default(), }, - &execution_mode2, + &isolation_strategy_clone, 
sp_core::testing::TaskExecutor::new(), ); thread.join().unwrap(); diff --git a/primitives/Cargo.toml b/primitives/Cargo.toml index ca0f630216c850bf5fe23e08e5ec0216fdc28979..655d7124c236f2280e3df309af42d3a4471c8529 100644 --- a/primitives/Cargo.toml +++ b/primitives/Cargo.toml @@ -1,32 +1,32 @@ [package] name = "polkadot-primitives" -version = "0.8.26" +version = "0.8.27" authors = ["Parity Technologies "] edition = "2018" [dependencies] -serde = { version = "1.0.102", optional = true, features = ["derive"] } -parity-scale-codec = { version = "1.3.4", default-features = false, features = ["bit-vec", "derive"] } -primitives = { package = "sp-core", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -inherents = { package = "sp-inherents", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -application-crypto = { package = "sp-application-crypto", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master", optional = true } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { package = "sp-std", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-arithmetic = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -runtime_primitives = { package = "sp-runtime", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +serde = { version = 
"1.0.118", optional = true, features = ["derive"] } +parity-scale-codec = { version = "1.3.5", default-features = false, features = ["bit-vec", "derive"] } +primitives = { package = "sp-core", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +inherents = { package = "sp-inherents", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +application-crypto = { package = "sp-application-crypto", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", optional = true } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-version = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-std = { package = "sp-std", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-staking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-arithmetic = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +runtime_primitives = { package = "sp-runtime", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } polkadot-parachain = { path = "../parachain", default-features = false } polkadot-core-primitives = { path = "../core-primitives", default-features = false } -trie = { package = "sp-trie", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } 
+trie = { package = "sp-trie", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } bitvec = { version = "0.17.4", default-features = false, features = ["alloc"] } -frame-system = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-system = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } [dev-dependencies] -sp-serializer = { git = "https://github.com/paritytech/substrate", branch = "master" } -pretty_assertions = "0.5.1" +sp-serializer = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +pretty_assertions = "0.6.1" [features] default = ["std"] diff --git a/primitives/src/v0.rs b/primitives/src/v0.rs index c058d22dbc565562017a0171e5c6373a8c62a663..5f6e3ad544a98f3f58d5df0c6a232c6c2e963786 100644 --- a/primitives/src/v0.rs +++ b/primitives/src/v0.rs @@ -41,7 +41,7 @@ pub use polkadot_core_primitives::*; pub use parity_scale_codec::Compact; pub use polkadot_parachain::primitives::{ - Id, ParachainDispatchOrigin, LOWEST_USER_ID, UpwardMessage, HeadData, BlockData, + Id, LOWEST_USER_ID, UpwardMessage, HeadData, BlockData, ValidationCode, }; @@ -633,18 +633,18 @@ pub struct ErasureChunk { pub enum CompactStatement { /// Proposal of a parachain candidate. #[codec(index = "1")] - Candidate(Hash), + Candidate(CandidateHash), /// State that a parachain candidate is valid. #[codec(index = "2")] - Valid(Hash), + Valid(CandidateHash), /// State that a parachain candidate is invalid. #[codec(index = "3")] - Invalid(Hash), + Invalid(CandidateHash), } impl CompactStatement { /// Get the underlying candidate hash this references. 
- pub fn candidate_hash(&self) -> &Hash { + pub fn candidate_hash(&self) -> &CandidateHash { match *self { CompactStatement::Candidate(ref h) | CompactStatement::Valid(ref h) @@ -684,7 +684,7 @@ impl ValidityAttestation { /// which should be known in context. pub fn signed_payload( &self, - candidate_hash: Hash, + candidate_hash: CandidateHash, signing_context: &SigningContext, ) -> Vec { match *self { diff --git a/primitives/src/v1.rs b/primitives/src/v1.rs index 66a93a6bece8127d4962df1d8195f3529c639caa..18a84a9b61da7a300cd46803d7fc19b61ecc6d6e 100644 --- a/primitives/src/v1.rs +++ b/primitives/src/v1.rs @@ -17,27 +17,28 @@ //! V1 Primitives. use sp_std::prelude::*; +use sp_std::collections::btree_map::BTreeMap; use parity_scale_codec::{Encode, Decode}; use bitvec::vec::BitVec; use primitives::RuntimeDebug; use runtime_primitives::traits::AppVerify; use inherents::InherentIdentifier; -use sp_arithmetic::traits::{BaseArithmetic, Saturating, Zero}; +use sp_arithmetic::traits::{BaseArithmetic, Saturating}; +use application_crypto::KeyTypeId; pub use runtime_primitives::traits::{BlakeTwo256, Hash as HashT}; // Export some core primitives. pub use polkadot_core_primitives::v1::{ - BlockNumber, Moment, Signature, AccountPublic, AccountId, AccountIndex, - ChainId, Hash, Nonce, Balance, Header, Block, BlockId, UncheckedExtrinsic, - Remark, DownwardMessage, InboundDownwardMessage, + BlockNumber, Moment, Signature, AccountPublic, AccountId, AccountIndex, ChainId, Hash, Nonce, + Balance, Header, Block, BlockId, UncheckedExtrinsic, Remark, DownwardMessage, + InboundDownwardMessage, CandidateHash, InboundHrmpMessage, OutboundHrmpMessage, }; // Export some polkadot-parachain primitives pub use polkadot_parachain::primitives::{ - Id, ParachainDispatchOrigin, LOWEST_USER_ID, UpwardMessage, HeadData, BlockData, - ValidationCode, + Id, LOWEST_USER_ID, HrmpChannelId, UpwardMessage, HeadData, BlockData, ValidationCode, }; // Export some basic parachain primitives from v0. 
@@ -57,6 +58,34 @@ pub use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; /// Unique identifier for the Inclusion Inherent pub const INCLUSION_INHERENT_IDENTIFIER: InherentIdentifier = *b"inclusn0"; + +/// The key type ID for a parachain approval voting key. +pub const APPROVAL_KEY_TYPE_ID: KeyTypeId = KeyTypeId(*b"aprv"); + +mod approval_app { + use application_crypto::{app_crypto, sr25519}; + app_crypto!(sr25519, super::APPROVAL_KEY_TYPE_ID); +} + +/// The public key of a keypair used by a validator for approval voting +/// on included parachain candidates. +pub type ApprovalId = approval_app::Public; + +/// The key type ID for parachain assignment key. +pub const ASSIGNMENT_KEY_TYPE_ID: KeyTypeId = KeyTypeId(*b"asgn"); + +// The public key of a keypair used by a validator for determining assignments +/// to approve included parachain candidates. +mod assigment_app { + use application_crypto::{app_crypto, sr25519}; + app_crypto!(sr25519, super::ASSIGNMENT_KEY_TYPE_ID); +} + +/// The public key of a keypair used by a validator for determining assignments +/// to approve included parachain candidates. +pub type AssignmentId = assigment_app::Public; + + /// Get a collator signature payload on a relay-parent, block-data combo. pub fn collator_signature_payload>( relay_parent: &H, @@ -113,6 +142,8 @@ pub struct CandidateDescriptor { pub persisted_validation_data_hash: Hash, /// The blake2-256 hash of the pov. pub pov_hash: Hash, + /// The root of a block's erasure encoding Merkle tree. + pub erasure_root: Hash, /// Signature on blake2-256 of components of this receipt: /// The parachain index, the relay parent, the validation data hash, and the pov_hash. pub signature: CollatorSignature, @@ -149,8 +180,8 @@ impl CandidateReceipt { } /// Computes the blake2-256 hash of the receipt. 
- pub fn hash(&self) -> Hash where H: Encode { - BlakeTwo256::hash_of(self) + pub fn hash(&self) -> CandidateHash where H: Encode { + CandidateHash(BlakeTwo256::hash_of(self)) } } @@ -197,9 +228,14 @@ impl CommittedCandidateReceipt { /// /// This computes the canonical hash, not the hash of the directly encoded data. /// Thus this is a shortcut for `candidate.to_plain().hash()`. - pub fn hash(&self) -> Hash where H: Encode { + pub fn hash(&self) -> CandidateHash where H: Encode { self.to_plain().hash() } + + /// Does this committed candidate receipt correspond to the given [`CandidateReceipt`]? + pub fn corresponds_to(&self, receipt: &CandidateReceipt) -> bool where H: PartialEq { + receipt.descriptor == self.descriptor && receipt.commitments_hash == self.commitments.hash() + } } impl PartialOrd for CommittedCandidateReceipt { @@ -271,6 +307,8 @@ pub struct PersistedValidationData { /// The DMQ MQC head will be used by the validation function to authorize the downward messages /// passed by the collator. pub dmq_mqc_head: Hash, + /// The maximum legal size of a POV block, in bytes. + pub max_pov_size: u32, } impl PersistedValidationData { @@ -310,34 +348,22 @@ pub struct TransientValidationData { pub dmq_length: u32, } -/// Outputs of validating a candidate. -#[derive(Encode, Decode)] -#[cfg_attr(feature = "std", derive(Clone, Debug, Default))] -pub struct ValidationOutputs { - /// The head-data produced by validation. - pub head_data: HeadData, - /// Upward messages to the relay chain. - pub upward_messages: Vec, - /// The new validation code submitted by the execution, if any. - pub new_validation_code: Option, - /// The number of messages processed from the DMQ. - pub processed_downward_messages: u32, -} - /// Commitments made in a `CandidateReceipt`. Many of these are outputs of validation. 
#[derive(PartialEq, Eq, Clone, Encode, Decode)] #[cfg_attr(feature = "std", derive(Debug, Default, Hash))] -pub struct CandidateCommitments { +pub struct CandidateCommitments { /// Messages destined to be interpreted by the Relay chain itself. pub upward_messages: Vec, - /// The root of a block's erasure encoding Merkle tree. - pub erasure_root: Hash, + /// Horizontal messages sent by the parachain. + pub horizontal_messages: Vec>, /// New validation code. pub new_validation_code: Option, /// The head-data produced as a result of execution. pub head_data: HeadData, /// The number of messages processed from the DMQ. pub processed_downward_messages: u32, + /// The mark which specifies the block number up to which all inbound HRMP messages are processed. + pub hrmp_watermark: N, } impl CandidateCommitments { @@ -395,6 +421,16 @@ impl BackedCandidate { pub fn descriptor(&self) -> &CandidateDescriptor { &self.candidate.descriptor } + + /// Compute this candidate's hash. + pub fn hash(&self) -> CandidateHash where H: Clone + Encode { + self.candidate.hash() + } + + /// Get this candidate's receipt. + pub fn receipt(&self) -> CandidateReceipt where H: Clone { + self.candidate.to_plain() + } } /// Verify the backing of the given candidate. @@ -422,7 +458,7 @@ pub fn check_candidate_backing + Clone + Encode>( } // this is known, even in runtime, to be blake2-256. - let hash: Hash = backed.candidate.hash(); + let hash = backed.candidate.hash(); let mut signed = 0; for ((val_in_group_idx, _), attestation) in backed.validator_indices.iter().enumerate() @@ -459,8 +495,8 @@ impl From for CoreIndex { } /// The unique (during session) index of a validator group. 
-#[derive(Encode, Decode, Default, Clone, Copy)] -#[cfg_attr(feature = "std", derive(Eq, Hash, PartialEq, Debug))] +#[derive(Encode, Decode, Default, Clone, Copy, Debug)] +#[cfg_attr(feature = "std", derive(Eq, Hash, PartialEq))] pub struct GroupIndex(pub u32); impl From for GroupIndex { @@ -495,11 +531,11 @@ pub enum CoreOccupied { } /// This is the data we keep available for each candidate included in the relay chain. -#[derive(Clone, Encode, Decode)] -#[cfg_attr(feature = "std", derive(PartialEq, Debug))] +#[cfg(feature = "std")] +#[derive(Clone, Encode, Decode, PartialEq, Debug)] pub struct AvailableData { /// The Proof-of-Validation of the candidate. - pub pov: PoV, + pub pov: std::sync::Arc, /// The persisted validation data needed for secondary checks. pub validation_data: PersistedValidationData, } @@ -537,11 +573,7 @@ impl GroupRotationInfo { impl GroupRotationInfo { /// Returns the block number of the next rotation after the current block. If the current block /// is 10 and the rotation frequency is 5, this should return 15. - /// - /// If the group rotation frequency is 0, returns 0. pub fn next_rotation_at(&self) -> N { - if self.group_rotation_frequency.is_zero() { return Zero::zero() } - let cycle_once = self.now + self.group_rotation_frequency; cycle_once - ( cycle_once.saturating_sub(self.session_start_block) % self.group_rotation_frequency @@ -550,10 +582,7 @@ impl GroupRotationInfo { /// Returns the block number of the last rotation before or including the current block. If the /// current block is 10 and the rotation frequency is 5, this should return 10. - /// - /// If the group rotation frequency is 0, returns 0. pub fn last_rotation_at(&self) -> N { - if self.group_rotation_frequency.is_zero() { return Zero::zero() } self.now - ( self.now.saturating_sub(self.session_start_block) % self.group_rotation_frequency ) @@ -561,8 +590,8 @@ impl GroupRotationInfo { } /// Information about a core which is currently occupied. 
-#[derive(Clone, Encode, Decode)] -#[cfg_attr(feature = "std", derive(PartialEq, Debug))] +#[derive(Clone, Encode, Decode, Debug)] +#[cfg_attr(feature = "std", derive(PartialEq))] pub struct OccupiedCore { /// The ID of the para occupying the core. pub para_id: Id, @@ -586,8 +615,8 @@ pub struct OccupiedCore { } /// Information about a core which is currently occupied. -#[derive(Clone, Encode, Decode)] -#[cfg_attr(feature = "std", derive(PartialEq, Debug, Default))] +#[derive(Clone, Encode, Decode, Debug)] +#[cfg_attr(feature = "std", derive(PartialEq, Default))] pub struct ScheduledCore { /// The ID of a para scheduled. pub para_id: Id, @@ -596,8 +625,8 @@ pub struct ScheduledCore { } /// The state of a particular availability core. -#[derive(Clone, Encode, Decode)] -#[cfg_attr(feature = "std", derive(PartialEq, Debug))] +#[derive(Clone, Encode, Decode, Debug)] +#[cfg_attr(feature = "std", derive(PartialEq))] pub enum CoreState { /// The core is currently occupied. #[codec(index = "0")] @@ -624,6 +653,11 @@ impl CoreState { Self::Free => None, } } + + /// Is this core state `Self::Occupied`? + pub fn is_occupied(&self) -> bool { + matches!(self, Self::Occupied(_)) + } } /// An assumption being made about the state of an occupied core. @@ -656,9 +690,38 @@ pub enum CandidateEvent { CandidateTimedOut(CandidateReceipt, HeadData), } +/// Information about validator sets of a session. +#[derive(Clone, Encode, Decode, RuntimeDebug)] +#[cfg_attr(feature = "std", derive(PartialEq, Default))] +pub struct SessionInfo { + /// Validators in canonical ordering. + pub validators: Vec, + /// Validators' authority discovery keys for the session in canonical ordering. + pub discovery_keys: Vec, + /// The assignment and approval keys for validators. 
+ pub approval_keys: Vec<(ApprovalId, AssignmentId)>, + /// Validators in shuffled ordering - these are the validator groups as produced + /// by the `Scheduler` module for the session and are typically referred to by + /// `GroupIndex`. + pub validator_groups: Vec>, + /// The number of availability cores used by the protocol during this session. + pub n_cores: u32, + /// The zeroth delay tranche width. + pub zeroth_delay_tranche_width: u32, + /// The number of samples we do of relay_vrf_modulo. + pub relay_vrf_modulo_samples: u32, + /// The number of delay tranches in total. + pub n_delay_tranches: u32, + /// How many slots (BABE / SASSAFRAS) must pass before an assignment is considered a + /// no-show. + pub no_show_slots: u32, + /// The number of validators needed to approve a block. + pub needed_approvals: u32, +} + sp_api::decl_runtime_apis! { /// The API for querying the state of parachains on-chain. - pub trait ParachainHost { + pub trait ParachainHost { /// Get the current validators. fn validators() -> Vec; @@ -687,15 +750,17 @@ sp_api::decl_runtime_apis! { fn persisted_validation_data(para_id: Id, assumption: OccupiedCoreAssumption) -> Option>; - // TODO: Adding a Runtime API should be backwards compatible... right? /// Checks if the given validation outputs pass the acceptance criteria. - fn check_validation_outputs(para_id: Id, outputs: ValidationOutputs) -> bool; + fn check_validation_outputs(para_id: Id, outputs: CandidateCommitments) -> bool; /// Returns the session index expected at a child of the block. /// /// This can be used to instantiate a `SigningContext`. fn session_index_for_child() -> SessionIndex; + /// Get the session info for the given session, if stored. + fn session_info(index: SessionIndex) -> Option; + /// Fetch the validation code used by a para, making the given `OccupiedCoreAssumption`. /// /// Returns `None` if either the para is not registered or the assumption is `Freed` @@ -703,6 +768,14 @@ sp_api::decl_runtime_apis! 
{ fn validation_code(para_id: Id, assumption: OccupiedCoreAssumption) -> Option; + /// Fetch the historical validation code used by a para for candidates executed in the + /// context of a given block height in the current chain. + /// + /// `context_height` may be no greater than the height of the block in whose + /// state the runtime API is executed. + fn historical_validation_code(para_id: Id, context_height: N) + -> Option; + /// Get the receipt of a candidate pending availability. This returns `Some` for any paras /// assigned to occupied cores in `availability_cores` and `None` otherwise. fn candidate_pending_availability(para_id: Id) -> Option>; @@ -713,17 +786,14 @@ sp_api::decl_runtime_apis! { #[skip_initialize_block] fn candidate_events() -> Vec>; - /// Get the `AuthorityDiscoveryId`s corresponding to the given `ValidatorId`s. - /// Currently this request is limited to validators in the current session. - /// - /// We assume that every validator runs authority discovery, - /// which would allow us to establish point-to-point connection to given validators. - fn validator_discovery(validators: Vec) -> Vec>; - /// Get all the pending inbound messages in the downward message queue for a para. fn dmq_contents( recipient: Id, ) -> Vec>; + + /// Get the contents of all channels addressed to the given recipient. Channels that have no + /// messages in them are also included. 
+ fn inbound_hrmp_channels_contents(recipient: Id) -> BTreeMap>>; } } @@ -760,15 +830,6 @@ mod tests { assert_eq!(info.next_rotation_at(), 20); assert_eq!(info.last_rotation_at(), 15); - - let info = GroupRotationInfo { - session_start_block: 10u32, - now: 11, - group_rotation_frequency: 0, - }; - - assert_eq!(info.next_rotation_at(), 0); - assert_eq!(info.last_rotation_at(), 0); } #[test] diff --git a/roadmap/implementers-guide/src/SUMMARY.md b/roadmap/implementers-guide/src/SUMMARY.md index d51f26f71ea53530420ab12c6c78c58e7e62645b..b6e0fab3be4e252769f0b65fbc7709b2992a9222 100644 --- a/roadmap/implementers-guide/src/SUMMARY.md +++ b/roadmap/implementers-guide/src/SUMMARY.md @@ -15,7 +15,10 @@ - [Scheduler Module](runtime/scheduler.md) - [Inclusion Module](runtime/inclusion.md) - [InclusionInherent Module](runtime/inclusioninherent.md) - - [Router Module](runtime/router.md) + - [DMP Module](runtime/dmp.md) + - [UMP Module](runtime/ump.md) + - [HRMP Module](runtime/hrmp.md) + - [Session Info Module](runtime/session_info.md) - [Runtime APIs](runtime-api/README.md) - [Validators](runtime-api/validators.md) - [Validator Groups](runtime-api/validator-groups.md) @@ -29,7 +32,8 @@ - [Node Architecture](node/README.md) - [Subsystems and Jobs](node/subsystems-and-jobs.md) - [Overseer](node/overseer.md) - - [Collators](node/collators/README.md) + - [GRANDPA Voting Rule](node/grandpa-voting-rule.md) + - [Collator Subsystems](node/collators/README.md) - [Collation Generation](node/collators/collation-generation.md) - [Collator Protocol](node/collators/collator-protocol.md) - [Backing Subsystems](node/backing/README.md) @@ -39,8 +43,13 @@ - [PoV Distribution](node/backing/pov-distribution.md) - [Availability Subsystems](node/availability/README.md) - [Availability Distribution](node/availability/availability-distribution.md) + - [Availability Recovery](node/availability/availability-recovery.md) - [Bitfield Distribution](node/availability/bitfield-distribution.md) - [Bitfield 
Signing](node/availability/bitfield-signing.md) + - [Approval Subsystems](node/approval/README.md) + - [Approval Voting](node/approval/approval-voting.md) + - [Approval Distribution](node/approval/approval-distribution.md) + - [Dispute Participation](node/approval/dispute-participation.md) - [Utility Subsystems](node/utility/README.md) - [Availability Store](node/utility/availability-store.md) - [Candidate Validation](node/utility/candidate-validation.md) @@ -59,6 +68,7 @@ - [Chain](types/chain.md) - [Messages](types/messages.md) - [Network](types/network.md) + - [Approvals](types/approval.md) [Glossary](glossary.md) [Further Reading](further-reading.md) diff --git a/roadmap/implementers-guide/src/glossary.md b/roadmap/implementers-guide/src/glossary.md index 63294d1d77fd69bd538938a04a505b7cd510fa7a..2dbe2ab14abeb60b1aff9d058b3920b94aa9a7fd 100644 --- a/roadmap/implementers-guide/src/glossary.md +++ b/roadmap/implementers-guide/src/glossary.md @@ -24,6 +24,7 @@ Here you can find definitions of a bunch of jargon, usually specific to the Polk - Parathread: A parachain which is scheduled on a pay-as-you-go basis. - Proof-of-Validity (PoV): A stateless-client proof that a parachain candidate is valid, with respect to some validation function. - Relay Parent: A block in the relay chain, referred to in a context where work is being done in the context of the state at this block. +- Router: The router module is a meta module that consists of three runtime modules responsible for routing messages between paras and the relay chain. The three separate runtime modules are: Dmp, Ump, Hrmp, each responsible for the respective part of message routing. - Runtime: The relay-chain state machine. - Runtime Module: See Module. - Runtime API: A means for the node-side behavior to access structured information based on the state of a fork of the blockchain. 
diff --git a/roadmap/implementers-guide/src/messaging.md b/roadmap/implementers-guide/src/messaging.md index 1e782e155be17924e455956720af871577a9122e..edc810e034154278e53e78b1e563dcbf299b1613 100644 --- a/roadmap/implementers-guide/src/messaging.md +++ b/roadmap/implementers-guide/src/messaging.md @@ -26,20 +26,28 @@ The downward message queue doesn't have a cap on its size and it is up to the re that prevent spamming in place. Upward Message Passing (UMP) is a mechanism responsible for delivering messages in the opposite direction: -from a parachain up to the relay chain. Upward messages can serve different purposes and can be of different - kinds. +from a parachain up to the relay chain. Upward messages are essentially byte blobs. However, they are interpreted +by the relay-chain according to the XCM standard. -One kind of message is `Dispatchable`. They could be thought of similarly to extrinsics sent to a relay chain: they also -invoke exposed runtime entrypoints, they consume weight and require fees. The difference is that they originate from -a parachain. Each parachain has a queue of dispatchables to be executed. There can be only so many dispatchables at a time. +The XCM standard is a common vocabulary of messages. The XCM standard doesn't require a particular interpretation of +a message. However, the parachains host (e.g. Polkadot) guarantees certain semantics for those. + +Moreover, while most XCM messages are handled by the on-chain XCM interpreter, some of the messages are special +cased. Specifically, those messages can be checked during the acceptance criteria and thus invalid +messages would lead to rejecting the candidate itself. + +One kind of such a message is `Xcm::Transact`. This upward message can be seen as a way for a parachain +to execute arbitrary entrypoints on the relay-chain. `Xcm::Transact` messages resemble regular extrinsics with the exception that they +originate from a parachain. 
+ +The payload of `Xcm::Transact` messages is referred to as `Dispatchable`. When a candidate with such a message is enacted +the dispatchables are put into a queue corresponding to the parachain. There can be only so many dispatchables in that queue at once. The weight that processing of the dispatchables can consume is limited by a preconfigured value. Therefore, it is possible that some dispatchables will be left for later blocks. To make the dispatching more fair, the queues are processed turn-by-turn in a round robin fashion. -Upward messages are also used by a parachain to request opening and closing HRMP channels (HRMP will be described below). - -Other kinds of upward messages can be introduced in the future as well. Potential candidates are -new validation code signalling, or other requests to the relay chain. +The second category of special cased XCM messages is for horizontal messaging channel management, +namely messages meant to request opening and closing HRMP channels (HRMP will be described below). ## Horizontal Message Passing diff --git a/roadmap/implementers-guide/src/node/README.md b/roadmap/implementers-guide/src/node/README.md index f6d7e7a887f7cd505cf5fd0a83e9ef63ae78cb09..44eeb4bf977b338aa25d5775cff91c8e4a69ca6b 100644 --- a/roadmap/implementers-guide/src/node/README.md +++ b/roadmap/implementers-guide/src/node/README.md @@ -10,7 +10,14 @@ The architecture of the node-side behavior aims to embody the Rust principles of Many operations that need to be carried out involve the network, which is asynchronous. This asynchrony affects all core subsystems that rely on the network as well. The approach of hierarchical state machines is well-suited to this kind of environment. -We introduce a hierarchy of state machines consisting of an overseer supervising subsystems, where Subsystems can contain their own internal hierarchy of jobs. This is elaborated on in the next section on Subsystems. 
+We introduce the following components. + +## Components + +The node architecture consists of the following components: + * The Overseer (and subsystems): A hierarchy of state machines where an overseer supervises subsystems. Subsystems can contain their own internal hierarchy of jobs. This is elaborated on in the next section on Subsystems. + * A block proposer: Logic triggered by the consensus algorithm of the chain when the node should author a block. + * A GRANDPA voting rule: A strategy for selecting chains to vote on in the GRANDPA algorithm to ensure that only valid parachain candidates appear in finalized relay-chain blocks. ## Assumptions diff --git a/roadmap/implementers-guide/src/node/approval/README.md b/roadmap/implementers-guide/src/node/approval/README.md new file mode 100644 index 0000000000000000000000000000000000000000..2d0815376728a76185a18d7818b72acb12ae6191 --- /dev/null +++ b/roadmap/implementers-guide/src/node/approval/README.md @@ -0,0 +1,7 @@ +# Approval Subsystems + +The approval subsystems implement the node-side of the [Approval Protocol](../../protocol-approval.md). + +We make a divide between the [assignment/voting logic](approval-voting.md) and the [distribution logic](approval-distribution.md) that distributes assignment certifications and approval votes. The logic in the assignment and voting also informs the GRANDPA voting rule on how to vote. + +This category of subsystems also contains a module for [participating in live disputes](dispute-participation.md) and tracks all observed votes (backing or approval) by all validators on all candidates. 
\ No newline at end of file diff --git a/roadmap/implementers-guide/src/node/approval/approval-distribution.md b/roadmap/implementers-guide/src/node/approval/approval-distribution.md new file mode 100644 index 0000000000000000000000000000000000000000..6ded8b6db9adf921d233840b6e6ed277084fb757 --- /dev/null +++ b/roadmap/implementers-guide/src/node/approval/approval-distribution.md @@ -0,0 +1,196 @@ +# Approval Distribution + +A subsystem for the distribution of assignments and approvals for approval checks on candidates over the network. + +The [Approval Voting](approval-voting.md) subsystem is responsible for active participation in a protocol designed to select a sufficient number of validators to check each and every candidate which appears in the relay chain. Statements of participation in this checking process are divided into two kinds: + - **Assignments** indicate that validators have been selected to do checking + - **Approvals** indicate that validators have checked and found the candidate satisfactory. + +The [Approval Voting](approval-voting.md) subsystem handles all the issuing and tallying of this protocol, but this subsystem is responsible for the disbursal of statements among the validator-set. + +The inclusion pipeline of candidates concludes after availability, and only after inclusion do candidates actually get pushed into the approval checking pipeline. As such, this protocol deals with the candidates _made available by_ particular blocks, as opposed to the candidates which actually appear within those blocks, which are the candidates _backed by_ those blocks. Unless stated otherwise, whenever we reference a candidate partially by block hash, we are referring to the set of candidates _made available by_ those blocks. 
+ +We implement this protocol as a gossip protocol, and like other parachain-related gossip protocols our primary concerns are about ensuring fast message propagation while maintaining an upper bound on the number of messages any given node must store at any time. + +Approval messages should always follow assignments, so we need to be able to discern two pieces of information based on our [View](../../types/network.md#universal-types): + 1. Is a particular assignment relevant under a given `View`? + 2. Is a particular approval relevant to any assignment in a set? + +It is acceptable for these two queries to yield false negatives with respect to our peers' views. For our own local view, they must not yield false negatives. When applied to our peers' views, it is acceptable for them to yield false negatives. The reason for that is that our peers' views may be beyond ours, and we are not capable of fully evaluating them. Once we have caught up, we can check again for false negatives to continue distributing. + +For assignments, what we need to be checking is whether we are aware of the (block, candidate) pair that the assignment references. For approvals, we need to be aware of an assignment by the same validator which references the candidate being approved. + +However, awareness on its own of a (block, candidate) pair would imply that even ancient candidates all the way back to the genesis are relevant. We are actually not interested in anything before finality. + + +## Protocol + +## Functionality + +```rust +type BlockScopedCandidate = (Hash, CandidateHash); + +/// The `State` struct is responsible for tracking the overall state of the subsystem. +/// +/// It tracks metadata about our view of the unfinalized chain, which assignments and approvals we have seen, and our peers' views. +struct State { + // These three fields are used in conjunction to construct a view over the unfinalized chain. 
+ blocks_by_number: BTreeMap>, + blocks: HashMap, + finalized_number: BlockNumber, + + // Peer view data is partially stored here, and partially inline within the `BlockEntry`s + peer_views: HashMap, +} + +enum MessageFingerprint { + Assigment(Hash, u32, ValidatorIndex), + Approval(Hash, u32, ValidatorIndex), +} + +struct Knowledge { + known_messages: HashSet, +} + +/// Information about blocks in our current view as well as whether peers know of them. +struct BlockEntry { + // Peers who we know are aware of this block and thus, the candidates within it. This maps to their knowledge of messages. + known_by: HashMap, + // The number of the block. + number: BlockNumber, + // The parent hash of the block. + parent_hash: Hash, + // Our knowledge of messages. + knowledge: Knowledge, + // A votes entry for each candidate. + candidates: IndexMap, +} + +enum ApprovalState { + Assigned(AssignmentCert), + Approved(AssignmentCert, ApprovalSignature), +} + +/// Information about candidates in the context of a particular block they are included in. In other words, +/// multiple `CandidateEntry`s may exist for the same candidate, if it is included by multiple blocks - this is likely the case +/// when there are forks. +struct CandidateEntry { + approvals: HashMap, +} +``` + +### Network updates + +#### `NetworkBridgeEvent::PeerConnected` + +Add a blank view to the `peer_views` state. + +#### `NetworkBridgeEvent::PeerDisconnected` + +Remove the view under the associated `PeerId` from `State::peer_views`. + +Iterate over every `BlockEntry` and remove `PeerId` from it. + +#### `NetworkBridgeEvent::PeerViewChange` + +Invoke `unify_with_peer(peer, view)` to catch them up to messages we have. + +We also need to use the `view.finalized_number` to remove the `PeerId` from any blocks that it won't be wanting information about anymore. 
Note that we have to be on guard for peers doing crazy stuff like jumping their `finalized_number` forward 10 trillion blocks to try and get us stuck in a loop for ages. + +One of the safeguards we can implement is to reject view updates from peers where the new `finalized_number` is less than the previous. + +We augment that by defining `constrain(x)` to output the x bounded by the first and last numbers in `state.blocks_by_number`. + +From there, we can loop backwards from `constrain(view.finalized_number)` until `constrain(last_view.finalized_number)` is reached, removing the `PeerId` from all `BlockEntry`s referenced at that height. We can break the loop early if we ever exit the bound supplied by the first block in `state.blocks_by_number`. + +#### `NetworkBridgeEvent::OurViewChange` + +Prune all lists from `blocks_by_number` with number less than or equal to `view.finalized_number`. Prune all the `BlockEntry`s referenced by those lists. + +#### `NetworkBridgeEvent::PeerMessage` + +If the message is of type `ApprovalDistributionV1Message::Assignment(assignment_cert, claimed_index)`, then call `import_and_circulate_assignment(MessageSource::Peer(sender), assignment_cert, claimed_index)` + +If the message is of type `ApprovalDistributionV1Message::Approval(approval_vote)`, then call `import_and_circulate_approval(MessageSource::Peer(sender), approval_vote)` + +### Subsystem Updates + +#### `ApprovalDistributionMessage::NewBlocks` + +Create `BlockEntry` and `CandidateEntries` for all blocks. + +For all peers: + * Compute `view_intersection` as the intersection of the peer's view blocks with the hashes of the new blocks. + * Invoke `unify_with_peer(peer, view_intersection)`. + +#### `ApprovalDistributionMessage::DistributeAssignment` + +Load the corresponding `BlockEntry`. Distribute to all peers in `known_by`. Add to the corresponding `CandidateEntry`. + +#### `ApprovalDistributionMessage::DistributeApproval` + +Load the corresponding `BlockEntry`. 
Distribute to all peers in `known_by`. Add to the corresponding `CandidateEntry`. + +### Utility + +```rust +enum MessageSource { + Peer(PeerId), + Local, +} +``` + +#### `import_and_circulate_assignment(source: MessageSource, assignment: IndirectAssignmentCert, claimed_candidate_index: u32)` + +Imports an assignment cert referenced by block hash and candidate index. As a postcondition, if the cert is valid, it will have distributed the cert to all peers who have the block in their view, with the exclusion of the peer referenced by the `MessageSource`. + + * Load the BlockEntry using `assignment.block_hash`. If it does not exist, report the source if it is `MessageSource::Peer` and return. + * Compute a fingerprint for the `assignment` using `claimed_candidate_index`. + * If the source is `MessageSource::Peer(sender)`: + * check if `peer` appears under `known_by` and whether the fingerprint is in the `known_messages` of the peer. If the peer does not know the block, report for providing data out-of-view and proceed. If the peer does know the block and the knowledge contains the fingerprint, report for providing replicate data and return. + * If the message fingerprint appears under the `BlockEntry`'s `Knowledge`, give the peer a small positive reputation boost and return. Note that we must do this after checking for out-of-view to avoid being spammed. If we did this check earlier, a peer could provide data out-of-view repeatedly and be rewarded for it. + * Dispatch `ApprovalVotingMessage::CheckAndImportAssignment(assignment)` and wait for the response. + * If the result is `AssignmentCheckResult::Accepted` or `AssignmentCheckResult::AcceptedDuplicate` + * If the vote was accepted but not duplicate, give the peer a positive reputation boost + * add the fingerprint to both our and the peer's knowledge in the `BlockEntry`. Note that we only doing this after making sure we have the right fingerprint. 
+ * If the result is `AssignmentCheckResult::TooFarInFuture`, mildly punish the peer and return. + * If the result is `AssignmentCheckResult::Bad`, punish the peer and return. + * If the source is `MessageSource::Local(CandidateIndex)` + * check if the fingerprint appears under the `BlockEntry's` knowledge. If not, add it. + * Load the candidate entry for the given candidate index. It should exist unless there is a logic error in the approval voting subsystem. + * Set the approval state for the validator index to `ApprovalState::Assigned` unless the approval state is set already. This should not happen as long as the approval voting subsystem instructs us to ignore duplicate assignments. + * Dispatch a `ApprovalDistributionV1Message::Assignment(assignment, candidate_index)` to all peers in the `BlockEntry`'s `known_by` set, excluding the peer in the `source`, if `source` has kind `MessageSource::Peer`. Add the fingerprint of the assignment to the knowledge of each peer. + + +#### `import_and_circulate_approval(source: MessageSource, approval: IndirectSignedApprovalVote)` + +Imports an approval signature referenced by block hash and candidate index. + + * Load the BlockEntry using `approval.block_hash` and the candidate entry using `approval.candidate_entry`. If either does not exist, report the source if it is `MessageSource::Peer` and return. + * Compute a fingerprint for the approval. + * Compute a fingerprint for the corresponding assignment. If the `BlockEntry`'s knowledge does not contain that fingerprint, then report the source if it is `MessageSource::Peer` and return. All references to a fingerprint after this refer to the approval's, not the assignment's. + * If the source is `MessageSource::Peer(sender)`: + * check if `peer` appears under `known_by` and whether the fingerprint is in the `known_messages` of the peer. If the peer does not know the block, report for providing data out-of-view and proceed. 
If the peer does know the block and the knowledge contains the fingerprint, report for providing replicate data and return. + * If the message fingerprint appears under the `BlockEntry`'s `Knowledge`, give the peer a small positive reputation boost and return. Note that we must do this after checking for out-of-view to avoid being spammed. If we did this check earlier, a peer could provide data out-of-view repeatedly and be rewarded for it. + * Dispatch `ApprovalVotingMessage::CheckAndImportApproval(approval)` and wait for the response. + * If the result is `VoteCheckResult::Accepted(())`: + * Give the peer a positive reputation boost and add the fingerprint to both our and the peer's knowledge. + * If the result is `VoteCheckResult::Bad`: + * Report the peer and return. + * Load the candidate entry for the given candidate index. It should exist unless there is a logic error in the approval voting subsystem. + * Set the approval state for the validator index to `ApprovalState::Approved`. It should already be in the `Assigned` state as our `BlockEntry` knowledge contains a fingerprint for the assignment. + * Dispatch a `ApprovalDistributionV1Message::Approval(approval)` to all peers in the `BlockEntry`'s `known_by` set, excluding the peer in the `source`, if `source` has kind `MessageSource::Peer`. Add the fingerprint of the assignment to the knowledge of each peer. Note that this obeys the politeness conditions: + * We guarantee elsewhere that all peers within `known_by` are aware of all assignments relative to the block. + * We've checked that this specific approval has a corresponding assignment within the `BlockEntry`. + * Thus, all peers are aware of the assignment or have a message to them in-flight which will make them so. + + +#### `unify_with_peer(peer: PeerId, view)`: + +For each block in the view: + 1. Initialize a set `fresh_blocks = {}` + 2. Load the `BlockEntry` for the block. 
If the block is unknown, or the number is less than the view's finalized number, go to step 6. + 3. Inspect the `known_by` set of the `BlockEntry`. If the peer is already present, go to step 6. + 4. Add the peer to `known_by` with a cloned version of `block_entry.knowledge`, and add the hash of the block to `fresh_blocks`. + 5. Return to step 2 with the ancestor of the block. + 6. For each block in `fresh_blocks`, send all assignments and approvals for all candidates in those blocks to the peer. \ No newline at end of file diff --git a/roadmap/implementers-guide/src/node/approval/approval-voting.md b/roadmap/implementers-guide/src/node/approval/approval-voting.md new file mode 100644 index 0000000000000000000000000000000000000000..d1addd207f167fd889312a4e17100ef8b5e9d769 --- /dev/null +++ b/roadmap/implementers-guide/src/node/approval/approval-voting.md @@ -0,0 +1,285 @@ +# Approval Voting + +Reading the [section on the approval protocol](../../protocol-approval.md) will likely be necessary to understand the aims of this subsystem. + +## Protocol + +Input: + - `ApprovalVotingMessage::CheckAndImportAssignment` + - `ApprovalVotingMessage::CheckAndImportApproval` + - `ApprovalVotingMessage::ApprovedAncestor` + +Output: + - `ApprovalDistributionMessage::DistributeAssignment` + - `ApprovalDistributionMessage::DistributeApproval` + - `RuntimeApiMessage::Request` + - `ChainApiMessage` + - `AvailabilityRecoveryMessage::Recover` + - `CandidateExecutionMessage::ValidateFromExhaustive` + +## Functionality + +The approval voting subsystem is responsible for casting votes and determining approval of candidates and as a result, blocks. + +This subsystem wraps a database which is used to store metadata about unfinalized blocks and the candidates within them. Candidates may appear in multiple blocks, and assignment criteria are chosen differently based on the hash of the block they appear in.
+ +## Database Schema + +The database schema is designed with the following goals in mind: + 1. To provide an easy index from unfinalized blocks to candidates + 1. To provide a lookup from candidate hash to approval status + 1. To be easy to clear on start-up. What has happened while we were offline is unimportant. + 1. To be fast to clear entries outdated by finality + +Structs: + +```rust +struct TrancheEntry { + tranche: DelayTranche, + // assigned validators who have not yet approved, and the instant we received + // their assignment. + assignments: Vec<(ValidatorIndex, Tick)>, +} + +struct OurAssignment { + cert: AssignmentCert, + tranche: DelayTranche, + validator_index: ValidatorIndex, + triggered: bool, +} + +struct ApprovalEntry { + tranches: Vec, // sorted ascending by tranche number. + backing_group: GroupIndex, + // When the next wakeup for this entry should occur. This is either to + // check a no-show or to check if we need to broadcast an assignment. + next_wakeup: Tick, + our_assignment: Option, + assignments: Bitfield, // n_validators bits + approved: bool, +} + +struct CandidateEntry { + candidate: CandidateReceipt, + session: SessionIndex, + // Assignments are based on blocks, so we need to track assignments separately + // based on the block we are looking at. + block_assignments: HashMap, + approvals: Bitfield, // n_validators bits +} + +struct BlockEntry { + block_hash: Hash, + session: SessionIndex, + slot: SlotNumber, + // random bytes derived from the VRF submitted within the block by the block + // author as a credential and used as input to approval assignment criteria. + relay_vrf_story: [u8; 32], + // The candidates included as-of this block and the index of the core they are + // leaving. Sorted ascending by core index. + candidates: Vec<(CoreIndex, Hash)>, + // A bitfield where the i'th bit corresponds to the i'th candidate in `candidates`. 
+ // The i'th bit is `true` iff the candidate has been approved in the context of + // this block. The block can be considered approved has all bits set to 1 + approved_bitfield: Bitfield, + rotation_offset: GroupIndex, + children: Vec, +} + +// slot_duration * 2 + DelayTranche gives the number of delay tranches since the +// unix epoch. +type Tick = u64; + +struct StoredBlockRange(BlockNumber, BlockNumber); +``` + +In the schema, we map + +``` +"StoredBlocks" => StoredBlockRange +BlockNumber => Vec +BlockHash => BlockEntry +CandidateHash => CandidateEntry +``` + +## Logic + +```rust +const APPROVAL_SESSIONS: SessionIndex = 6; +``` + +In-memory state: + +```rust +struct ApprovalVoteRequest { + validator_index: ValidatorIndex, + block_hash: Hash, + candidate_index: u32, +} + +struct State { + earliest_session: SessionIndex, + session_info: Vec, + keystore: KeyStorePtr, + wakeups: BTreeMap>, // Tick -> [(Relay Block, Candidate Hash)] + + // These are connected to each other. + approval_vote_tx: mpsc::Sender, + approval_vote_rx: mpsc::Receiver, +} +``` + +[`SessionInfo`](../../runtime/session_info.md) + +On start-up, we clear everything currently stored by the database. This is done by loading the `StoredBlockRange`, iterating through each block number, iterating through each block hash, and iterating through each candidate referenced by each block. Although this is `O(o*n*p)`, we don't expect to have more than a few unfinalized blocks at any time and in extreme cases, a few thousand. The clearing operation should be relatively fast as a result. + +Main loop: + * Each iteration, select over all of + * The next `Tick` in `wakeups`: trigger `wakeup_process` for each `(Hash, Hash)` pair scheduled under the `Tick` and then remove all entries under the `Tick`. 
+ * The next message from the overseer: handle the message as described in the [Incoming Messages section](#incoming-messages) + * The next request from `approval_vote_rx`: handle with `issue_approval` + +### Incoming Messages + +#### `OverseerSignal::BlockFinalized` + +On receiving an `OverseerSignal::BlockFinalized(h)`, we fetch the block number `b` of that block from the ChainApi subsystem. We update our `StoredBlockRange` to begin at `b+1`. Additionally, we remove all block entries and candidates referenced by them up to and including `b`. Lastly, we prune out all descendents of `h` transitively: when we remove a `BlockEntry` with number `b` that is not equal to `h`, we recursively delete all the `BlockEntry`s referenced as children. We remove the `block_assignments` entry for the block hash and if `block_assignments` is now empty, remove the `CandidateEntry`. + + +#### `OverseerSignal::ActiveLeavesUpdate` + +On receiving an `OverseerSignal::ActiveLeavesUpdate(update)`: + * We determine the set of new blocks that were not in our previous view. This is done by querying the ancestry of all new items in the view and contrasting against the stored `BlockNumber`s. Typically, there will be only one new block. We fetch the headers and information on these blocks from the ChainApi subsystem. + * We update the `StoredBlockRange` and the `BlockNumber` maps. + * We use the RuntimeApiSubsystem to determine information about these blocks. It is generally safe to assume that runtime state is available for recent, unfinalized blocks. In the case that it isn't, it means that we are catching up to the head of the chain and needn't worry about assignments to those blocks anyway, as the security assumption of the protocol tolerates nodes being temporarily offline or out-of-date. + * We fetch the set of candidates included by each block by dispatching a `RuntimeApiRequest::CandidateEvents` and checking the `CandidateIncluded` events. 
+ * We fetch the session of the block by dispatching a `session_index_for_child` request with the parent-hash of the block. + * If the `session index - APPROVAL_SESSIONS > state.earliest_session`, then bump `state.earliest_session` to that amount and prune earlier sessions. + * If the session isn't in our `state.session_info`, load the session info for it and for all sessions since the earliest-session, including the earliest-session, if that is missing. And it can be, just after pruning, if we've done a big jump forward, as is the case when we've just finished chain synchronization. + * If any of the runtime API calls fail, we just warn and skip the block. + * We use the RuntimeApiSubsystem to determine the set of candidates included in these blocks and use BABE logic to determine the slot number and VRF of the blocks. + * We also note how late we appear to have received the block. We create a `BlockEntry` for each block and a `CandidateEntry` for each candidate obtained from `CandidateIncluded` events after making a `RuntimeApiRequest::CandidateEvents` request. + * Ensure that the `CandidateEntry` contains a `block_assignments` entry for the block, with the correct backing group set. + * If a validator in this session, compute and assign `our_assignment` for the `block_assignments` + * Only if not a member of the backing group. + * Run `RelayVRFModulo` and `RelayVRFDelay` according to the [the approvals protocol section](../../protocol-approval.md#assignment-criteria). Ensure that the assigned core derived from the output is covered by the auxiliary signature aggregated in the `VRFProof`. + * invoke `process_wakeup(relay_block, candidate)` for each new candidate in each new block - this will automatically broadcast a 0-tranche assignment, kick off approval work, and schedule the next delay. + * Dispatch an `ApprovalDistributionMessage::NewBlocks` with the meta information filled out for each new block.
+ +#### `ApprovalVotingMessage::CheckAndImportAssignment` + +On receiving a `ApprovalVotingMessage::CheckAndImportAssignment` message, we check the assignment cert against the block entry. The cert itself contains information necessary to determine the candidate that is being assigned-to. In detail: + * Load the `BlockEntry` for the relay-parent referenced by the message. If there is none, return `VoteCheckResult::Report`. + * Fetch the `SessionInfo` for the session of the block + * Determine the assignment key of the validator based on that. + * Check the assignment cert + * If the cert kind is `RelayVRFModulo`, then the certificate is valid as long as `sample < session_info.relay_vrf_samples` and the VRF is valid for the validator's key with the input `block_entry.relay_vrf_story ++ sample.encode()` as described with [the approvals protocol section](../../protocol-approval.md#assignment-criteria). We set `core_index = vrf.make_bytes().to_u32() % session_info.n_cores`. If the `BlockEntry` causes inclusion of a candidate at `core_index`, then this is a valid assignment for the candidate at `core_index` and has delay tranche 0. Otherwise, it can be ignored. + * If the cert kind is `RelayVRFDelay`, then we check if the VRF is valid for the validator's key with the input `block_entry.relay_vrf_story ++ cert.core_index.encode()` as described in [the approvals protocol section](../../protocol-approval.md#assignment-criteria). The cert can be ignored if the block did not cause inclusion of a candidate on that core index. Otherwise, this is a valid assignment for the included candidate. The delay tranche for the assignment is determined by reducing `(vrf.make_bytes().to_u64() % (session_info.n_delay_tranches + session_info.zeroth_delay_tranche_width)).saturating_sub(session_info.zeroth_delay_tranche_width)`. + * We also check that the core index derived by the output is covered by the `VRFProof` by means of an auxiliary signature. 
+ * If the delay tranche is too far in the future, return `VoteCheckResult::Ignore`. + * `import_checked_assignment` + * return the appropriate `VoteCheckResult` on the response channel. + +#### `ApprovalVotingMessage::CheckAndImportApproval` + +On receiving a `CheckAndImportApproval(indirect_approval_vote, response_channel)` message: + * Fetch the `BlockEntry` from the indirect approval vote's `block_hash`. If none, return `ApprovalCheckResult::Bad`. + * Fetch the `CandidateEntry` from the indirect approval vote's `candidate_index`. If the block did not trigger inclusion of enough candidates, return `ApprovalCheckResult::Bad`. + * Construct a `SignedApprovalVote` using the candidate hash and check against the validator's approval key, based on the session info of the block. If invalid or no such validator, return `ApprovalCheckResult::Bad`. + * Send `ApprovalCheckResult::Accepted` + * `import_checked_approval(BlockEntry, CandidateEntry, ValidatorIndex)` + +#### `ApprovalVotingMessage::ApprovedAncestor` + +On receiving an `ApprovedAncestor(Hash, BlockNumber, response_channel)`: + * Iterate over the ancestry of the hash all the way back to block number given, starting from the provided block hash. + * Keep track of an `all_approved_max: Option`. + * For each block hash encountered, load the `BlockEntry` associated. If any are not found, return `None` on the response channel and conclude. + * If the block entry's `approval_bitfield` has all bits set to 1 and `all_approved_max == None`, set `all_approved_max = Some(current_hash)`. + * If the block entry's `approval_bitfield` has any 0 bits, set `all_approved_max = None`. + * After iterating all ancestry, return `all_approved_max`. 
+ +### Utility + +#### `tranche_now(slot_number, time) -> DelayTranche` + * Convert `time.saturating_sub(slot_number.to_time())` to a delay tranches value + +#### `import_checked_assignment` + * Load the candidate in question and access the `approval_entry` for the block hash the cert references. + * Ignore if we already observe the validator as having been assigned. + * Ensure the validator index is not part of the backing group for the candidate. + * Ensure the validator index is not present in the approval entry already. + * Create a tranche entry for the delay tranche in the approval entry and note the assignment within it. + * Note the candidate index within the approval entry. + * Schedule a wakeup with `next_wakeup`. + +#### `import_checked_approval(BlockEntry, CandidateEntry, ValidatorIndex)` + * Set the corresponding bit of the `approvals` bitfield in the `CandidateEntry` to `1`. If already `1`, return. + * For each `ApprovalEntry` in the `CandidateEntry` (typically only 1), check whether the validator is assigned as a checker. + * If so, set `n_tranches = tranches_to_approve(approval_entry, tranche_now(block.slot, now()))`. + * If `check_approval(block_entry, approval_entry, n_tranches)` is true, set the corresponding bit in the `block_entry.approved_bitfield`. + +#### `tranches_to_approve(approval_entry, tranche_now) -> RequiredTranches` + +```rust +enum RequiredTranches { + // All validators appear to be required, based on tranches already taken and remaining no-shows. + All, + // More tranches required - We're awaiting more assignments. The given `DelayTranche` indicates the + // upper bound of tranches that should broadcast based on the last no-show. + Pending(DelayTranche), + // An exact number of required tranches and a number of no-shows. This indicates that the amount of `needed_approvals` are assigned and additionally all no-shows are covered. 
+ Exact(DelayTranche, usize), +} +``` + + * Determine the amount of tranches `n_tranches` our view of the protocol requires of this approval entry. + * Ignore all tranches beyond `tranche_now`. + * First, take tranches until we have at least `session_info.needed_approvals`. Call the number of tranches taken `k` + * Then, count no-shows in tranches `0..k`. For each no-show, we require another non-empty tranche. Take another non-empty tranche for each no-show, so now we've taken `l = k + j` tranches, where `j` is at least the number of no-shows within tranches `0..k`. + * Count no-shows in tranches `k..l` and for each of those, take another non-empty tranche for each no-show. Repeat so on until either + * We run out of tranches to take, having not received any assignments past a certain point. In this case we set `n_tranches` to a special value `RequiredTranches::Pending(last_taken_tranche + uncovered_no_shows)` which indicates that new assignments are needed. `uncovered_no_shows` is the number of no-shows we have not yet covered with `last_taken_tranche`. + * All no-shows are covered by at least one non-empty tranche. Set `n_tranches` to the number of tranches taken and return `RequiredTranches::Exact(n_tranches)`. + * The amount of assignments in non-empty & taken tranches plus the amount of needed extras equals or exceeds the total number of validators for the approval entry, which can be obtained by measuring the bitfield. In this case we return a special value `RequiredTranches::All` indicating that all validators have effectively been assigned to check. + * return `n_tranches` + +#### `check_approval(block_entry, approval_entry, n_tranches) -> bool` + * If `n_tranches` is `RequiredTranches::Pending`, return false + * If `n_tranches` is `RequiredTranches::All`, then we return `3 * n_approvals > 2 * n_validators`. 
+ * If `n_tranches` is `RequiredTranches::Exact(tranche, no_shows)`, then we return whether all assigned validators up to `tranche` less `no_shows` have approved. e.g. if we had 5 tranches and 1 no-show, we would accept all validators in tranches 0..=5 except for 1 approving. In that example, we also accept all validators in tranches 0..=5 approving, but that would indicate that the `RequiredTranches` value was incorrectly constructed, so it is not realistic. If there are more missing approvals than there are no-shows, that indicates that there are some assignments which are not yet no-shows, but may become no-shows. + +#### `process_wakeup(relay_block, candidate_hash)` + * Load the `BlockEntry` and `CandidateEntry` from disk. If either is not present, this may have lost a race with finality and can be ignored. Also load the `ApprovalEntry` for the block and candidate. + * Set `required = tranches_to_approve(approval_entry, tranche_now(block.slot, now()))` + * Determine if we should trigger our assignment. + * If we've already triggered or `OurAssignment` is `None`, we do not trigger. + * If `required` is `RequiredTranches::All`, then we trigger if `check_approval(block_entry, approval_entry, All)` is false. + * If `required` is `RequiredTranches::Pending(max)`, then we trigger if our assignment's tranche is less than or equal to `max`. + * If `required` is `RequiredTranches::Exact(tranche)` then we do not trigger, because this value indicates that no new assignments are needed at the moment. + * If we should trigger our assignment + * Import the assignment to the `ApprovalEntry` + * Broadcast on network with an `ApprovalDistributionMessage::DistributeAssignment`. + * Kick off approval work with `launch_approval` + * Schedule another wakeup based on `next_wakeup` + +#### `next_wakeup(approval_entry, candidate_entry)`: + * If the `approval_entry` is approved, this doesn't need to be woken up again.
+ * Return the earlier of our next no-show timeout or the tranche of our assignment, if not yet triggered + * Our next no-show timeout is computed by finding the earliest-received assignment within `n_tranches` for which we have not received an approval and adding `to_ticks(session_info.no_show_slots)` to it. + +#### `launch_approval(SessionIndex, CandidateReceipt, ValidatorIndex, block_hash, candidate_index)`: + * Extract the public key of the `ValidatorIndex` from the `SessionInfo` for the session. + * Issue an `AvailabilityRecoveryMessage::RecoverAvailableData(candidate, session_index, response_sender)` + * Load the historical validation code of the parachain by dispatching a `RuntimeApiRequest::HistoricalValidationCode(`descriptor.para_id`, `descriptor.relay_parent`)` against the state of `block_hash`. + * Spawn a background task with a clone of `approval_vote_tx` + * Wait for the available data + * Issue a `CandidateValidationMessage::ValidateFromExhaustive` message + * Wait for the result of validation + * If valid, issue a message on `approval_vote_tx` detailing the request. + +#### `issue_approval(request)`: + * Fetch the block entry and candidate entry. Ignore if `None` - we've probably just lost a race with finality. + * Construct a `SignedApprovalVote` with the validator index for the session. + * `import_checked_approval(block_entry, candidate_entry, validator_index)` + * Construct an `IndirectSignedApprovalVote` using the information about the vote. + * Dispatch `ApprovalDistributionMessage::DistributeApproval`.
diff --git a/roadmap/implementers-guide/src/node/approval/dispute-participation.md b/roadmap/implementers-guide/src/node/approval/dispute-participation.md new file mode 100644 index 0000000000000000000000000000000000000000..10c278c20df114f451a536f0f357cf787b619b22 --- /dev/null +++ b/roadmap/implementers-guide/src/node/approval/dispute-participation.md @@ -0,0 +1,5 @@ +# Dispute Participation + +## Protocol + +## Functionality \ No newline at end of file diff --git a/roadmap/implementers-guide/src/node/availability/availability-recovery.md b/roadmap/implementers-guide/src/node/availability/availability-recovery.md new file mode 100644 index 0000000000000000000000000000000000000000..a05b9e9c17499a3794943c81f1b3b50eed3f3f0f --- /dev/null +++ b/roadmap/implementers-guide/src/node/availability/availability-recovery.md @@ -0,0 +1,169 @@ +# Availability Recovery + +> TODO: + +This subsystem is the inverse of the [Availability Distribution](availability-distribution.md) subsystem: validators will serve the availability chunks kept in the availability store to nodes who connect to them. And the subsystem will also implement the other side: the logic for nodes to connect to validators, request availability pieces, and reconstruct the `AvailableData`. + +This version of the availability recovery subsystem is based off of direct connections to validators. In order to recover any given `AvailableData`, we must recover at least `f + 1` pieces from validators of the session. Thus, we will connect to and query randomly chosen validators until we have received `f + 1` pieces. 
+ +## Protocol + +`PeerSet`: `Validation` + +Input: + +- NetworkBridgeUpdateV1(update) +- AvailabilityRecoveryMessage::RecoverAvailableData(candidate, session, response) + +Output: + +- NetworkBridge::SendValidationMessage +- NetworkBridge::ReportPeer +- AvailabilityStore::QueryChunk + +## Functionality + +We hold a state which tracks the current recovery interactions we have live, as well as which request IDs correspond to which interactions. An interaction is a structure encapsulating all interaction with the network necessary to recover the available data. + +```rust +type ChunkResponse = Result<(PeerId, ErasureChunk), Unavailable>; + +struct AwaitedChunk { + issued_at: Instant, + validator_index: ValidatorIndex, + candidate_hash: CandidateHash, + response: ResponseChannel, +} + +struct State { + /// Each interaction is implemented as its own async task, and these handles are for communicating with them. + interactions: Map, + /// A recent block hash for which state should be available. + live_block_hash: Hash, + discovering_validators: Map>, + live_chunk_requests: Map, + next_request_id: RequestId, + connecting_validators: Stream<(AuthorityDiscoveryId, PeerId)>, + + /// interaction communication. This is cloned and given to interactions that are spun up. + from_interaction_tx: Sender, + /// receiver for messages from interactions. + from_interaction_rx: Receiver, + + // An LRU cache of recently recovered data. + availability_lru: LruCache>, +} + +struct InteractionHandle { + awaiting: Vec>>, +} + +struct Unavailable; +enum FromInteraction { + // An interaction concluded. + Concluded(CandidateHash, Result), + // Make a request of a particular chunk from a particular validator. + MakeRequest( + AuthorityDiscoveryId, + CandidateHash, + ValidatorIndex, + ResponseChannel, + ), + // Report a peer. 
+ ReportPeer( + PeerId, + Rep, + ), +} + +struct Interaction { + to_state: Sender, + validator_authority_keys: Vec, + validators: Vec, + // a random shuffling of the validators which indicates the order in which we connect to the validators and + // request the chunk from them. + shuffling: Vec, + // The number of pieces needed. + threshold: usize, + candidate_hash: Hash, + erasure_root: Hash, + received_chunks: Map, + requesting_chunks: FuturesUnordered>, +} +``` + +### Signal Handling + +On `ActiveLeavesUpdate`, if `activated` is non-empty, set `state.live_block_hash` to the first block in `Activated`. + +Ignore `BlockFinalized` signals. + +On `Conclude`, shut down the subsystem. + +#### `AvailabilityRecoveryMessage::RecoverAvailableData(receipt, session, response)` + +1. Check the `availability_lru` for the candidate and return the data if so. +1. Check if there is already an interaction handle for the request. If so, add the response handle to it. +1. Otherwise, load the session info for the given session under the state of `live_block_hash`, and initiate an interaction with *launch_interaction*. Add an interaction handle to the state and add the response channel to it. +1. If the session info is not available, return `RecoveryError::Unavailable` on the response channel. + +### From-interaction logic + +#### `FromInteraction::Concluded` + +1. Load the entry from the `interactions` map. It should always exist, if not for logic errors. Send the result to each member of `awaiting`. +1. Add the entry to the availability_lru. + +#### `FromInteraction::MakeRequest(discovery_pub, candidate_hash, validator_index, response)` + +1. Add an `AwaitedRequest` to the `discovering_validators` map under `discovery_pub`. +1. Issue a `NetworkBridgeMessage::ConnectToValidators`. +1. Add the stream of connected validator events to `state.connecting_validators`. + +#### `FromInteraction::ReportPeer(peer, rep)` + +1. Issue a `NetworkBridgeMessage::ReportPeer(peer, rep)`. 
+ +### Responding to network events. + +#### On `connecting_validators` event: + +1. If the validator exists under `discovering_validators`, remove the entry. +1. For each `AwaitedChunk` in the entry, issue a `AvailabilityRecoveryV1Message::RequestChunk(next_request_id, candidate_hash, validator_index)` and make an entry in the `live_chunk_requests` map. + +#### On receiving `AvailabilityRecoveryV1::RequestChunk(r_id, candidate_hash, validator_index)` + +1. Issue a `AvailabilityStore::QueryChunk(candidate_hash, validator_index, response)` message. +1. Whatever the result, issue a `AvailabilityRecoveryV1Message::Chunk(r_id, response)` message. + +#### On receiving `AvailabilityRecoveryV1::Chunk(r_id, chunk)` + +1. If there exists an entry under `r_id`, remove it. If there doesn't exist one, report the peer and return. If the peer in the entry doesn't match the sending peer, reinstate the entry, report the peer, and return. +1. Send the chunk response on the `awaited_chunk` for the interaction to handle. + +### Interaction logic + +#### `launch_interaction(session_index, session_info, candidate_receipt, candidate_hash)` + +1. Compute the threshold from the session info. It should be `f + 1`, where `n = 3f + k`, where `k in {1, 2, 3}`, and `n` is the number of validators. +1. Set the various fields of `Interaction` based on the validator lists in `session_info`. Compute a random shuffling of the validator indices. +1. Set the `to_state` sender to be equal to a clone of `state.from_interaction_tx`. +1. Initialize `received_chunks` to an empty set, as well as `requesting_chunks`. + +Launch the interaction as a background task running `interaction_loop(interaction)`. + +#### `interaction_loop(interaction)` + +```rust +// How many parallel requests to have going at once. +const N_PARALLEL: usize = 50; +``` + +Loop: + * Poll for new updates from `requesting_chunks`.
Check merkle proofs of any received chunks, and any failures should lead to issuance of a `FromInteraction::ReportPeer` message. + * If `received_chunks` has more than `threshold` entries, attempt to recover the data. If that fails, or a re-encoding of it doesn't match the expected erasure root, break and issue a `FromInteraction::Concluded(RecoveryError::Invalid)`. Otherwise, issue a `FromInteraction::Concluded(Ok(()))`. + * While there are fewer than `N_PARALLEL` entries in `requesting_chunks`, + * Pop the next item from `shuffling`. If it's empty and `requesting_chunks` is empty, break and issue a `FromInteraction::Concluded(RecoveryError::Unavailable)`. + * Initialize `(tx, rx)`. + * Issue a `FromInteraction::MakeRequest(validator, candidate_hash, validator_index, tx)`. + * Add `rx` to `requesting_chunks`. diff --git a/roadmap/implementers-guide/src/node/backing/candidate-backing.md b/roadmap/implementers-guide/src/node/backing/candidate-backing.md index afea5e8ee40255ec43414b08695ba870a663378f..016c5096749493f246d68c9979a7a62fbda1f99b 100644 --- a/roadmap/implementers-guide/src/node/backing/candidate-backing.md +++ b/roadmap/implementers-guide/src/node/backing/candidate-backing.md @@ -39,7 +39,7 @@ The subsystem should maintain a set of handles to Candidate Backing Jobs that ar ### On Receiving `CandidateBackingMessage` * If the message is a [`CandidateBackingMessage`][CBM]`::GetBackedCandidates`, get all backable candidates from the statement table and send them back. -* If the message is a [`CandidateBackingMessage`][CBM]`::Second`, sign and dispatch a `Seconded` statement only if we have not seconded any other candidate and have not signed a `Valid` statement for the requested candidate. Signing both a `Seconded` and `Valid` message is a double-voting misbehavior with a heavy penalty, and this could occur if another validator has seconded the same candidate and we've received their message before the internal seconding request. 
+* If the message is a [`CandidateBackingMessage`][CBM]`::Second`, sign and dispatch a `Seconded` statement only if we have not seconded any other candidate and have not signed a `Valid` statement for the requested candidate. Signing both a `Seconded` and `Valid` message is a double-voting misbehavior with a heavy penalty, and this could occur if another validator has seconded the same candidate and we've received their message before the internal seconding request. After successfully dispatching the `Seconded` statement we have to distribute the PoV. * If the message is a [`CandidateBackingMessage`][CBM]`::Statement`, count the statement to the quorum. If the statement in the message is `Seconded` and it contains a candidate that belongs to our assignment, request the corresponding `PoV` from the `PoVDistribution` and launch validation. Issue our own `Valid` or `Invalid` statement as a result. > big TODO: "contextual execution" @@ -67,26 +67,28 @@ The goal of a Candidate Backing Job is to produce as many backable candidates as ```rust match msg { - CetBackedCandidates(hash, tx) => { + GetBackedCandidates(hashes, tx) => { // Send back a set of backable candidates. 
} CandidateBackingMessage::Second(hash, candidate) => { if candidate is unknown and in local assignment { - spawn_validation_work(candidate, parachain head, validation function) + if spawn_validation_work(candidate, parachain head, validation function).await == Valid { + send(DistributePoV(pov)) + } } } CandidateBackingMessage::Statement(hash, statement) => { // count to the votes on this candidate - if let Statement::Seconded(candidate) = statement { - if candidate.parachain_id == our_assignment { - spawn_validation_work(candidate, parachain head, validation function) - } - } + if let Statement::Seconded(candidate) = statement { + if candidate.parachain_id == our_assignment { + spawn_validation_work(candidate, parachain head, validation function) + } + } } } ``` -Add `Seconded` statements and `Valid` statements to a quorum. If quorum reaches validator-group majority, send a [`ProvisionerMessage`][PM]`::ProvisionableData(ProvisionableData::BackedCandidate(BackedCandidate))` message. +Add `Seconded` statements and `Valid` statements to a quorum. If quorum reaches validator-group majority, send a [`ProvisionerMessage`][PM]`::ProvisionableData(ProvisionableData::BackedCandidate(CandidateReceipt))` message. `Invalid` statements that conflict with already witnessed `Seconded` and `Valid` statements for the given candidate, statements that are double-votes, self-contradictions and so on, should result in issuing a [`ProvisionerMessage`][PM]`::MisbehaviorReport` message for each newly detected case of this kind. ### Validating Candidates. @@ -110,14 +112,18 @@ fn spawn_validation_work(candidate, parachain head, validation function) { ### Fetch Pov Block Create a `(sender, receiver)` pair. -Dispatch a [`PoVDistributionMessage`][PDM]`::FecthPoV(relay_parent, candidate_hash, sender)` and listen on the receiver for a response. +Dispatch a [`PoVDistributionMessage`][PDM]`::FetchPoV(relay_parent, candidate_hash, sender)` and listen on the receiver for a response. 
+ +### Distribute Pov Block + +Dispatch a [`PoVDistributionMessage`][PDM]`::DistributePoV(relay_parent, candidate_descriptor, pov)`. ### Validate PoV Block Create a `(sender, receiver)` pair. Dispatch a `CandidateValidationMessage::Validate(validation function, candidate, pov, sender)` and listen on the receiver for a response. -### Distribute Signed Statemnet +### Distribute Signed Statement Dispatch a [`StatementDistributionMessage`][PDM]`::Share(relay_parent, SignedFullStatement)`. diff --git a/roadmap/implementers-guide/src/node/backing/candidate-selection.md b/roadmap/implementers-guide/src/node/backing/candidate-selection.md index db441b7f7ed03c49cc437f4bbec3473bb6175f98..4439f10dd18defbaa542660962b38c543407525e 100644 --- a/roadmap/implementers-guide/src/node/backing/candidate-selection.md +++ b/roadmap/implementers-guide/src/node/backing/candidate-selection.md @@ -16,7 +16,6 @@ Input: [`CandidateSelectionMessage`](../../types/overseer-protocol.md#candidate- Output: -- Validation requests to Validation subsystem - [`CandidateBackingMessage`](../../types/overseer-protocol.md#candidate-backing-message)`::Second` - Peer set manager: report peers (collators who have misbehaved) diff --git a/roadmap/implementers-guide/src/node/backing/pov-distribution.md b/roadmap/implementers-guide/src/node/backing/pov-distribution.md index 9b32abbd560a4dac21d246ec8f44ac197b623e93..3cf6dd995aa5d96dd982c210b23f4846344aba9f 100644 --- a/roadmap/implementers-guide/src/node/backing/pov-distribution.md +++ b/roadmap/implementers-guide/src/node/backing/pov-distribution.md @@ -17,7 +17,7 @@ Output: ## Functionality -This network protocol is responsible for distributing [`PoV`s](../../types/availability.md#proof-of-validity) by gossip. Since PoVs are heavy in practice, gossip is far from the most efficient way to distribute them. In the future, this should be replaced by a better network protocol that finds validators who have validated the block and connects to them directly. 
This protocol is descrbied. +This network protocol is responsible for distributing [`PoV`s](../../types/availability.md#proof-of-validity) by gossip. Since PoVs are heavy in practice, gossip is far from the most efficient way to distribute them. In the future, this should be replaced by a better network protocol that finds validators who have validated the block and connects to them directly. This protocol is described. This protocol is described in terms of "us" and our peers, with the understanding that this is the procedure that any honest node will run. It has the following goals: - We never have to buffer an unbounded amount of data @@ -25,7 +25,7 @@ This protocol is described in terms of "us" and our peers, with the understandin As we are gossiping, we need to track which PoVs our peers are waiting for to avoid sending them data that they are not expecting. It is not reasonable to expect our peers to buffer unexpected PoVs, just as we will not buffer unexpected PoVs. So notifying our peers about what is being awaited is key. However it is important that the notifications system is also bounded. -For this, in order to avoid reaching into the internals of the [Statement Distribution](statement-distribution.md) Subsystem, we can rely on an expected propery of candidate backing: that each validator can second up to 2 candidates per chain head. This will typically be only one, because they are only supposed to issue one, but they can equivocate if they are willing to be slashed. So we can set a cap on the number of PoVs each peer is allowed to notify us that they are waiting for at a given relay-parent. This cap will be twice the number of validators at that relay-parent. In practice, this is a very lax upper bound that can be reduced much further if desired. 
+For this, in order to avoid reaching into the internals of the [Statement Distribution](statement-distribution.md) Subsystem, we can rely on an expected property of candidate backing: that each validator can second up to 2 candidates per chain head. This will typically be only one, because they are only supposed to issue one, but they can equivocate if they are willing to be slashed. So we can set a cap on the number of PoVs each peer is allowed to notify us that they are waiting for at a given relay-parent. This cap will be twice the number of validators at that relay-parent. In practice, this is a very lax upper bound that can be reduced much further if desired. The view update mechanism of the [Network Bridge](../utility/network-bridge.md) ensures that peers are only allowed to consider a certain set of relay-parents as live. So this bounding mechanism caps the amount of data we need to store per peer at any time at `sum({ 2 * n_validators_at_head(head) * sizeof(hash) for head in view_heads })`. Additionally, peers should only be allowed to notify us of PoV hashes they are waiting for in the context of relay-parents in our own local view, which means that `n_validators_at_head` is implied to be `0` for relay-parents not in our own local view. 
diff --git a/roadmap/implementers-guide/src/node/collators/collation-generation.md b/roadmap/implementers-guide/src/node/collators/collation-generation.md index 15b510baeaaa47ee1290252d1e98f3c0c0b712a7..56401823590ddbb984ec3112ad80dac20c7ceaae 100644 --- a/roadmap/implementers-guide/src/node/collators/collation-generation.md +++ b/roadmap/implementers-guide/src/node/collators/collation-generation.md @@ -32,12 +32,16 @@ pub struct Collation { pub proof_of_validity: PoV, } +type CollatorFn = Box< + dyn Fn(Hash, &ValidationData) -> Pin>>> +>; + struct CollationGenerationConfig { key: CollatorPair, - /// Collate will be called with the relay chain hash the parachain should build + /// Collate will be called with the relay chain hash the parachain should build /// a block on and the `ValidationData` that provides information about the state /// of the parachain on the relay chain. - collator: Box Pin>>>> + collator: CollatorFn, para_id: ParaId, } ``` diff --git a/roadmap/implementers-guide/src/node/grandpa-voting-rule.md b/roadmap/implementers-guide/src/node/grandpa-voting-rule.md new file mode 100644 index 0000000000000000000000000000000000000000..57da4e0dad6957aa05c284267478410d1c74381f --- /dev/null +++ b/roadmap/implementers-guide/src/node/grandpa-voting-rule.md @@ -0,0 +1,11 @@ +# GRANDPA Voting Rule + +[GRANDPA](https://w3f-research.readthedocs.io/en/latest/polkadot/finality.html) is the finality engine of Polkadot. + +One broad goal of finality, which applies across many different blockchains, is that there should exist only one finalized block at each height in the finalized chain. Before a block at a given height is finalized, it may compete with other forks. + +GRANDPA's regular voting rule is for each validator to select the longest chain they are aware of. GRANDPA proceeds in rounds, collecting information from all online validators and determines the blocks that a supermajority of validators all have in common with each other. 
+ +For parachains, we extend the security guarantee of finality to be such that no invalid parachain candidate may be included in a finalized block. Candidates may be included in some fork of the relay chain with only a few backing votes behind them. After that point, we run the [Approvals Protocol](../protocol-approval.md), which is implemented as the [Approval Voting](approval/approval-voting.md) subsystem. This system involves validators self-selecting to re-check candidates included in all observed forks of the relay chain as well as an algorithm for observing validators' statements about assignment and approval in order to determine which candidates, and thus blocks, are with high probability valid. The highest approved ancestor of a given block can be determined by querying the Approval Voting subsystem via the [`ApprovalVotingMessage::ApprovedAncestor`](../types/overseer-protocol.md#approval-voting) message. + +Lastly, we refuse to finalize any block including a candidate for which we are aware of an ongoing dispute or of a dispute resolving against the candidate. The exact means of doing this has not been determined yet. diff --git a/roadmap/implementers-guide/src/node/utility/network-bridge.md b/roadmap/implementers-guide/src/node/utility/network-bridge.md index 5ace56b2a9c83a8d336c866248666a139e793f88..d89ec7e8fe42037a4ab29f433942a031ef58bd41 100644 --- a/roadmap/implementers-guide/src/node/utility/network-bridge.md +++ b/roadmap/implementers-guide/src/node/utility/network-bridge.md @@ -61,6 +61,12 @@ The `activated` and `deactivated` lists determine the evolution of our local vie If we are connected to the same peer on both peer-sets, we will send the peer two view updates as a result. +### Overseer Signal: BlockFinalized + +We obtain the number of the block hash in the event by issuing a `ChainApiMessage::BlockNumber` request and then issue a `ProtocolMessage::ViewUpdate` to each connected peer on each peer-set. 
We also issue a `NetworkBridgeEvent::OurViewChange` to each event handler for each protocol. + +If we are connected to the same peer on both peer-sets, we will send the peer two view updates as a result. + ### Network Event: Peer Connected Issue a `NetworkBridgeEvent::PeerConnected` for each [Event Handler](#event-handlers) of the peer-set and negotiated protocol version of the peer. diff --git a/roadmap/implementers-guide/src/protocol-approval.md b/roadmap/implementers-guide/src/protocol-approval.md index 189a2d8e8c3afa6ada3acd1f2bc92c1d0a686fda..828724916a32f3b8ff471b2d8e14e6e610f9782c 100644 --- a/roadmap/implementers-guide/src/protocol-approval.md +++ b/roadmap/implementers-guide/src/protocol-approval.md @@ -28,7 +28,7 @@ Approval has roughly two parts: - **Approval checks** listens to the assignments subsystem for outgoing assignment notices that we shall check specific candidates. It then performs these checks by first invoking the reconstruction subsystem to obtain the candidate, second invoking the candidate validity utility subsystem upon the candidate, and finally sending out an approval vote, or perhaps initiating a dispute. -These both run first as off-chain consensus protocols using messages gossiped among all validators, and second as an on-chain record of this off-chain protocols' progress after the fact. We need the on-chain protocol to provide rewards for the on-chain protocol, and doing an on-chain protocol simplify interaction with GRANDPA. +These both run first as off-chain consensus protocols using messages gossiped among all validators, and second as an on-chain record of this off-chain protocols' progress after the fact. We need the on-chain protocol to provide rewards for the off-chain protocol. Approval requires two gossiped message types, assignment notices created by its assignments subsystem, and approval votes sent by our approval checks subsystem when authorized by the candidate validity utility subsystem. 
@@ -102,11 +102,11 @@ Assignment criteria come in three flavors, `RelayVRFModulo`, `RelayVRFDelay` and Among these, we have two distinct VRF output computations: -`RelayVRFModulo` runs several distinct samples whose VRF input is the `RelayVRFStory` and the sample number. It computes the VRF output with `schnorrkel::vrf::VRFInOut::make_bytes` using the context "core", reduces this number modulo the number of availability cores, and outputs the candidate just declared available by, and included by aka leaving, that availability core. We drop any samples that return no candidate because no candidate was leaving the sampled availability core in this relay chain block. We choose three samples initially, but we could make polkadot more secure and efficient by increasing this to four or five, and reducing the backing checks accordingly. All successful `RelayVRFModulo` samples are assigned delay tranche zero. +`RelayVRFModulo` runs several distinct samples whose VRF input is the `RelayVRFStory` and the sample number. It computes the VRF output with `schnorrkel::vrf::VRFInOut::make_bytes` using the context "A&V Core", reduces this number modulo the number of availability cores, and outputs the candidate just declared available by, and included by aka leaving, that availability core. We drop any samples that return no candidate because no candidate was leaving the sampled availability core in this relay chain block. We choose three samples initially, but we could make polkadot more secure and efficient by increasing this to four or five, and reducing the backing checks accordingly. All successful `RelayVRFModulo` samples are assigned delay tranche zero. There is no sampling process for `RelayVRFDelay` and `RelayEquivocation`. We instead run them on specific candidates and they compute a delay from their VRF output. `RelayVRFDelay` runs for all candidates included under, aka declared available by, a relay chain block, and inputs the associated VRF output via `RelayVRFStory`. 
`RelayEquivocation` runs only on candidate block equivocations, and inputs their block hashes via the `RelayEquivocation` story. -`RelayVRFDelay` and `RelayEquivocation` both compute their output with `schnorrkel::vrf::VRFInOut::make_bytes` using the context "tranche" and reduce the result modulo `num_delay_tranches + zeroth_delay_tranche_width`, and consolidate results 0 through `zeroth_delay_tranche_width` to be 0. In this way, they ensure the zeroth delay tranche has `zeroth_delay_tranche_width+1` times as many assignments as any other tranche. +`RelayVRFDelay` and `RelayEquivocation` both compute their output with `schnorrkel::vrf::VRFInOut::make_bytes` using the context "A&V Tranche" and reduce the result modulo `num_delay_tranches + zeroth_delay_tranche_width`, and consolidate results 0 through `zeroth_delay_tranche_width` to be 0. In this way, they ensure the zeroth delay tranche has `zeroth_delay_tranche_width+1` times as many assignments as any other tranche. As future work (or TODO?), we should merge assignment notices with the same delay and story using `vrf_merge`. We cannot merge those with the same delay and different stories because `RelayEquivocationStory`s could change but `RelayVRFStory` never changes. @@ -152,7 +152,7 @@ TODO: When? Is this optimal for the network? etc. ## On-chain verification -We should verify approval on-chain to reward approval checkers and to simplify integration with GRANDPA. We therefore require the "no show" timeout to be longer than a relay chain slot so that we can witness "no shows" on-chain, which helps with both these goals. +We should verify approval on-chain to reward approval checkers. We therefore require the "no show" timeout to be longer than a relay chain slot so that we can witness "no shows" on-chain, which helps with this goal. 
The major challenge with an on-chain record of the off-chain process is adversarial block producers who may either censor votes or publish votes to the chain which cause other votes to be ignored and unrewards (reward stealing). In principle, all validators have some "tranche" at which they're assigned to the parachain candidate, which ensures we reach enough validators eventually. As noted above, we often retract "no shows" when the slow validator eventually shows up, so witnessing their initially being a "no show" helps manage rewards. @@ -186,6 +186,14 @@ Any validator could send their assignment notices and/or approval votes too earl Assignment notices being gossiped too early might create a denial of service vector. If so, we might exploit the relative time scheme that synchronises our clocks, which conceivably permits just dropping excessively early assignments. +## Finality GRANDPA Voting Rule + +The relay-chain requires validators to participate in GRANDPA. In GRANDPA, validators submit off-chain votes on what they believe to be the best block of the chain, and GRANDPA determines the common block contained by a supermajority of sub-chains. There are also additional constraints on what can be submitted based on results of previous rounds of voting. + +In order to avoid finalizing anything which has not received enough approval votes or is disputed, we will pair the approval protocol with an alteration to the GRANDPA voting strategy for honest nodes which causes them to vote only on chains where every parachain candidate within has been approved. Furthermore, the voting rule prevents voting for chains where there is any live dispute or any dispute has resolved to a candidate being invalid. + +Thus, the finalized relay-chain should contain only relay-chain blocks where a majority believe that every block within has been sufficiently approved. 
+ ### Future work We could consider additional gossip messages with which nodes claims "slow availability" and/or "slow candidate" to fine tune the assignments "no show" system, but long enough "no show" delays suffice probably. diff --git a/roadmap/implementers-guide/src/runtime-api/historical_validation_code.md b/roadmap/implementers-guide/src/runtime-api/historical_validation_code.md new file mode 100644 index 0000000000000000000000000000000000000000..0b63f247df9360f5967bf68fd68ff04dadd5de13 --- /dev/null +++ b/roadmap/implementers-guide/src/runtime-api/historical_validation_code.md @@ -0,0 +1,7 @@ +# Historical Validation Code + +Fetch the historical validation code used by a para for candidates executed in the context of a given block height in the current chain. + +```rust +fn historical_validation_code(at: Block, para_id: ParaId, context_height: BlockNumber) -> Option; +``` diff --git a/roadmap/implementers-guide/src/runtime-api/validator-groups.md b/roadmap/implementers-guide/src/runtime-api/validator-groups.md index 42b39f976d19977df6e882127dd759fe9596d370..75a94e234979f496f2d1268ae0b9c08a11d25378 100644 --- a/roadmap/implementers-guide/src/runtime-api/validator-groups.md +++ b/roadmap/implementers-guide/src/runtime-api/validator-groups.md @@ -17,14 +17,10 @@ impl GroupRotationInfo { /// Returns the block number of the next rotation after the current block. If the current block /// is 10 and the rotation frequency is 5, this should return 15. - /// - /// If the group rotation frequency is 0, returns 0. fn next_rotation_at(&self) -> BlockNumber; /// Returns the block number of the last rotation before or including the current block. If the /// current block is 10 and the rotation frequency is 5, this should return 10. - /// - /// If the group rotation frequency is 0, returns 0. 
fn last_rotation_at(&self) -> BlockNumber; } diff --git a/roadmap/implementers-guide/src/runtime/dmp.md b/roadmap/implementers-guide/src/runtime/dmp.md new file mode 100644 index 0000000000000000000000000000000000000000..6f125ca46b5eca28035dc72566d2c634af0c7102 --- /dev/null +++ b/roadmap/implementers-guide/src/runtime/dmp.md @@ -0,0 +1,59 @@ +# DMP Module + +A module responsible for Downward Message Processing (DMP). See [Messaging Overview](../messaging.md) for more details. + +## Storage + +General storage entries + +```rust +/// Paras that are to be cleaned up at the end of the session. +/// The entries are sorted ascending by the para id. +OutgoingParas: Vec; +``` + +Storage layout required for implementation of DMP. + +```rust +/// The downward messages addressed for a certain para. +DownwardMessageQueues: map ParaId => Vec; +/// A mapping that stores the downward message queue MQC head for each para. +/// +/// Each link in this chain has a form: +/// `(prev_head, B, H(M))`, where +/// - `prev_head`: is the previous head hash or zero if none. +/// - `B`: is the relay-chain block number in which a message was appended. +/// - `H(M)`: is the hash of the message being appended. +DownwardMessageQueueHeads: map ParaId => Hash; +``` + +## Initialization + +No initialization routine runs for this module. + +## Routines + +Candidate Acceptance Function: + +* `check_processed_downward_messages(P: ParaId, processed_downward_messages: u32)`: + 1. Checks that `DownwardMessageQueues` for `P` is at least `processed_downward_messages` long. + 1. Checks that `processed_downward_messages` is at least 1 if `DownwardMessageQueues` for `P` is not empty. + +Candidate Enactment: + +* `prune_dmq(P: ParaId, processed_downward_messages: u32)`: + 1. Remove the first `processed_downward_messages` from the `DownwardMessageQueues` of `P`. + +Utility routines. + +`queue_downward_message(P: ParaId, M: DownwardMessage)`: + 1. 
Check if the size of `M` exceeds the `config.max_downward_message_size`. If so, return an error. + 1. Wrap `M` into `InboundDownwardMessage` using the current block number for `sent_at`. + 1. Obtain a new MQC link for the resulting `InboundDownwardMessage` and replace `DownwardMessageQueueHeads` for `P` with the resulting hash. + 1. Add the resulting `InboundDownwardMessage` into `DownwardMessageQueues` for `P`. + +## Session Change + +1. Drain `OutgoingParas`. For each `P` happened to be in the list: + 1. Remove all `DownwardMessageQueues` of `P`. + 1. Remove `DownwardMessageQueueHeads` for `P`. diff --git a/roadmap/implementers-guide/src/runtime/hrmp.md b/roadmap/implementers-guide/src/runtime/hrmp.md new file mode 100644 index 0000000000000000000000000000000000000000..145a2f28453050a75684bee40ed780abf50e8fca --- /dev/null +++ b/roadmap/implementers-guide/src/runtime/hrmp.md @@ -0,0 +1,280 @@ +# HRMP Module + +A module responsible for Horizontally Relay-routed Message Passing (HRMP). See [Messaging Overview](../messaging.md) for more details. + +## Storage + +General storage entries + +```rust +/// Paras that are to be cleaned up at the end of the session. +/// The entries are sorted ascending by the para id. +OutgoingParas: Vec; +``` + +HRMP related structs: + +```rust +/// A description of a request to open an HRMP channel. +struct HrmpOpenChannelRequest { + /// Indicates if this request was confirmed by the recipient. + confirmed: bool, + /// How many session boundaries ago this request was seen. + age: SessionIndex, + /// The amount that the sender supplied at the time of creation of this request. + sender_deposit: Balance, + /// The maximum message size that could be put into the channel. + max_message_size: u32, + /// The maximum number of messages that can be pending in the channel at once. + max_capacity: u32, + /// The maximum total size of the messages that can be pending in the channel at once. 
+ max_total_size: u32,
+}
+
+/// Metadata of an HRMP channel.
+struct HrmpChannel {
+ /// The amount that the sender supplied as a deposit when opening this channel.
+ sender_deposit: Balance,
+ /// The amount that the recipient supplied as a deposit when accepting opening this channel.
+ recipient_deposit: Balance,
+ /// The maximum number of messages that can be pending in the channel at once.
+ max_capacity: u32,
+ /// The maximum total size of the messages that can be pending in the channel at once.
+ max_total_size: u32,
+ /// The maximum message size that could be put into the channel.
+ max_message_size: u32,
+ /// The current number of messages pending in the channel.
+ /// Invariant: should be less or equal to `max_capacity`.
+ msg_count: u32,
+ /// The total size in bytes of all message payloads in the channel.
+ /// Invariant: should be less or equal to `max_total_size`.
+ total_size: u32,
+ /// A head of the Message Queue Chain for this channel. Each link in this chain has a form:
+ /// `(prev_head, B, H(M))`, where
+ /// - `prev_head`: is the previous value of `mqc_head` or zero if none.
+ /// - `B`: is the [relay-chain] block number in which a message was appended
+ /// - `H(M)`: is the hash of the message being appended.
+ /// This value is initialized to a special value that consists of all zeroes which indicates
+ /// that no messages were previously added.
+ mqc_head: Option,
+}
+```
+HRMP related storage layout
+
+```rust
+/// The set of pending HRMP open channel requests.
+///
+/// The set is accompanied by a list for iteration.
+///
+/// Invariant:
+/// - There are no channels that exist in the list but not in the set and vice versa.
+HrmpOpenChannelRequests: map HrmpChannelId => Option;
+HrmpOpenChannelRequestsList: Vec;
+
+/// This mapping tracks how many open channel requests are initiated by a given sender para.
+/// Invariant: `HrmpOpenChannelRequests` should contain the same number of items that have `(X, _)`
+/// as the number of `HrmpOpenChannelRequestCount` for `X`.
+HrmpOpenChannelRequestCount: map ParaId => u32;
+/// This mapping tracks how many open channel requests were accepted by a given recipient para.
+/// Invariant: `HrmpOpenChannelRequests` should contain the same number of items `(_, X)` with
+/// `confirmed` set to true, as the number of `HrmpAcceptedChannelRequestCount` for `X`.
+HrmpAcceptedChannelRequestCount: map ParaId => u32;
+
+/// A set of pending HRMP close channel requests that are going to be closed during the session change.
+/// Used for checking if a given channel is registered for closure.
+///
+/// The set is accompanied by a list for iteration.
+///
+/// Invariant:
+/// - There are no channels that exist in the list but not in the set and vice versa.
+HrmpCloseChannelRequests: map HrmpChannelId => Option<()>;
+HrmpCloseChannelRequestsList: Vec;
+
+/// The HRMP watermark associated with each para.
+/// Invariant:
+/// - each para `P` used here as a key should satisfy `Paras::is_valid_para(P)` within a session.
+HrmpWatermarks: map ParaId => Option;
+/// HRMP channel data associated with each para.
+/// Invariant:
+/// - each participant in the channel should satisfy `Paras::is_valid_para(P)` within a session.
+HrmpChannels: map HrmpChannelId => Option;
+/// Ingress/egress indexes allow to find all the senders and receivers given the opposite
+/// side. I.e.
+///
+/// (a) ingress index allows to find all the senders for a given recipient.
+/// (b) egress index allows to find all the recipients for a given sender.
+///
+/// Invariants:
+/// - for each ingress index entry for `P` each item `I` in the index should be present in `HrmpChannels`
+/// as `(I, P)`.
+/// - for each egress index entry for `P` each item `E` in the index should be present in `HrmpChannels`
+/// as `(P, E)`.
+/// - there should be no other dangling channels in `HrmpChannels`.
+/// - the vectors are sorted.
+HrmpIngressChannelsIndex: map ParaId => Vec;
+HrmpEgressChannelsIndex: map ParaId => Vec;
+/// Storage for the messages for each channel.
+/// Invariant: cannot be non-empty if the corresponding channel in `HrmpChannels` is `None`.
+HrmpChannelContents: map HrmpChannelId => Vec;
+/// Maintains a mapping that can be used to answer the question:
+/// What paras sent a message at the given block number for a given receiver.
+/// Invariants:
+/// - The inner `Vec` is never empty.
+/// - The inner `Vec` cannot store the same `ParaId` twice.
+/// - The outer vector is sorted ascending by block number and cannot store two items with the same
+/// block number.
+HrmpChannelDigests: map ParaId => Vec<(BlockNumber, Vec)>;
+```
+
+## Initialization
+
+No initialization routine runs for this module.
+
+## Routines
+
+Candidate Acceptance Function:
+
+* `check_hrmp_watermark(P: ParaId, new_hrmp_watermark)`:
+ 1. `new_hrmp_watermark` should be strictly greater than the value of `HrmpWatermarks` for `P` (if any).
+ 1. `new_hrmp_watermark` must not be greater than the context's block number.
+ 1. `new_hrmp_watermark` should be either
+ 1. equal to the context's block number
+ 1. or in `HrmpChannelDigests` for `P` an entry with the block number should exist
+* `check_outbound_hrmp(sender: ParaId, Vec)`:
+ 1. Checks that there are at most `config.hrmp_max_message_num_per_candidate` messages.
+ 1. Checks that horizontal messages are sorted by ascending recipient ParaId and that no two horizontal messages have the same recipient.
+ 1. For each horizontal message `M` with the channel `C` identified by `(sender, M.recipient)` check:
+ 1. `C` exists
+ 1. `M`'s payload size doesn't exceed a preconfigured limit `C.max_message_size`
+ 1. `M`'s payload size summed with the `C.total_size` doesn't exceed a preconfigured limit `C.max_total_size`.
+ 1. `C.msg_count + 1` doesn't exceed a preconfigured limit `C.max_capacity`.
+
+Candidate Enactment:
+
+* `queue_outbound_hrmp(sender: ParaId, Vec)`:
+ 1. For each horizontal message `HM` with the channel `C` identified by `(sender, HM.recipient)`:
+ 1. Append `HM` into `HrmpChannelContents` that corresponds to `C` with `sent_at` equal to the current block number.
+ 1. Locate or create an entry in `HrmpChannelDigests` for `HM.recipient` and append `sender` into the entry's list.
+ 1. Increment `C.msg_count`
+ 1. Increment `C.total_size` by `HM`'s payload size
+ 1. Append a new link to the MQC and save the new head in `C.mqc_head`. Note that the current block number as of enactment is used for the link.
+* `prune_hrmp(recipient, new_hrmp_watermark)`:
+ 1. From `HrmpChannelDigests` for `recipient` remove all entries up to an entry with block number equal to `new_hrmp_watermark`.
+ 1. From the removed digests construct a set of paras that sent new messages within the interval between the old and new watermarks.
+ 1. For each channel `C` identified by `(sender, recipient)` for each `sender` coming from the set, prune messages up to the `new_hrmp_watermark`.
+ 1. For each pruned message `M` from channel `C`:
+ 1. Decrement `C.msg_count`
+ 1. Decrement `C.total_size` by `M`'s payload size.
+ 1. Set `HrmpWatermarks` for `P` to be equal to `new_hrmp_watermark`
+ > NOTE: Collecting digests can be inefficient and the time it takes grows very fast. Thanks to the aggressive
+ > parametrization this shouldn't be that big of a deal.
+ > If that becomes a problem consider introducing an extra dictionary which says at what block the given sender
+ > sent a message to the recipient.
+
+The following routine is intended to be called at the same time as `Paras::schedule_para_cleanup` is called.
+
+`schedule_para_cleanup(ParaId)`:
+ 1. Add the para into the `OutgoingParas` vector maintaining the sorted order.
+
+## Entry-points
+
+The following entry-points are meant to be used for HRMP channel management.
+
+Those entry-points are meant to be called from a parachain. `origin` is defined as the `ParaId` of
+the parachain that executed the message.
+
+* `hrmp_init_open_channel(recipient, proposed_max_capacity, proposed_max_message_size)`:
+  1. Check that the `origin` is not `recipient`.
+  1. Check that `proposed_max_capacity` is less or equal to `config.hrmp_channel_max_capacity` and greater than zero.
+  1. Check that `proposed_max_message_size` is less or equal to `config.hrmp_channel_max_message_size` and greater than zero.
+  1. Check that `recipient` is a valid para.
+  1. Check that there is no existing channel for `(origin, recipient)` in `HrmpChannels`.
+  1. Check that there is no existing open channel request (`origin`, `recipient`) in `HrmpOpenChannelRequests`.
+  1. Check that the sum of the number of already opened HRMP channels by the `origin` (the size
+  of the set found in `HrmpEgressChannelsIndex` for `origin`) and the number of open requests by the
+  `origin` (the value from `HrmpOpenChannelRequestCount` for `origin`) doesn't exceed the limit of
+  channels (`config.hrmp_max_parachain_outbound_channels` or `config.hrmp_max_parathread_outbound_channels`) minus 1.
+  1. Check that `origin`'s balance is more or equal to `config.hrmp_sender_deposit`
+  1. Reserve the deposit for the `origin` according to `config.hrmp_sender_deposit`
+  1. Increase `HrmpOpenChannelRequestCount` by 1 for `origin`.
+  1. Append `(origin, recipient)` to `HrmpOpenChannelRequestsList`.
+  1. Add a new entry to `HrmpOpenChannelRequests` for `(origin, recipient)`
+    1. Set `sender_deposit` to `config.hrmp_sender_deposit`
+    1. Set `max_capacity` to `proposed_max_capacity`
+    1. Set `max_message_size` to `proposed_max_message_size`
+    1. Set `max_total_size` to `config.hrmp_channel_max_total_size`
+  1. Send a downward message to `recipient` notifying about an inbound HRMP channel request.
+    - The DM is sent using `queue_downward_message`. 
+ - The DM is represented by the `HrmpNewChannelOpenRequest` XCM message. + - `sender` is set to `origin`, + - `max_message_size` is set to `proposed_max_message_size`, + - `max_capacity` is set to `proposed_max_capacity`. +* `hrmp_accept_open_channel(sender)`: + 1. Check that there is an existing request between (`sender`, `origin`) in `HrmpOpenChannelRequests` + 1. Check that it is not confirmed. + 1. Check that the sum of the number of inbound HRMP channels opened to `origin` (the size of the set + found in `HrmpIngressChannelsIndex` for `origin`) and the number of accepted open requests by the `origin` + (the value from `HrmpAcceptedChannelRequestCount` for `origin`) doesn't exceed the limit of channels + (`config.hrmp_max_parachain_inbound_channels` or `config.hrmp_max_parathread_inbound_channels`) + minus 1. + 1. Check that `origin`'s balance is more or equal to `config.hrmp_recipient_deposit`. + 1. Reserve the deposit for the `origin` according to `config.hrmp_recipient_deposit` + 1. For the request in `HrmpOpenChannelRequests` identified by `(sender, P)`, set `confirmed` flag to `true`. + 1. Increase `HrmpAcceptedChannelRequestCount` by 1 for `origin`. + 1. Send a downward message to `sender` notifying that the channel request was accepted. + - The DM is sent using `queue_downward_message`. + - The DM is represented by the `HrmpChannelAccepted` XCM message. + - `recipient` is set to `origin`. +* `hrmp_close_channel(ch)`: + 1. Check that `origin` is either `ch.sender` or `ch.recipient` + 1. Check that `HrmpChannels` for `ch` exists. + 1. Check that `ch` is not in the `HrmpCloseChannelRequests` set. + 1. If not already there, insert a new entry `Some(())` to `HrmpCloseChannelRequests` for `ch` + and append `ch` to `HrmpCloseChannelRequestsList`. + 1. Send a downward message to the opposite party notifying about the channel closing. + - The DM is sent using `queue_downward_message`. 
+    - The DM is represented by the `HrmpChannelClosing` XCM message with:
+      - `initiator` is set to `origin`,
+      - `sender` is set to `ch.sender`,
+      - `recipient` is set to `ch.recipient`.
+    - The opposite party is `ch.sender` if `origin` is `ch.recipient` and `ch.recipient` if `origin` is `ch.sender`.
+
+## Session Change
+
+1. Drain `OutgoingParas`. For each `P` that happened to be in the list:
+  1. Remove all inbound channels of `P`, i.e. `(_, P)`,
+  1. Remove all outbound channels of `P`, i.e. `(P, _)`,
+  1. Remove `HrmpOpenChannelRequestCount` for `P`
+  1. Remove `HrmpAcceptedChannelRequestCount` for `P`.
+1. For each channel designator `D` in `HrmpOpenChannelRequestsList` we query the request `R` from `HrmpOpenChannelRequests`:
+  1. if `R.confirmed = false`:
+    1. increment `R.age` by 1.
+    1. if `R.age` reached a preconfigured time-to-live limit `config.hrmp_open_request_ttl`, then:
+      1. refund `R.sender_deposit` to the sender
+      1. decrement `HrmpOpenChannelRequestCount` for `D.sender` by 1.
+      1. remove `R`
+      1. remove `D`
+  2. if `R.confirmed = true`,
+    1. if both `D.sender` and `D.recipient` are not offboarded.
+      1. create a new channel `C` between `(D.sender, D.recipient)`.
+      1. Initialize the `C.sender_deposit` with `R.sender_deposit` and `C.recipient_deposit`
+      with the value found in the configuration `config.hrmp_recipient_deposit`.
+      1. Insert `sender` into the set `HrmpIngressChannelsIndex` for the `recipient`.
+      1. Insert `recipient` into the set `HrmpEgressChannelsIndex` for the `sender`.
+    1. decrement `HrmpOpenChannelRequestCount` for `D.sender` by 1.
+    1. decrement `HrmpAcceptedChannelRequestCount` for `D.recipient` by 1.
+    1. remove `R`
+    1. remove `D`
+1. For each HRMP channel designator `D` in `HrmpCloseChannelRequestsList`
+  1. remove the channel identified by `D`, if exists.
+  1. remove `D` from `HrmpCloseChannelRequests`.
+  1. 
remove `D` from `HrmpCloseChannelRequestsList` + +To remove a HRMP channel `C` identified with a tuple `(sender, recipient)`: + +1. Return `C.sender_deposit` to the `sender`. +1. Return `C.recipient_deposit` to the `recipient`. +1. Remove `C` from `HrmpChannels`. +1. Remove `C` from `HrmpChannelContents`. +1. Remove `recipient` from the set `HrmpEgressChannelsIndex` for `sender`. +1. Remove `sender` from the set `HrmpIngressChannelsIndex` for `recipient`. diff --git a/roadmap/implementers-guide/src/runtime/inclusion.md b/roadmap/implementers-guide/src/runtime/inclusion.md index 17dbdc94cc022834a326ec4c48057ee93cc13d82..f2d9f214225ab42d0042d164af327b9dfe7144db 100644 --- a/roadmap/implementers-guide/src/runtime/inclusion.md +++ b/roadmap/implementers-guide/src/runtime/inclusion.md @@ -56,7 +56,6 @@ All failed checks should lead to an unrecoverable error making the block invalid 1. apply each bit of bitfield to the corresponding pending candidate. looking up parathread cores using the `core_lookup`. Disregard bitfields that have a `1` bit for any free cores. 1. For each applied bit of each availability-bitfield, set the bit for the validator in the `CandidatePendingAvailability`'s `availability_votes` bitfield. Track all candidates that now have >2/3 of bits set in their `availability_votes`. These candidates are now available and can be enacted. 1. For all now-available candidates, invoke the `enact_candidate` routine with the candidate and relay-parent number. - 1. > TODO: pass it onwards to `Validity` module. 1. Return a list of freed cores consisting of the cores where candidates have become available. * `process_candidates(BackedCandidates, scheduled: Vec, group_validators: Fn(GroupIndex) -> Option>)`: 1. check that each candidate corresponds to a scheduled core and that they are ordered in the same order the cores appear in assignments in `scheduled`. @@ -68,21 +67,20 @@ All failed checks should lead to an unrecoverable error making the block invalid 1. 
Ensure that any code upgrade scheduled by the candidate does not happen within `config.validation_upgrade_frequency` of `Paras::last_code_upgrade(para_id, true)`, if any, comparing against the value of `Paras::FutureCodeUpgrades` for the given para ID. 1. Check the collator's signature on the candidate data. 1. check the backing of the candidate using the signatures and the bitfields, comparing against the validators assigned to the groups, fetched with the `group_validators` lookup. - 1. call `Router::check_upward_messages(para, commitments.upward_messages)` to check that the upward messages are valid. - 1. call `Router::check_processed_downward_messages(para, commitments.processed_downward_messages)` to check that the DMQ is properly drained. - 1. call `Router::check_hrmp_watermark(para, commitments.hrmp_watermark)` for each candidate to check rules of processing the HRMP watermark. - 1. check that in the commitments of each candidate the horizontal messages are sorted by ascending recipient ParaId and there is no two horizontal messages have the same recipient. - 1. using `Router::verify_outbound_hrmp(sender, commitments.horizontal_messages)` ensure that the each candidate send a valid set of horizontal messages + 1. call `Ump::check_upward_messages(para, commitments.upward_messages)` to check that the upward messages are valid. + 1. call `Dmp::check_processed_downward_messages(para, commitments.processed_downward_messages)` to check that the DMQ is properly drained. + 1. call `Hrmp::check_hrmp_watermark(para, commitments.hrmp_watermark)` for each candidate to check rules of processing the HRMP watermark. + 1. using `Hrmp::check_outbound_hrmp(sender, commitments.horizontal_messages)` ensure that the each candidate sent a valid set of horizontal messages 1. create an entry in the `PendingAvailability` map for each backed candidate with a blank `availability_votes` bitfield. 1. 
create a corresponding entry in the `PendingAvailabilityCommitments` with the commitments. 1. Return a `Vec` of all scheduled cores of the list of passed assignments that a candidate was successfully backed for, sorted ascending by CoreIndex. * `enact_candidate(relay_parent_number: BlockNumber, CommittedCandidateReceipt)`: 1. If the receipt contains a code upgrade, Call `Paras::schedule_code_upgrade(para_id, code, relay_parent_number + config.validationl_upgrade_delay)`. > TODO: Note that this is safe as long as we never enact candidates where the relay parent is across a session boundary. In that case, which we should be careful to avoid with contextual execution, the configuration might have changed and the para may de-sync from the host's understanding of it. - 1. call `Router::enact_upward_messages` for each backed candidate, using the [`UpwardMessage`s](../types/messages.md#upward-message) from the [`CandidateCommitments`](../types/candidate.md#candidate-commitments). - 1. call `Router::queue_outbound_hrmp` with the para id of the candidate and the list of horizontal messages taken from the commitment, - 1. call `Router::prune_hrmp` with the para id of the candiate and the candidate's `hrmp_watermark`. - 1. call `Router::prune_dmq` with the para id of the candidate and the candidate's `processed_downward_messages`. + 1. call `Ump::enact_upward_messages` for each backed candidate, using the [`UpwardMessage`s](../types/messages.md#upward-message) from the [`CandidateCommitments`](../types/candidate.md#candidate-commitments). + 1. call `Dmp::prune_dmq` with the para id of the candidate and the candidate's `processed_downward_messages`. + 1. call `Hrmp::prune_hrmp` with the para id of the candiate and the candidate's `hrmp_watermark`. + 1. call `Hrmp::queue_outbound_hrmp` with the para id of the candidate and the list of horizontal messages taken from the commitment, 1. Call `Paras::note_new_head` using the `HeadData` from the receipt and `relay_parent_number`. 
* `collect_pending`: diff --git a/roadmap/implementers-guide/src/runtime/inclusioninherent.md b/roadmap/implementers-guide/src/runtime/inclusioninherent.md index 990fd4a32b9ae69ccfe11dea8155911c58edbc01..54ebf3af7b5212778d2aa844b8babbf18ee3a6f7 100644 --- a/roadmap/implementers-guide/src/runtime/inclusioninherent.md +++ b/roadmap/implementers-guide/src/runtime/inclusioninherent.md @@ -22,5 +22,5 @@ Included: Option<()>, 1. Invoke `Scheduler::schedule(freed)` 1. Invoke the `Inclusion::process_candidates` routine with the parameters `(backed_candidates, Scheduler::scheduled(), Scheduler::group_validators)`. 1. Call `Scheduler::occupied` using the return value of the `Inclusion::process_candidates` call above, first sorting the list of assigned core indices. - 1. Call the `Router::process_pending_upward_dispatchables` routine to execute all messages in upward dispatch queues. + 1. Call the `Ump::process_pending_upward_messages` routine to execute all messages in upward dispatch queues. 1. If all of the above succeeds, set `Included` to `Some(())`. diff --git a/roadmap/implementers-guide/src/runtime/initializer.md b/roadmap/implementers-guide/src/runtime/initializer.md index 5fd2bc3bd60f2471f04db948c70fe4b63b185b73..361fab38c811319a5f0b329edd6f889b1c6b36b1 100644 --- a/roadmap/implementers-guide/src/runtime/initializer.md +++ b/roadmap/implementers-guide/src/runtime/initializer.md @@ -23,8 +23,10 @@ The other parachains modules are initialized in this order: 1. Paras 1. Scheduler 1. Inclusion -1. Validity. -1. Router. +1. SessionInfo +1. DMP +1. UMP +1. HRMP The [Configuration Module](configuration.md) is first, since all other modules need to operate under the same configuration as each other. It would lead to inconsistency if, for example, the scheduler ran first and then the configuration was updated before the Inclusion module. 
diff --git a/roadmap/implementers-guide/src/runtime/paras.md b/roadmap/implementers-guide/src/runtime/paras.md index dbb169af17514a530e50cbe12bb99733c4ca394f..0958c88d510fd1a399bdc6622ff9384eb61b3a59 100644 --- a/roadmap/implementers-guide/src/runtime/paras.md +++ b/roadmap/implementers-guide/src/runtime/paras.md @@ -111,6 +111,7 @@ OutgoingParas: Vec; * `note_new_head(ParaId, HeadData, BlockNumber)`: note that a para has progressed to a new head, where the new head was executed in the context of a relay-chain block with given number. This will apply pending code upgrades based on the block number provided. * `validation_code_at(ParaId, at: BlockNumber, assume_intermediate: Option)`: Fetches the validation code to be used when validating a block in the context of the given relay-chain height. A second block number parameter may be used to tell the lookup to proceed as if an intermediate parablock has been included at the given relay-chain height. This may return past, current, or (with certain choices of `assume_intermediate`) future code. `assume_intermediate`, if provided, must be before `at`. If the validation code has been pruned, this will return `None`. * `is_parathread(ParaId) -> bool`: Returns true if the para ID references any live parathread. +* `is_valid_para(ParaId) -> bool`: Returns true if the para ID references either a live parathread or live parachain. * `last_code_upgrade(id: ParaId, include_future: bool) -> Option`: The block number of the last scheduled upgrade of the requested para. Includes future upgrades if the flag is set. This is the `expected_at` number, not the `activated_at` number. * `persisted_validation_data(id: ParaId) -> Option`: Get the PersistedValidationData of the given para, assuming the context is the parent block. Returns `None` if the para is not known. 
diff --git a/roadmap/implementers-guide/src/runtime/router.md b/roadmap/implementers-guide/src/runtime/router.md deleted file mode 100644 index 500fa6688d940f804d3efea40b8e3820f804fb92..0000000000000000000000000000000000000000 --- a/roadmap/implementers-guide/src/runtime/router.md +++ /dev/null @@ -1,322 +0,0 @@ -# Router Module - -The Router module is responsible for all messaging mechanisms supported between paras and the relay chain, specifically: UMP, DMP, HRMP and later XCMP. - -## Storage - -General storage entries - -```rust -/// Paras that are to be cleaned up at the end of the session. -/// The entries are sorted ascending by the para id. -OutgoingParas: Vec; -``` - -### Upward Message Passing (UMP) - -```rust -/// Dispatchable objects ready to be dispatched onto the relay chain. The messages are processed in FIFO order. -RelayDispatchQueues: map ParaId => Vec<(ParachainDispatchOrigin, RawDispatchable)>; -/// Size of the dispatch queues. Caches sizes of the queues in `RelayDispatchQueue`. -/// First item in the tuple is the count of messages and second -/// is the total length (in bytes) of the message payloads. -/// -/// Note that this is an auxilary mapping: it's possible to tell the byte size and the number of -/// messages only looking at `RelayDispatchQueues`. This mapping is separate to avoid the cost of -/// loading the whole message queue if only the total size and count are required. -RelayDispatchQueueSize: map ParaId => (u32, u32); -/// The ordered list of `ParaId`s that have a `RelayDispatchQueue` entry. -NeedsDispatch: Vec; -/// This is the para that will get dispatched first during the next upward dispatchable queue -/// execution round. -NextDispatchRoundStartWith: Option; -``` - -### Downward Message Passing (DMP) - -Storage layout required for implementation of DMP. - -```rust -/// The downward messages addressed for a certain para. 
-DownwardMessageQueues: map ParaId => Vec; -/// A mapping that stores the downward message queue MQC head for each para. -/// -/// Each link in this chain has a form: -/// `(prev_head, B, H(M))`, where -/// - `prev_head`: is the previous head hash or zero if none. -/// - `B`: is the relay-chain block number in which a message was appended. -/// - `H(M)`: is the hash of the message being appended. -DownwardMessageQueueHeads: map ParaId => Option; -``` - -### HRMP - -HRMP related structs: - -```rust -/// A description of a request to open an HRMP channel. -struct HrmpOpenChannelRequest { - /// Indicates if this request was confirmed by the recipient. - confirmed: bool, - /// How many session boundaries ago this request was seen. - age: SessionIndex, - /// The amount that the sender supplied at the time of creation of this request. - sender_deposit: Balance, - /// The maximum number of messages that can be pending in the channel at once. - limit_used_places: u32, - /// The maximum total size of the messages that can be pending in the channel at once. - limit_used_bytes: u32, -} - -/// A metadata of an HRMP channel. -struct HrmpChannel { - /// The amount that the sender supplied as a deposit when opening this channel. - sender_deposit: Balance, - /// The amount that the recipient supplied as a deposit when accepting opening this channel. - recipient_deposit: Balance, - /// The maximum number of messages that can be pending in the channel at once. - limit_used_places: u32, - /// The maximum total size of the messages that can be pending in the channel at once. - limit_used_bytes: u32, - /// The maximum message size that could be put into the channel. - limit_message_size: u32, - /// The current number of messages pending in the channel. - /// Invariant: should be less or equal to `limit_used_places`. - used_places: u32, - /// The total size in bytes of all message payloads in the channel. - /// Invariant: should be less or equal to `limit_used_bytes`. 
- used_bytes: u32, - /// A head of the Message Queue Chain for this channel. Each link in this chain has a form: - /// `(prev_head, B, H(M))`, where - /// - `prev_head`: is the previous value of `mqc_head`. - /// - `B`: is the [relay-chain] block number in which a message was appended - /// - `H(M)`: is the hash of the message being appended. - /// This value is initialized to a special value that consists of all zeroes which indicates - /// that no messages were previously added. - mqc_head: Hash, -} -``` -HRMP related storage layout - -```rust -/// The set of pending HRMP open channel requests. -/// -/// The set is accompanied by a list for iteration. -/// -/// Invariant: -/// - There are no channels that exists in list but not in the set and vice versa. -HrmpOpenChannelRequests: map HrmpChannelId => Option; -HrmpOpenChannelRequestsList: Vec; - -/// This mapping tracks how many open channel requests are inititated by a given sender para. -/// Invariant: `HrmpOpenChannelRequests` should contain the same number of items that has `(X, _)` -/// as the number of `HrmpOpenChannelRequestCount` for `X`. -HrmpOpenChannelRequestCount: map ParaId => u32; -/// This mapping tracks how many open channel requests were accepted by a given recipient para. -/// Invariant: `HrmpOpenChannelRequests` should contain the same number of items `(_, X)` with -/// `confirmed` set to true, as the number of `HrmpAcceptedChannelRequestCount` for `X`. -HrmpAcceptedChannelRequestCount: map ParaId => u32; - -/// A set of pending HRMP close channel requests that are going to be closed during the session change. -/// Used for checking if a given channel is registered for closure. -/// -/// The set is accompanied by a list for iteration. -/// -/// Invariant: -/// - There are no channels that exists in list but not in the set and vice versa. -HrmpCloseChannelRequests: map HrmpChannelId => Option<()>; -HrmpCloseChannelRequestsList: Vec; - -/// The HRMP watermark associated with each para. 
-HrmpWatermarks: map ParaId => Option; -/// HRMP channel data associated with each para. -HrmpChannels: map HrmpChannelId => Option; -/// The indexes that map all senders to their recievers and vise versa. -/// Invariants: -/// - for each ingress index entry for `P` each item `I` in the index should present in `HrmpChannels` as `(I, P)`. -/// - for each egress index entry for `P` each item `E` in the index should present in `HrmpChannels` as `(P, E)`. -/// - there should be no other dangling channels in `HrmpChannels`. -HrmpIngressChannelsIndex: map ParaId => Vec; -HrmpEgressChannelsIndex: map ParaId => Vec; -/// Storage for the messages for each channel. -/// Invariant: cannot be non-empty if the corresponding channel in `HrmpChannels` is `None`. -HrmpChannelContents: map HrmpChannelId => Vec; -/// Maintains a mapping that can be used to answer the question: -/// What paras sent a message at the given block number for a given reciever. -/// Invariant: The para ids vector is never empty. -HrmpChannelDigests: map ParaId => Vec<(BlockNumber, Vec)>; -``` - -## Initialization - -No initialization routine runs for this module. - -## Routines - -Candidate Acceptance Function: - -* `check_upward_messages(P: ParaId, Vec`): - 1. Checks that there are at most `config.max_upward_message_num_per_candidate` messages. - 1. Checks each upward message `M` individually depending on its kind: - 1. If the message kind is `Dispatchable`: - 1. Verify that `RelayDispatchQueueSize` for `P` has enough capacity for the message (NOTE that should include all processed - upward messages of the `Dispatchable` kind up to this point!) - 1. If the message kind is `HrmpInitOpenChannel(recipient, max_places, max_message_size)`: - 1. Check that the `P` is not `recipient`. - 1. Check that `max_places` is less or equal to `config.hrmp_channel_max_places`. - 1. Check that `max_message_size` is less or equal to `config.hrmp_channel_max_message_size`. - 1. Check that `recipient` is a valid para. - 1. 
Check that there is no existing channel for `(P, recipient)` in `HrmpChannels`. - 1. Check that there is no existing open channel request (`P`, `recipient`) in `HrmpOpenChannelRequests`. - 1. Check that the sum of the number of already opened HRMP channels by the `P` (the size - of the set found `HrmpEgressChannelsIndex` for `P`) and the number of open requests by the - `P` (the value from `HrmpOpenChannelRequestCount` for `P`) doesn't exceed the limit of - channels (`config.hrmp_max_parachain_outbound_channels` or `config.hrmp_max_parathread_outbound_channels`) minus 1. - 1. Check that `P`'s balance is more or equal to `config.hrmp_sender_deposit` - 1. If the message kind is `HrmpAcceptOpenChannel(sender)`: - 1. Check that there is an existing request between (`sender`, `P`) in `HrmpOpenChannelRequests` - 1. Check that it is not confirmed. - 1. Check that `P`'s balance is more or equal to `config.hrmp_recipient_deposit`. - 1. Check that the sum of the number of inbound HRMP channels opened to `P` (the size of the set - found in `HrmpIngressChannelsIndex` for `P`) and the number of accepted open requests by the `P` - (the value from `HrmpAcceptedChannelRequestCount` for `P`) doesn't exceed the limit of channels - (`config.hrmp_max_parachain_inbound_channels` or `config.hrmp_max_parathread_inbound_channels`) - minus 1. - 1. If the message kind is `HrmpCloseChannel(ch)`: - 1. Check that `P` is either `ch.sender` or `ch.recipient` - 1. Check that `HrmpChannels` for `ch` exists. - 1. Check that `ch` is not in the `HrmpCloseChannelRequests` set. -* `check_processed_downward_messages(P: ParaId, processed_downward_messages)`: - 1. Checks that `DownwardMessageQueues` for `P` is at least `processed_downward_messages` long. - 1. Checks that `processed_downward_messages` is at least 1 if `DownwardMessageQueues` for `P` is not empty. -* `check_hrmp_watermark(P: ParaId, new_hrmp_watermark)`: - 1. 
`new_hrmp_watermark` should be strictly greater than the value of `HrmpWatermarks` for `P` (if any). - 1. `new_hrmp_watermark` must not be greater than the context's block number. - 1. `new_hrmp_watermark` should be either - 1. equal to the context's block number - 1. or in `HrmpChannelDigests` for `P` an entry with the block number should exist -* `verify_outbound_hrmp(sender: ParaId, Vec)`: - 1. For each horizontal message `M` with the channel `C` identified by `(sender, M.recipient)` check: - 1. exists - 1. `M`'s payload size doesn't exceed a preconfigured limit `C.limit_message_size` - 1. `M`'s payload size summed with the `C.used_bytes` doesn't exceed a preconfigured limit `C.limit_used_bytes`. - 1. `C.used_places + 1` doesn't exceed a preconfigured limit `C.limit_used_places`. - -Candidate Enactment: - -* `queue_outbound_hrmp(sender: ParaId, Vec)`: - 1. For each horizontal message `HM` with the channel `C` identified by `(sender, HM.recipient)`: - 1. Append `HM` into `HrmpChannelContents` that corresponds to `C` with `sent_at` equals to the current block number. - 1. Locate or create an entry in `HrmpChannelDigests` for `HM.recipient` and append `sender` into the entry's list. - 1. Increment `C.used_places` - 1. Increment `C.used_bytes` by `HM`'s payload size - 1. Append a new link to the MQC and save the new head in `C.mqc_head`. Note that the current block number as of enactment is used for the link. -* `prune_hrmp(recipient, new_hrmp_watermark)`: - 1. From `HrmpChannelDigests` for `recipient` remove all entries up to an entry with block number equal to `new_hrmp_watermark`. - 1. From the removed digests construct a set of paras that sent new messages within the interval between the old and new watermarks. - 1. For each channel `C` identified by `(sender, recipient)` for each `sender` coming from the set, prune messages up to the `new_hrmp_watermark`. - 1. For each pruned message `M` from channel `C`: - 1. Decrement `C.used_places` - 1. 
Decrement `C.used_bytes` by `M`'s payload size. - 1. Set `HrmpWatermarks` for `P` to be equal to `new_hrmp_watermark` -* `prune_dmq(P: ParaId, processed_downward_messages)`: - 1. Remove the first `processed_downward_messages` from the `DownwardMessageQueues` of `P`. -* `enact_upward_messages(P: ParaId, Vec)`: - 1. Process all upward messages in order depending on their kinds: - 1. If the message kind is `Dispatchable`: - 1. Append the message to `RelayDispatchQueues` for `P` - 1. Increment the size and the count in `RelayDispatchQueueSize` for `P`. - 1. Ensure that `P` is present in `NeedsDispatch`. - 1. If the message kind is `HrmpInitOpenChannel(recipient, max_places, max_message_size)`: - 1. Increase `HrmpOpenChannelRequestCount` by 1 for `P`. - 1. Append `(P, recipient)` to `HrmpOpenChannelRequestsList`. - 1. Add a new entry to `HrmpOpenChannelRequests` for `(sender, recipient)` - 1. Set `sender_deposit` to `config.hrmp_sender_deposit` - 1. Set `limit_used_places` to `max_places` - 1. Set `limit_message_size` to `max_message_size` - 1. Set `limit_used_bytes` to `config.hrmp_channel_max_size` - 1. Reserve the deposit for the `P` according to `config.hrmp_sender_deposit` - 1. If the message kind is `HrmpAcceptOpenChannel(sender)`: - 1. Reserve the deposit for the `P` according to `config.hrmp_recipient_deposit` - 1. For the request in `HrmpOpenChannelRequests` identified by `(sender, P)`, set `confirmed` flag to `true`. - 1. Increase `HrmpAcceptedChannelRequestCount` by 1 for `P`. - 1. If the message kind is `HrmpCloseChannel(ch)`: - 1. If not already there, insert a new entry `Some(())` to `HrmpCloseChannelRequests` for `ch` - and append `ch` to `HrmpCloseChannelRequestsList`. - -The following routine is intended to be called in the same time when `Paras::schedule_para_cleanup` is called. - -`schedule_para_cleanup(ParaId)`: - 1. Add the para into the `OutgoingParas` vector maintaining the sorted order. 
- -The following routine is meant to execute pending entries in upward dispatchable queues. This function doesn't fail, even if -any of dispatchables return an error. - -`process_pending_upward_dispatchables()`: - 1. Initialize a cumulative weight counter `T` to 0 - 1. Iterate over items in `NeedsDispatch` cyclically, starting with `NextDispatchRoundStartWith`. If the item specified is `None` start from the beginning. For each `P` encountered: - 1. Dequeue `D` the first dispatchable `D` from `RelayDispatchQueues` for `P` - 1. Decrement the size of the message from `RelayDispatchQueueSize` for `P` - 1. Decode `D` into a dispatchable. Otherwise, if succeeded: - 1. If `weight_of(D) > config.dispatchable_upward_message_critical_weight` then skip the dispatchable. Otherwise: - 1. Execute `D` and add the actual amount of weight consumed to `T`. - 1. If `weight_of(D) + T > config.preferred_dispatchable_upward_messages_step_weight`, set `NextDispatchRoundStartWith` to `P` and finish processing. - > NOTE that in practice we would need to approach the weight calculation more thoroughly, i.e. incorporate all operations - > that could take place on the course of handling these dispatchables. - 1. If `RelayDispatchQueues` for `P` became empty, remove `P` from `NeedsDispatch`. - 1. If `NeedsDispatch` became empty then finish processing and set `NextDispatchRoundStartWith` to `None`. - -Utility routines. - -`queue_downward_message(P: ParaId, M: DownwardMessage)`: - 1. Check if the serialized size of `M` exceeds the `config.critical_downward_message_size`. If so, return an error. - 1. Wrap `M` into `InboundDownwardMessage` using the current block number for `sent_at`. - 1. Obtain a new MQC link for the resulting `InboundDownwardMessage` and replace `DownwardMessageQueueHeads` for `P` with the resulting hash. - 1. Add the resulting `InboundDownwardMessage` into `DownwardMessageQueues` for `P`. - -## Session Change - -1. Drain `OutgoingParas`. 
For each `P` happened to be in the list: - 1. Remove all inbound channels of `P`, i.e. `(_, P)`, - 1. Remove all outbound channels of `P`, i.e. `(P, _)`, - 1. Remove all `DownwardMessageQueues` of `P`. - 1. Remove `DownwardMessageQueueHeads` for `P`. - 1. Remove `RelayDispatchQueueSize` of `P`. - 1. Remove `RelayDispatchQueues` of `P`. - 1. Remove `HrmpOpenChannelRequestCount` for `P` - 1. Remove `HrmpAcceptedChannelRequestCount` for `P`. - 1. Remove `P` if it exists in `NeedsDispatch`. - 1. If `P` is in `NextDispatchRoundStartWith`, then reset it to `None` - - Note that if we don't remove the open/close requests since they are going to die out naturally at the end of the session. -1. For each channel designator `D` in `HrmpOpenChannelRequestsList` we query the request `R` from `HrmpOpenChannelRequests`: - 1. if `R.confirmed = false`: - 1. increment `R.age` by 1. - 1. if `R.age` reached a preconfigured time-to-live limit `config.hrmp_open_request_ttl`, then: - 1. refund `R.sender_deposit` to the sender - 1. decrement `HrmpOpenChannelRequestCount` for `D.sender` by 1. - 1. remove `R` - 1. remove `D` - 2. if `R.confirmed = true`, - 1. if both `D.sender` and `D.recipient` are not offboarded. - 1. create a new channel `C` between `(D.sender, D.recipient)`. - 1. Initialize the `C.sender_deposit` with `R.sender_deposit` and `C.recipient_deposit` - with the value found in the configuration `config.hrmp_recipient_deposit`. - 1. Insert `sender` into the set `HrmpIngressChannelsIndex` for the `recipient`. - 1. Insert `recipient` into the set `HrmpEgressChannelsIndex` for the `sender`. - 1. decrement `HrmpOpenChannelRequestCount` for `D.sender` by 1. - 1. decrement `HrmpAcceptedChannelRequestCount` for `D.recipient` by 1. - 1. remove `R` - 1. remove `D` -1. For each HRMP channel designator `D` in `HrmpCloseChannelRequestsList` - 1. remove the channel identified by `D`, if exists. - 1. remove `D` from `HrmpCloseChannelRequests`. - 1. 
remove `D` from `HrmpCloseChannelRequestsList` - -To remove a HRMP channel `C` identified with a tuple `(sender, recipient)`: - -1. Return `C.sender_deposit` to the `sender`. -1. Return `C.recipient_deposit` to the `recipient`. -1. Remove `C` from `HrmpChannels`. -1. Remove `C` from `HrmpChannelContents`. -1. Remove `recipient` from the set `HrmpEgressChannelsIndex` for `sender`. -1. Remove `sender` from the set `HrmpIngressChannelsIndex` for `recipient`. diff --git a/roadmap/implementers-guide/src/runtime/scheduler.md b/roadmap/implementers-guide/src/runtime/scheduler.md index ab8089a647104ccd0e496213e28836daf340fdeb..ead981b6d66b718e0183a6fe924ef6063c2bddeb 100644 --- a/roadmap/implementers-guide/src/runtime/scheduler.md +++ b/roadmap/implementers-guide/src/runtime/scheduler.md @@ -175,11 +175,12 @@ Actions: 1. Set `SessionStartBlock` to current block number. 1. Clear all `Some` members of `AvailabilityCores`. Return all parathread claims to queue with retries un-incremented. 1. Set `configuration = Configuration::configuration()` (see [`HostConfiguration`](../types/runtime.md#host-configuration)) -1. Resize `AvailabilityCores` to have length `Paras::parachains().len() + configuration.parathread_cores with all`None` entries. +1. Resize `AvailabilityCores` to have length `Paras::parachains().len() + configuration.parathread_cores with all `None` entries. 1. Compute new validator groups by shuffling using a secure randomness beacon - We need a total of `N = Paras::parachains().len() + configuration.parathread_cores` validator groups. - - The total number of validators `V` in the `SessionChangeNotification`'s `validators` may not be evenly divided by `V`. - First, we obtain "shuffled validators" `SV` by shuffling the validators using the `SessionChangeNotification`'s random seed. + - Then, we truncate `SV` to have at most `configuration.max_validators_per_core * N` members, if `configuration.max_validators_per_core` is `Some`. 
+ - Note that the total number of validators `V` in `SV` may not be evenly divided by `N`. - The groups are selected by partitioning `SV`. The first V % N groups will have (V / N) + 1 members, while the remaining groups will have (V / N) members each. 1. Prune the parathread queue to remove all retries beyond `configuration.parathread_retries`. - Also prune all parathread claims corresponding to de-registered parathreads. diff --git a/roadmap/implementers-guide/src/runtime/session_info.md b/roadmap/implementers-guide/src/runtime/session_info.md new file mode 100644 index 0000000000000000000000000000000000000000..ac2ad926dd6a2e56a7b2f05f49419139b5ec12f8 --- /dev/null +++ b/roadmap/implementers-guide/src/runtime/session_info.md @@ -0,0 +1,54 @@ +# Session Info + +For disputes and approvals, we need access to information about validator sets from prior sessions. We also often want easy access to the same information about the current session's validator set. This module aggregates and stores this information in a rolling window while providing easy APIs for access. + +## Storage + +Helper structs: + +```rust +struct SessionInfo { + // validators in canonical ordering. + validators: Vec<ValidatorId>, + // validators' authority discovery keys for the session in canonical ordering. + discovery_keys: Vec<AuthorityDiscoveryId>, + // The assignment and approval keys for validators. + approval_keys: Vec<(AssignmentId, ApprovalId)>, + // validators in shuffled ordering - these are the validator groups as produced + // by the `Scheduler` module for the session and are typically referred to by + // `GroupIndex`. + validator_groups: Vec<Vec<ValidatorIndex>>, + // The number of availability cores used by the protocol during this session. + n_cores: u32, + // the zeroth delay tranche width. + zeroth_delay_tranche_width: u32, + // The number of samples we do of relay_vrf_modulo. + relay_vrf_modulo_samples: u32, + // The number of delay tranches in total. 
+ n_delay_tranches: u32, + // How many slots (BABE / SASSAFRAS) must pass before an assignment is considered a + // no-show. + no_show_slots: u32, + /// The number of validators needed to approve a block. + needed_approvals: u32, +} +``` + +Storage Layout: + +```rust +/// The earliest session for which previous session info is stored. +EarliestStoredSession: SessionIndex, +/// Session information. Should have an entry from `EarliestStoredSession..=CurrentSessionIndex` +Sessions: map SessionIndex => Option<SessionInfo>, +``` + +## Session Change + +1. Update `EarliestStoredSession` based on `config.dispute_period` and remove all entries from `Sessions` from the previous value up to the new value. +1. Create a new entry in `Sessions` with information about the current session. + +## Routines + +* `earliest_stored_session() -> SessionIndex`: Yields the earliest session for which we have information stored. +* `session_info(session: SessionIndex) -> Option<SessionInfo>`: Yields the session info for the given session, if stored. diff --git a/roadmap/implementers-guide/src/runtime/ump.md b/roadmap/implementers-guide/src/runtime/ump.md new file mode 100644 index 0000000000000000000000000000000000000000..ff2e9e09b9976bfc71ade23b4dae763546e3dd49 --- /dev/null +++ b/roadmap/implementers-guide/src/runtime/ump.md @@ -0,0 +1,100 @@ +# UMP Module + +A module responsible for Upward Message Passing (UMP). See [Messaging Overview](../messaging.md) for more details. + +## Storage + +General storage entries + +```rust +/// Paras that are to be cleaned up at the end of the session. +/// The entries are sorted ascending by the para id. +OutgoingParas: Vec<ParaId>; +``` + +Storage related to UMP + +```rust +/// The messages waiting to be handled by the relay-chain originating from a certain parachain. +/// +/// Note that some upward messages might have been already processed by the inclusion logic. E.g. +/// channel management messages. +/// +/// The messages are processed in FIFO order. 
+RelayDispatchQueues: map ParaId => Vec<UpwardMessage>; +/// Size of the dispatch queues. Caches sizes of the queues in `RelayDispatchQueue`. +/// +/// First item in the tuple is the count of messages and second +/// is the total length (in bytes) of the message payloads. +/// +/// Note that this is an auxiliary mapping: it's possible to tell the byte size and the number of +/// messages only looking at `RelayDispatchQueues`. This mapping is separate to avoid the cost of +/// loading the whole message queue if only the total size and count are required. +/// +/// Invariant: +/// - The set of keys should exactly match the set of keys of `RelayDispatchQueues`. +RelayDispatchQueueSize: map ParaId => (u32, u32); // (num_messages, total_bytes) +/// The ordered list of `ParaId`s that have a `RelayDispatchQueue` entry. +/// +/// Invariant: +/// - The set of items from this vector should be exactly the set of the keys in +/// `RelayDispatchQueues` and `RelayDispatchQueueSize`. +NeedsDispatch: Vec<ParaId>; +/// This is the para that gets dispatched first during the next upward dispatchable queue +/// execution round. +/// +/// Invariant: +/// - If `Some(para)`, then `para` must be present in `NeedsDispatch`. +NextDispatchRoundStartWith: Option<ParaId>; +``` + + +## Initialization + +No initialization routine runs for this module. + +## Routines + +Candidate Acceptance Function: + +* `check_upward_messages(P: ParaId, Vec<UpwardMessage>)`: + 1. Checks that there are at most `config.max_upward_message_num_per_candidate` messages. + 1. Checks that no message exceeds `config.max_upward_message_size`. + 1. Verify that `RelayDispatchQueueSize` for `P` has enough capacity for the messages + +Candidate Enactment: + +* `enact_upward_messages(P: ParaId, Vec<UpwardMessage>)`: + 1. Process each upward message `M` in order: + 1. Append the message to `RelayDispatchQueues` for `P` + 1. Increment the size and the count in `RelayDispatchQueueSize` for `P`. + 1. Ensure that `P` is present in `NeedsDispatch`. 
+ +The following routine is intended to be called at the same time as `Paras::schedule_para_cleanup` is called. + +`schedule_para_cleanup(ParaId)`: + 1. Add the para into the `OutgoingParas` vector maintaining the sorted order. + +The following routine is meant to execute pending entries in upward message queues. This function doesn't fail, even if +dispatching any of the individual upward messages returns an error. + +`process_pending_upward_messages()`: + 1. Initialize a cumulative weight counter `T` to 0 + 1. Iterate over items in `NeedsDispatch` cyclically, starting with `NextDispatchRoundStartWith`. If the item specified is `None` start from the beginning. For each `P` encountered: + 1. Dequeue the first upward message `D` from `RelayDispatchQueues` for `P` + 1. Decrement the size of the message from `RelayDispatchQueueSize` for `P` + 1. Delegate processing of the message to the runtime. The weight consumed is added to `T`. + 1. If `T >= config.preferred_dispatchable_upward_messages_step_weight`, set `NextDispatchRoundStartWith` to `P` and finish processing. + 1. If `RelayDispatchQueues` for `P` became empty, remove `P` from `NeedsDispatch`. + 1. If `NeedsDispatch` became empty then finish processing and set `NextDispatchRoundStartWith` to `None`. + > NOTE that in practice we would need to approach the weight calculation more thoroughly, i.e. incorporate all operations + > that could take place in the course of handling these upward messages. + +## Session Change + +1. Drain `OutgoingParas`. For each `P` that happened to be in the list: + 1. Remove `RelayDispatchQueueSize` of `P`. + 1. Remove `RelayDispatchQueues` of `P`. + 1. Remove `P` if it exists in `NeedsDispatch`. + 1. If `P` is in `NextDispatchRoundStartWith`, then reset it to `None` + - Note that we don't remove the open/close requests since they are going to die out naturally at the end of the session. 
diff --git a/roadmap/implementers-guide/src/types/approval.md b/roadmap/implementers-guide/src/types/approval.md new file mode 100644 index 0000000000000000000000000000000000000000..5603d03aa659a40b82f29ee90ee762b20d08aec1 --- /dev/null +++ b/roadmap/implementers-guide/src/types/approval.md @@ -0,0 +1,139 @@ +# Approval Types + +## ApprovalId + +The public key of a keypair used by a validator for approval voting on included parachain candidates. + +## AssignmentId + +The public key of a keypair used by a validator for determining assignments to approve included parachain candidates. + +## AssignmentCert + +An `AssignmentCert`, short for Assignment Certificate, is a piece of data provided by a validator to prove that they have been selected to perform secondary approval checks on an included candidate. + +These certificates can be checked in the context of a specific block, candidate, and validator assignment VRF key. The block state will also provide further context about the availability core states at that block. + +```rust +enum AssignmentCertKind { + RelayVRFModulo { + sample: u32, + }, + RelayVRFDelay { + core_index: CoreIndex, + } +} + +struct AssignmentCert { + // The criterion which is claimed to be met by this cert. + kind: AssignmentCertKind, + // The VRF showing the criterion is met. + vrf: (VRFPreOut, VRFProof), +} +``` + +> TODO: RelayEquivocation cert. Probably can only be broadcast to chains that have handled an equivocation report. + +## IndirectAssignmentCert + +An assignment cert which refers to the candidate under which the assignment is relevant by block hash. + +```rust +struct IndirectAssignmentCert { + // A block hash where the candidate appears. + block_hash: Hash, + validator: ValidatorIndex, + cert: AssignmentCert, +} +``` + +## ApprovalVote + +A vote of approval on a candidate. 
+ +```rust +struct ApprovalVote(Hash); +``` + +## SignedApprovalVote + +```rust +struct SignedApprovalVote { + vote: ApprovalVote, + validator: ValidatorIndex, + signature: ApprovalSignature, +} +``` + +## IndirectSignedApprovalVote + +A signed approval vote which references the candidate indirectly via the block. If there exists a look-up to the candidate hash from the block hash and candidate index, then this can be transformed into a `SignedApprovalVote`. + +Although this vote references the candidate by a specific block hash and candidate index, the signature is computed on the actual `SignedApprovalVote` payload. + +```rust +struct IndirectSignedApprovalVote { + // A block hash where the candidate appears. + block_hash: Hash, + // The index of the candidate in the list of candidates fully included as-of the block. + candidate_index: u32, + validator: ValidatorIndex, + signature: ApprovalSignature, +} +``` + +## CheckedAssignmentCert + +An assignment cert which has checked both the VRF and the validity of the implied assignment according to the selection criteria rules of the protocol. This type should be declared in such a way as to be instantiable only when the checks have actually been done. Fields should be accessible via getters, not direct struct access. + +```rust +struct CheckedAssignmentCert { + cert: AssignmentCert, + validator: ValidatorIndex, + relay_block: Hash, + candidate_hash: Hash, + delay_tranche: DelayTranche, +} +``` + +## DelayTranche + +```rust +type DelayTranche = u32; +``` + +## RelayVRFStory + +Assignment criteria are based off of possible stories about the relay-chain block that included the candidate. More information on stories is available in [the informational page on approvals.](../protocol-approval.md#stories). + +```rust +/// A story based on the VRF that authorized the relay-chain block where the candidate was +/// included. 
+/// +/// VRF Context is "A&V RC-VRF" +struct RelayVRFStory(VRFInOut); +``` + +## RelayEquivocationStory + +```rust +/// A story based on the candidate hash itself. Should be used when a candidate is an +/// equivocation: when there are two relay-chain blocks with the same RelayVRFStory, but only +/// one contains the candidate. +/// +/// VRF Context is "A&V RC-EQUIV" +struct RelayEquivocationStory(Hash); +``` + +## ExecutionTimePair + +```rust +struct ExecutionTimePair { + // The absolute time in milliseconds that the validator claims to have taken + // with the block. + absolute: u32, + // The validator's believed ratio in execution time to the average, expressed as a fixed-point + // 16-bit unsigned integer with 8 bits before and after the point. + ratio: FixedU16, +} +``` \ No newline at end of file diff --git a/roadmap/implementers-guide/src/types/availability.md b/roadmap/implementers-guide/src/types/availability.md index 0117b174e645072b3553b6bc77a2f4b3ccc63f62..e2b90e86f43fe6ce4d8e679866d7f6e7ce002865 100644 --- a/roadmap/implementers-guide/src/types/availability.md +++ b/roadmap/implementers-guide/src/types/availability.md @@ -40,7 +40,7 @@ This is the data we want to keep available for each [candidate](candidate.md) in ```rust struct AvailableData { /// The Proof-of-Validation of the candidate. - pov: PoV, + pov: Arc, /// The persisted validation data used to check the candidate. validation_data: PersistedValidationData, } diff --git a/roadmap/implementers-guide/src/types/candidate.md b/roadmap/implementers-guide/src/types/candidate.md index 9d6776332bade055d67da3b67e2e8e7261a14b8c..86c80153f37195f61c92fde2ad2b6e16efd6a44e 100644 --- a/roadmap/implementers-guide/src/types/candidate.md +++ b/roadmap/implementers-guide/src/types/candidate.md @@ -80,6 +80,8 @@ struct CandidateDescriptor { persisted_validation_data_hash: Hash, /// The blake2-256 hash of the pov-block. pov_hash: Hash, + /// The root of a block's erasure encoding Merkle tree. 
+ erasure_root: Hash, /// Signature on blake2-256 of components of this receipt: /// The parachain index, the relay parent, the validation data hash, and the pov_hash. signature: CollatorSignature, @@ -129,7 +131,7 @@ struct PersistedValidationData { /// /// The DMQ MQC head will be used by the validation function to authorize the downward messages /// passed by the collator. - dmq_mqc_head: Option, + dmq_mqc_head: Hash, /// The list of MQC heads for the inbound channels paired with the sender para ids. This /// vector is sorted ascending by the para id and doesn't contain multiple entries with the same /// sender. @@ -251,8 +253,6 @@ struct CandidateCommitments { horizontal_messages: Vec, /// Messages destined to be interpreted by the Relay chain itself. upward_messages: Vec, - /// The root of a block's erasure encoding Merkle tree. - erasure_root: Hash, /// New validation code. new_validation_code: Option, /// The head-data produced as a result of execution. @@ -275,27 +275,4 @@ struct SigningContext { /// The session index this signature is in the context of. session_index: SessionIndex, } -``` - -## Validation Outputs - -This struct encapsulates the outputs of candidate validation. - -```rust -struct ValidationOutputs { - /// The head-data produced by validation. - head_data: HeadData, - /// The validation data, persisted. - validation_data: PersistedValidationData, - /// Messages directed to other paras routed via the relay chain. - horizontal_messages: Vec, - /// Upwards messages to the relay chain. - upwards_messages: Vec, - /// The new validation code submitted by the execution, if any. - new_validation_code: Option, - /// The number of messages processed from the DMQ. - processed_downward_messages: u32, - /// The mark which specifies the block number up to which all inbound HRMP messages are processed. 
- hrmp_watermark: BlockNumber, -} -``` +``` \ No newline at end of file diff --git a/roadmap/implementers-guide/src/types/messages.md b/roadmap/implementers-guide/src/types/messages.md index e64db80b1a505ae5b3fb5526b0f8e2c0b15e198a..8ea58d14e85d796a2adef08f20b570c95e0b9d85 100644 --- a/roadmap/implementers-guide/src/types/messages.md +++ b/roadmap/implementers-guide/src/types/messages.md @@ -5,6 +5,34 @@ Types of messages that are passed between parachains and the relay chain: UMP, D There is also HRMP (Horizontally Relay-routed Message Passing) which provides the same functionality although with smaller scalability potential. +## Vertical Message Passing + +Types required for message passing between the relay-chain and a parachain. + +Actual contents of the messages is specified by the XCM standard. + +```rust,ignore +/// A message sent from a parachain to the relay-chain. +type UpwardMessage = Vec; + +/// A message sent from the relay-chain down to a parachain. +/// +/// The size of the message is limited by the `config.max_downward_message_size` +/// parameter. +type DownwardMessage = Vec; + +/// This struct extends `DownwardMessage` by adding the relay-chain block number when the message was +/// enqueued in the downward message queue. +struct InboundDownwardMessage { + /// The block number at which this messages was put into the downward message queue. + pub sent_at: BlockNumber, + /// The actual downward message to processes. + pub msg: DownwardMessage, +} +``` + +## Horizontal Message Passing + ## HrmpChannelId A type that uniquely identifies an HRMP channel. An HRMP channel is established between two paras. @@ -14,8 +42,6 @@ that we use the first item tuple for the sender and the second for the recipient is allowed between two participants in one direction, i.e. there cannot be 2 different channels identified by `(A, B)`. -`HrmpChannelId` has a defined ordering: first `sender` and tie is resolved by `recipient`. 
- ```rust,ignore struct HrmpChannelId { sender: ParaId, @@ -23,65 +49,6 @@ struct HrmpChannelId { } ``` -## Upward Message - -A type of messages dispatched from a parachain to the relay chain. - -```rust,ignore -enum ParachainDispatchOrigin { - /// As a simple `Origin::Signed`, using `ParaId::account_id` as its value. This is good when - /// interacting with standard modules such as `balances`. - Signed, - /// As the special `Origin::Parachain(ParaId)`. This is good when interacting with parachain- - /// aware modules which need to succinctly verify that the origin is a parachain. - Parachain, - /// As the simple, superuser `Origin::Root`. This can only be done on specially permissioned - /// parachains. - Root, -} - -/// An opaque byte buffer that encodes an entrypoint and the arguments that should be -/// provided to it upon the dispatch. -/// -/// NOTE In order to be executable the byte buffer should be decoded which potentially can fail if -/// the encoding was changed. -type RawDispatchable = Vec; - -enum UpwardMessage { - /// This upward message is meant to schedule execution of a provided dispatchable. - Dispatchable { - /// The origin with which the dispatchable should be executed. - origin: ParachainDispatchOrigin, - /// The dispatchable to be executed in its raw form. - dispatchable: RawDispatchable, - }, - /// A message for initiation of opening a new HRMP channel between the origin para and the - /// given `recipient`. - /// - /// Let `origin` be the parachain that sent this upward message. In that case the channel - /// to be opened is (`origin` -> `recipient`). - HrmpInitOpenChannel { - /// The receiving party in the channel. - recipient: ParaId, - /// How many messages can be stored in the channel at most. - max_places: u32, - /// The maximum size of a message in this channel. - max_message_size: u32, - }, - /// A message that is meant to confirm the HRMP open channel request initiated earlier by the - /// `HrmpInitOpenChannel` by the given `sender`. 
- /// - /// Let `origin` be the parachain that sent this upward message. In that case the channel - /// (`origin` -> `sender`) will be opened during the session change. - HrmpAcceptOpenChannel(ParaId), - /// A message for closing the specified existing channel `ch`. - /// - /// The channel to be closed is `(ch.sender -> ch.recipient)`. The parachain that sent this - /// upward message must be either `ch.sender` or `ch.recipient`. - HrmpCloseChannel(HrmpChannelId), -} -``` - ## Horizontal Message This is a message sent from a parachain to another parachain that travels through the relay chain. @@ -105,32 +72,3 @@ struct InboundHrmpMessage { pub data: Vec, } ``` - -## Downward Message - -`DownwardMessage` - is a message that goes down from the relay chain to a parachain. Such a message -could be seen as a notification, however, it is conceivable that they might be used by the relay -chain to send a request to the parachain (likely, through the `ParachainSpecific` variant). - -The serialized size of the message is limited by the `config.critical_downward_message_size` parameter. - -```rust,ignore -enum DownwardMessage { - /// Some funds were transferred into the parachain's account. The hash is the identifier that - /// was given with the transfer. - TransferInto(AccountId, Balance, Remark), - /// An opaque message which interpretation is up to the recipient para. This variant ought - /// to be used as a basis for special protocols between the relay chain and, typically system, - /// paras. - ParachainSpecific(Vec), -} - -/// A wrapped version of `DownwardMessage`. The difference is that it has attached the block number when -/// the message was sent. -struct InboundDownwardMessage { - /// The block number at which this messages was put into the downward message queue. - pub sent_at: BlockNumber, - /// The actual downward message to processes. 
- pub msg: DownwardMessage, -} -``` diff --git a/roadmap/implementers-guide/src/types/network.md b/roadmap/implementers-guide/src/types/network.md index 75f251613f2524361380f97e99de39178c37c334..79eab39002c6372d2b8d3fc8cb3ff5efda013e89 100644 --- a/roadmap/implementers-guide/src/types/network.md +++ b/roadmap/implementers-guide/src/types/network.md @@ -8,7 +8,12 @@ These types are those that are actually sent over the network to subsystems. type RequestId = u64; type ProtocolVersion = u32; struct PeerId(...); // opaque, unique identifier of a peer. -struct View(Vec); // Up to `N` (5?) chain heads. +struct View { + // Up to `N` (5?) chain heads. + heads: Vec, + // The number of the finalized block. + finalized_number: BlockNumber, +} enum ObservedRole { Full, @@ -18,12 +23,38 @@ enum ObservedRole { ## V1 Network Subsystem Message Types +### Approval Distribution V1 + +```rust +enum ApprovalDistributionV1Message { + /// Assignments for candidates in recent, unfinalized blocks. + /// + /// The u32 is the claimed index of the candidate this assignment corresponds to. Actually checking the assignment + /// may yield a different result. + Assignments(Vec<(IndirectAssignmentCert, u32)>), + /// Approvals for candidates in some recent, unfinalized block. + Approvals(Vec), +} +``` + ### Availability Distribution V1 ```rust enum AvailabilityDistributionV1Message { /// An erasure chunk for a given candidate hash. - Chunk(Hash, ErasureChunk), + Chunk(CandidateHash, ErasureChunk), +} +``` + +### Availability Recovery V1 + +```rust +enum AvailabilityRecoveryV1Message { + /// Request a chunk for a given candidate hash and validator index. + RequestChunk(RequestId, CandidateHash, ValidatorIndex), + /// Respond with chunk for a given candidate hash and validator index. + /// The response may be `None` if the requestee does not have the chunk. + Chunk(RequestId, Option), } ``` @@ -82,7 +113,9 @@ These are the messages for the protocol on the validation peer-set. 
```rust enum ValidationProtocolV1 { + ApprovalDistribution(ApprovalDistributionV1Message), AvailabilityDistribution(AvailabilityDistributionV1Message), + AvailabilityRecovery(AvailabilityRecoveryV1Message), BitfieldDistribution(BitfieldDistributionV1Message), PoVDistribution(PoVDistributionV1Message), StatementDistribution(StatementDistributionV1Message), diff --git a/roadmap/implementers-guide/src/types/overseer-protocol.md b/roadmap/implementers-guide/src/types/overseer-protocol.md index d25553e03aaafd636d050353de56eefb4dea06e4..44f62c3e8d68bd08e63aa4a192dccd17b9dece1a 100644 --- a/roadmap/implementers-guide/src/types/overseer-protocol.md +++ b/roadmap/implementers-guide/src/types/overseer-protocol.md @@ -10,6 +10,8 @@ Signals from the overseer to a subsystem to request change in execution that has enum OverseerSignal { /// Signal about a change in active leaves. ActiveLeavesUpdate(ActiveLeavesUpdate), + /// Signal about a new best finalized block. + BlockFinalized(Hash), /// Conclude all operation. Conclude, } @@ -33,6 +35,89 @@ struct ActiveLeavesUpdate { } ``` +## Approval Voting + +Messages received by the approval voting subsystem. + +```rust +enum AssignmentCheckResult { + // The vote was accepted and should be propagated onwards. + Accepted, + // The vote was valid but duplicate and should not be propagated onwards. + AcceptedDuplicate, + // The vote was valid but too far in the future to accept right now. + TooFarInFuture, + // The vote was bad and should be ignored, reporting the peer who propagated it. + Bad, +} + +enum ApprovalCheckResult { + // The vote was accepted and should be propagated onwards. + Accepted, + // The vote was bad and should be ignored, reporting the peer who propagated it. + Bad, +} + +enum ApprovalVotingMessage { + /// Check if the assignment is valid and can be accepted by our view of the protocol. + /// Should not be sent unless the block hash is known. 
+ CheckAndImportAssignment( + IndirectAssignmentCert, + ResponseChannel<AssignmentCheckResult>, + ), + /// Check if the approval vote is valid and can be accepted by our view of the + /// protocol. + /// + /// Should not be sent unless the block hash within the indirect vote is known. + CheckAndImportApproval( + IndirectSignedApprovalVote, + ResponseChannel<ApprovalCheckResult>, + ), + /// Returns the highest possible ancestor hash of the provided block hash which is + /// acceptable to vote on finality for. + /// The `BlockNumber` provided is the number of the block's ancestor which is the + /// earliest possible vote. + /// + /// It can also return the same block hash, if that is acceptable to vote upon. + /// Return `None` if the input hash is unrecognized. + ApprovedAncestor(Hash, BlockNumber, ResponseChannel<Option<Hash>>), +} +``` + +## Approval Distribution + +Messages received by the Approval Distribution subsystem. + +```rust +/// Metadata about a block which is now live in the approval protocol. +struct BlockApprovalMeta { + /// The hash of the block. + hash: Hash, + /// The number of the block. + number: BlockNumber, + /// The candidates included by the block. Note that these are not the same as the candidates that appear within the + /// block body. + candidates: Vec<CandidateHash>, + /// The consensus slot number of the block. + slot_number: SlotNumber, +} + +enum ApprovalDistributionMessage { + /// Notify the `ApprovalDistribution` subsystem about new blocks and the candidates contained within + /// them. + NewBlocks(Vec<BlockApprovalMeta>), + /// Distribute an assignment cert from the local validator. The cert is assumed + /// to be valid, relevant, and for the given relay-parent and validator index. + /// + /// The `u32` param is the candidate index in the fully-included list. + DistributeAssignment(IndirectAssignmentCert, u32), + /// Distribute an approval vote for the local validator. The approval vote is assumed to be + /// valid, relevant, and the corresponding approval already issued. 
If not, the subsystem is free to drop + /// the message. + DistributeApproval(IndirectSignedApprovalVote), +} +``` + ## All Messages > TODO (now) @@ -55,6 +140,25 @@ enum AvailabilityDistributionMessage { } ``` +## Availability Recovery Message + +Messages received by the availability recovery subsystem. + +```rust +enum RecoveryError { + Invalid, + Unavailable, +} +enum AvailabilityRecoveryMessage { + /// Recover available data from validators on the network. + RecoverAvailableData( + CandidateReceipt, + SessionIndex, + ResponseChannel>, + ), +} +``` + ## Availability Store Message Messages to and from the availability store. @@ -62,18 +166,18 @@ Messages to and from the availability store. ```rust enum AvailabilityStoreMessage { /// Query the `AvailableData` of a candidate by hash. - QueryAvailableData(Hash, ResponseChannel>), + QueryAvailableData(CandidateHash, ResponseChannel>), /// Query whether an `AvailableData` exists within the AV Store. - QueryDataAvailability(Hash, ResponseChannel), + QueryDataAvailability(CandidateHash, ResponseChannel), /// Query a specific availability chunk of the candidate's erasure-coding by validator index. /// Returns the chunk and its inclusion proof against the candidate's erasure-root. - QueryChunk(Hash, ValidatorIndex, ResponseChannel>), + QueryChunk(CandidateHash, ValidatorIndex, ResponseChannel>), /// Store a specific chunk of the candidate's erasure-coding by validator index, with an /// accompanying proof. - StoreChunk(Hash, ValidatorIndex, AvailabilityChunkAndProof, ResponseChannel>), + StoreChunk(CandidateHash, ValidatorIndex, AvailabilityChunkAndProof, ResponseChannel>), /// Store `AvailableData`. If `ValidatorIndex` is provided, also store this validator's /// `AvailabilityChunkAndProof`. 
- StoreAvailableData(Hash, Option, u32, AvailableData, ResponseChannel>), + StoreAvailableData(CandidateHash, Option, u32, AvailableData, ResponseChannel>), } ``` @@ -107,7 +211,7 @@ enum BitfieldSigningMessage { } enum CandidateBackingMessage { /// Requests a set of backable candidates that could be backed in a child of the given /// relay-parent, referenced by its hash. - GetBackedCandidates(Hash, ResponseChannel>), + GetBackedCandidates(Hash, Vec, ResponseChannel>), /// Note that the Candidate Backing subsystem should second the given candidate in the context of the /// given relay-parent (ref. by hash). This candidate must be validated using the provided PoV. /// The PoV is expected to match the `pov_hash` in the descriptor. @@ -219,6 +323,7 @@ enum NetworkBridgeMessage { /// /// Also ask the network to stay connected to these peers at least /// until the request is revoked. + /// This can be done by dropping the receiver. ConnectToValidators { /// Ids of the validators to connect to. validator_ids: Vec, @@ -226,13 +331,6 @@ enum NetworkBridgeMessage { /// the validators as they are connected. /// The response is sent immediately for already connected peers. connected: ResponseStream<(AuthorityDiscoveryId, PeerId)>, - /// By revoking the request the caller allows the network to - /// free some peer slots thus freeing the resources. - /// It doesn't necessarily lead to peers disconnection though. - /// The revokation is enacted on in the next connection request. - /// - /// This can be done by sending to the channel or dropping the sender. - revoke: ReceiverChannel<()>, }, } ``` @@ -286,7 +384,7 @@ enum ProvisionableData { /// This bitfield indicates the availability of various candidate blocks. Bitfield(Hash, SignedAvailabilityBitfield), /// The Candidate Backing subsystem believes that this candidate is valid, pending availability. 
- BackedCandidate(BackedCandidate), + BackedCandidate(CandidateReceipt), /// Misbehavior reports are self-contained proofs of validator misbehavior. MisbehaviorReport(Hash, MisbehaviorReport), /// Disputes trigger a broad dispute resolution process. @@ -330,12 +428,8 @@ enum RuntimeApiRequest { Validators(ResponseChannel>), /// Get the validator groups and rotation info. ValidatorGroups(ResponseChannel<(Vec>, GroupRotationInfo)>), - /// Get the session index for children of the block. This can be used to construct a signing - /// context. - SessionIndex(ResponseChannel), - /// Get the validation code for a specific para, using the given occupied core assumption. - ValidationCode(ParaId, OccupiedCoreAssumption, ResponseChannel>), - /// Get the persisted validation data at the state of a given block for a specific para, + /// Get information about all availability cores. + AvailabilityCores(ResponseChannel>), /// with the given occupied core assumption. PersistedValidationData( ParaId, @@ -354,12 +448,25 @@ enum RuntimeApiRequest { CandidateCommitments, RuntimeApiSender, ), - /// Get information about all availability cores. - AvailabilityCores(ResponseChannel>), + /// Get the session index for children of the block. This can be used to construct a signing + /// context. + SessionIndexForChild(ResponseChannel), + /// Get the validation code for a specific para, using the given occupied core assumption. + ValidationCode(ParaId, OccupiedCoreAssumption, ResponseChannel>), + /// Fetch the historical validation code used by a para for candidates executed in + /// the context of a given block height in the current chain. + HistoricalValidationCode(ParaId, BlockNumber, ResponseChannel>), /// Get a committed candidate receipt for all candidates pending availability. CandidatePendingAvailability(ParaId, ResponseChannel>), /// Get all events concerning candidates in the last block. CandidateEvents(ResponseChannel>), + /// Get the session info for the given session, if stored. 
+ SessionInfo(SessionIndex, ResponseChannel>), + /// Get all the pending inbound messages in the downward message queue for a para. + DmqContents(ParaId, ResponseChannel>>), + /// Get the contents of all channels addressed to the given recipient. Channels that have no + /// messages in them are also included. + InboundHrmpChannelsContents(ParaId, ResponseChannel>>>), } enum RuntimeApiMessage { @@ -400,7 +507,7 @@ Various modules request that the [Candidate Validation subsystem](../node/utilit enum ValidationResult { /// Candidate is valid, and here are the outputs and the validation data used to form inputs. /// In practice, this should be a shared type so that validation caching can be done. - Valid(ValidationOutputs, PersistedValidationData), + Valid(CandidateCommitments, PersistedValidationData), /// Candidate is invalid. Invalid, } diff --git a/roadmap/implementers-guide/src/types/runtime.md b/roadmap/implementers-guide/src/types/runtime.md index aaf55e700087ddfbe9fc16610b8f132caf145722..332860d4703b33e8604f21c9a3de481b76c6c188 100644 --- a/roadmap/implementers-guide/src/types/runtime.md +++ b/roadmap/implementers-guide/src/types/runtime.md @@ -13,7 +13,7 @@ struct HostConfiguration { /// The delay, in blocks, before a validation upgrade is applied. pub validation_upgrade_delay: BlockNumber, /// The acceptance period, in blocks. This is the amount of blocks after availability that validators - /// and fishermen have to perform secondary approval checks or issue reports. + /// and fishermen have to perform secondary checks or issue reports. pub acceptance_period: BlockNumber, /// The maximum validation code size, in bytes. pub max_code_size: u32, @@ -34,6 +34,23 @@ struct HostConfiguration { pub thread_availability_period: BlockNumber, /// The amount of blocks ahead to schedule parathreads. pub scheduling_lookahead: u32, + /// The maximum number of validators to have per core. `None` means no maximum. 
+ pub max_validators_per_core: Option, + /// The amount of sessions to keep for disputes. + pub dispute_period: SessionIndex, + /// The amount of consensus slots that must pass between submitting an assignment and + /// submitting an approval vote before a validator is considered a no-show. + /// Must be at least 1. + pub no_show_slots: u32, + /// The number of delay tranches in total. + pub n_delay_tranches: u32, + /// The width of the zeroth delay tranche for approval assignments. This many delay tranches + /// beyond 0 are all consolidated to form a wide 0 tranche. + pub zeroth_delay_tranche_width: u32, + /// The number of validators needed to approve a block. + pub needed_approvals: u32, + /// The number of samples to do of the RelayVRFModulo approval assignment criterion. + pub relay_vrf_modulo_samples: u32, /// Total number of individual messages allowed in the parachain -> relay-chain message queue. pub max_upward_queue_count: u32, /// Total size of messages allowed in the parachain -> relay-chain message queue before which @@ -44,16 +61,14 @@ struct HostConfiguration { /// stage. /// /// NOTE that this is a soft limit and could be exceeded. - pub preferred_dispatchable_upward_messages_step_weight: u32, - /// Any dispatchable upward message that requests more than the critical amount is rejected. + pub preferred_dispatchable_upward_messages_step_weight: Weight, + /// The maximum size of an upward message that can be sent by a candidate. /// - /// The parameter value is picked up so that no dispatchable can make the block weight exceed - /// the total budget. I.e. that the sum of `preferred_dispatchable_upward_messages_step_weight` - /// and `dispatchable_upward_message_critical_weight` doesn't exceed the amount of weight left - /// under a typical worst case (e.g. no upgrades, etc) weight consumed by the required phases of - /// block execution (i.e. initialization, finalization and inherents). 
- pub dispatchable_upward_message_critical_weight: u32, + /// This parameter affects the upper bound of size of `CandidateCommitments`. + pub max_upward_message_size: u32, /// The maximum number of messages that a candidate can contain. + /// + /// This parameter affects the upper bound of size of `CandidateCommitments`. pub max_upward_message_num_per_candidate: u32, /// The maximum size of a message that can be put in a downward message queue. /// @@ -61,7 +76,7 @@ struct HostConfiguration { /// the PoV size. Of course, there is a lot of other different things that a parachain may /// decide to do with its PoV so this value in practice will be picked as a fraction of the PoV /// size. - pub critical_downward_message_size: u32, + pub max_downward_message_size: u32, /// Number of sessions after which an HRMP open channel request expires. pub hrmp_open_request_ttl: u32, /// The deposit that the sender should provide for opening an HRMP channel. @@ -69,18 +84,24 @@ struct HostConfiguration { /// The deposit that the recipient should provide for accepting opening an HRMP channel. pub hrmp_recipient_deposit: u32, /// The maximum number of messages allowed in an HRMP channel at once. - pub hrmp_channel_max_places: u32, + pub hrmp_channel_max_capacity: u32, /// The maximum total size of messages in bytes allowed in an HRMP channel at once. - pub hrmp_channel_max_size: u32, + pub hrmp_channel_max_total_size: u32, /// The maximum number of inbound HRMP channels a parachain is allowed to accept. pub hrmp_max_parachain_inbound_channels: u32, /// The maximum number of inbound HRMP channels a parathread is allowed to accept. pub hrmp_max_parathread_inbound_channels: u32, /// The maximum size of a message that could ever be put into an HRMP channel. + /// + /// This parameter affects the upper bound of size of `CandidateCommitments`. pub hrmp_channel_max_message_size: u32, /// The maximum number of outbound HRMP channels a parachain is allowed to open. 
pub hrmp_max_parachain_outbound_channels: u32, /// The maximum number of outbound HRMP channels a parathread is allowed to open. pub hrmp_max_parathread_outbound_channels: u32, + /// The maximum number of outbound HRMP messages can be sent by a candidate. + /// + /// This parameter affects the upper bound of size of `CandidateCommitments`. + pub hrmp_max_message_num_per_candidate: u32, } ``` diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 08445cd7ba04a922abd6e568283d74f4f88a7f21..bfda299e713c8ac2fcee1df0fed22bf9cace8788 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -1,30 +1,30 @@ [package] name = "polkadot-rpc" -version = "0.8.26" +version = "0.8.27" authors = ["Parity Technologies "] edition = "2018" [dependencies] -jsonrpc-core = "15.0.0" +jsonrpc-core = "15.1.0" polkadot-primitives = { path = "../primitives" } -sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-consensus-babe = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-chain-spec = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-consensus-babe = { git = "https://github.com/paritytech/substrate", branch = "master"} -sc-consensus-babe-rpc = { git = "https://github.com/paritytech/substrate", branch = "master"} -sc-consensus-epochs = { git = "https://github.com/paritytech/substrate", branch = "master"} -sc-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master" } 
-sc-finality-grandpa-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master"} -sc-sync-state-rpc = { git = "https://github.com/paritytech/substrate", branch = "master"} -txpool-api = { package = "sp-transaction-pool", git = "https://github.com/paritytech/substrate", branch = "master" } -frame-rpc-system = { package = "substrate-frame-rpc-system", git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/substrate", branch = "master" } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false } -sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-consensus = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-consensus-babe = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sc-chain-spec = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sc-rpc = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sc-consensus-babe = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27"} +sc-consensus-babe-rpc = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27"} +sc-consensus-epochs = { git = 
"https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27"} +sc-finality-grandpa = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sc-finality-grandpa-rpc = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27"} +sc-sync-state-rpc = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27"} +txpool-api = { package = "sp-transaction-pool", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +frame-rpc-system = { package = "substrate-frame-rpc-system", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +pallet-transaction-payment-rpc = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +parity-scale-codec = { version = "1.3.5", default-features = false } +sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } diff --git a/runtime/common/Cargo.toml b/runtime/common/Cargo.toml index cfdf76ee884fd8edd5324c52976dca8ffec2dcc4..c813e3113e1e27f4a8194504aa5384c4d60e5612 100644 --- a/runtime/common/Cargo.toml +++ b/runtime/common/Cargo.toml @@ -1,62 +1,63 @@ [package] name = "polkadot-runtime-common" -version = "0.8.26" +version = "0.8.27" authors = ["Parity Technologies "] edition = "2018" [dependencies] bitvec = { version = "0.17.4", default-features = false, features = ["alloc"] } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -log = { version = "0.3.9", optional = true } -rustc-hex = { version = "2.0.1", default-features = false } -serde = { version = "1.0.102", default-features = false } -serde_derive = { version = "1.0.102", optional = true } +parity-scale-codec = { version = "1.3.5", default-features = false, features = 
["derive"] } +log = { version = "0.4.11", optional = true } +rustc-hex = { version = "2.1.0", default-features = false } +serde = { version = "1.0.118", default-features = false } +serde_derive = { version = "1.0.117", optional = true } static_assertions = "1.1.0" -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -inherents = { package = "sp-inherents", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { package = "sp-std", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +inherents = { package = "sp-inherents", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-std = { package = "sp-std", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-session = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-staking 
= { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } -pallet-authorship = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-system = {git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-vesting = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-offences = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } +pallet-authorship = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-session = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +frame-support = { git = 
"https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-staking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +frame-system = {git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-vesting = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-offences = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-treasury = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false, optional = true } primitives = { package = "polkadot-primitives", path = "../../primitives", default-features = false } -libsecp256k1 = { version = "0.3.2", default-features = false, optional = true } +libsecp256k1 = { version = "0.3.5", default-features = false, optional = true } runtime-parachains = { package = "polkadot-runtime-parachains", path = "../parachains", default-features = false } [dev-dependencies] -hex-literal = "0.2.1" -keyring = { package = "sp-keyring", git = "https://github.com/paritytech/substrate", branch = "master" } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master" } 
-pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-staking-reward-curve = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-treasury = { git = "https://github.com/paritytech/substrate", branch = "master" } -trie-db = "0.22.0" -serde_json = "1.0.41" -libsecp256k1 = "0.3.2" +hex-literal = "0.3.1" +keyring = { package = "sp-keyring", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +pallet-staking-reward-curve = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +pallet-treasury = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +trie-db = "0.22.1" +serde_json = "1.0.60" +libsecp256k1 = "0.3.5" [features] default = ["std"] no_std = [] std = [ "bitvec/std", - "codec/std", + "parity-scale-codec/std", "log", "rustc-hex/std", "serde_derive", @@ -79,6 +80,7 @@ std = [ "pallet-timestamp/std", "pallet-vesting/std", "pallet-transaction-payment/std", + "runtime-parachains/std", ] runtime-benchmarks = [ "libsecp256k1/hmac", diff --git a/runtime/common/src/claims.rs b/runtime/common/src/claims.rs index b01babaa64011535f0c4400cccdb5457ef5d84e0..1d6f02bfe0c4ac6759ace32d26bd3122a201353e 100644 --- a/runtime/common/src/claims.rs +++ b/runtime/common/src/claims.rs @@ -23,7 +23,7 @@ use frame_support::{ traits::{Currency, Get, VestingSchedule, EnsureOrigin, IsSubType}, weights::{Pays, DispatchClass} }; use frame_system::{ensure_signed, 
ensure_root, ensure_none}; -use codec::{Encode, Decode}; +use parity_scale_codec::{Encode, Decode}; #[cfg(feature = "std")] use serde::{self, Serialize, Deserialize, Serializer, Deserializer}; #[cfg(feature = "std")] @@ -37,13 +37,13 @@ use sp_runtime::{ }; use primitives::v1::ValidityError; -type CurrencyOf = <::VestingSchedule as VestingSchedule<::AccountId>>::Currency; -type BalanceOf = as Currency<::AccountId>>::Balance; +type CurrencyOf = <::VestingSchedule as VestingSchedule<::AccountId>>::Currency; +type BalanceOf = as Currency<::AccountId>>::Balance; /// Configuration trait. -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; type VestingSchedule: VestingSchedule; type Prefix: Get<&'static [u8]>; type MoveClaimOrigin: EnsureOrigin; @@ -130,7 +130,7 @@ impl sp_std::fmt::Debug for EcdsaSignature { decl_event!( pub enum Event where Balance = BalanceOf, - AccountId = ::AccountId + AccountId = ::AccountId { /// Someone claimed some DOTs. [who, ethereum_address, amount] Claimed(AccountId, EthereumAddress, Balance), @@ -138,7 +138,7 @@ decl_event!( ); decl_error! { - pub enum Error for Module { + pub enum Error for Module { /// Invalid Ethereum signature. InvalidEthereumSignature, /// Ethereum address has no claim. @@ -159,7 +159,7 @@ decl_storage! { // A macro for the Storage trait, and its implementation, for this module. // This allows for type-safe usage of the Substrate storage database, so you can // keep things around between blocks. - trait Store for Module as Claims { + trait Store for Module as Claims { Claims get(fn claims) build(|config: &GenesisConfig| { config.claims.iter().map(|(a, b, _, _)| (a.clone(), b.clone())).collect::>() }): map hasher(identity) EthereumAddress => Option>; @@ -194,7 +194,7 @@ decl_storage! { } decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { type Error = Error; /// The Prefix that is used in signed Ethereum messages for this network @@ -426,7 +426,7 @@ fn to_ascii_hex(data: &[u8]) -> Vec { r } -impl Module { +impl Module { // Constructs the message that Ethereum RPC's `personal_sign` and `eth_sign` would sign. fn ethereum_signable_message(what: &[u8], extra: &[u8]) -> Vec { let prefix = T::Prefix::get(); @@ -487,7 +487,7 @@ impl Module { } } -impl sp_runtime::traits::ValidateUnsigned for Module { +impl sp_runtime::traits::ValidateUnsigned for Module { type Call = Call; fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { @@ -538,11 +538,11 @@ impl sp_runtime::traits::ValidateUnsigned for Module { /// Validate `attest` calls prior to execution. Needed to avoid a DoS attack since they are /// otherwise free to place on chain. #[derive(Encode, Decode, Clone, Eq, PartialEq)] -pub struct PrevalidateAttests(sp_std::marker::PhantomData) where - ::Call: IsSubType>; +pub struct PrevalidateAttests(sp_std::marker::PhantomData) where + ::Call: IsSubType>; -impl Debug for PrevalidateAttests where - ::Call: IsSubType> +impl Debug for PrevalidateAttests where + ::Call: IsSubType> { #[cfg(feature = "std")] fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { @@ -555,8 +555,8 @@ impl Debug for PrevalidateAttests where } } -impl PrevalidateAttests where - ::Call: IsSubType> +impl PrevalidateAttests where + ::Call: IsSubType> { /// Create new `SignedExtension` to check runtime version. 
pub fn new() -> Self { @@ -564,11 +564,11 @@ impl PrevalidateAttests where } } -impl SignedExtension for PrevalidateAttests where - ::Call: IsSubType> +impl SignedExtension for PrevalidateAttests where + ::Call: IsSubType> { type AccountId = T::AccountId; - type Call = ::Call; + type Call = ::Call; type AdditionalSigned = (); type Pre = (); const IDENTIFIER: &'static str = "PrevalidateAttests"; @@ -615,7 +615,7 @@ mod secp_utils { res.0.copy_from_slice(&keccak_256(&public(secret).serialize()[1..65])[12..]); res } - pub fn sig(secret: &secp256k1::SecretKey, what: &[u8], extra: &[u8]) -> EcdsaSignature { + pub fn sig(secret: &secp256k1::SecretKey, what: &[u8], extra: &[u8]) -> EcdsaSignature { let msg = keccak_256(&>::ethereum_signable_message(&to_ascii_hex(what)[..], extra)); let (sig, recovery_id) = secp256k1::sign(&secp256k1::Message::parse(&msg), secret); let mut r = [0u8; 65]; @@ -633,10 +633,10 @@ mod tests { use secp_utils::*; use sp_core::H256; - use codec::Encode; + use parity_scale_codec::Encode; // The testing primitives are very useful for avoiding having to work with signatures // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. - use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup, Identity}, testing::Header}; + use sp_runtime::{traits::{BlakeTwo256, IdentityLookup, Identity}, testing::Header}; use frame_support::{ impl_outer_origin, impl_outer_dispatch, assert_ok, assert_err, assert_noop, parameter_types, ord_parameter_types, weights::{Pays, GetDispatchInfo}, traits::ExistenceRequirement, @@ -661,12 +661,12 @@ mod tests { pub struct Test; parameter_types! 
{ pub const BlockHashCount: u32 = 250; - pub const MaximumBlockWeight: u32 = 4 * 1024 * 1024; - pub const MaximumBlockLength: u32 = 4 * 1024 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); } - impl frame_system::Trait for Test { + impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Call = Call; type Index = u64; @@ -678,13 +678,6 @@ mod tests { type Header = Header; type Event = (); type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type PalletInfo = (); type AccountData = pallet_balances::AccountData; @@ -697,7 +690,7 @@ mod tests { pub const ExistentialDeposit: u64 = 1; } - impl pallet_balances::Trait for Test { + impl pallet_balances::Config for Test { type Balance = u64; type Event = (); type DustRemoval = (); @@ -711,7 +704,7 @@ mod tests { pub const MinVestedTransfer: u64 = 0; } - impl pallet_vesting::Trait for Test { + impl pallet_vesting::Config for Test { type Event = (); type Currency = Balances; type BlockNumberToBalance = Identity; @@ -726,7 +719,7 @@ mod tests { pub const Six: u64 = 6; } - impl Trait for Test { + impl Config for Test { type Event = (); type VestingSchedule = Vesting; type Prefix = Prefix; @@ -1048,7 +1041,7 @@ mod tests { fn claiming_while_vested_doesnt_work() { new_test_ext().execute_with(|| { // A user is already vested - assert_ok!(::VestingSchedule::add_vesting_schedule(&69, total_claims(), 100, 10)); + assert_ok!(::VestingSchedule::add_vesting_schedule(&69, total_claims(), 100, 10)); CurrencyOf::::make_free_balance_be(&69, total_claims()); assert_eq!(Balances::free_balance(69), total_claims()); 
assert_ok!(Claims::mint_claim(Origin::root(), eth(&bob()), 200, Some((50, 10, 1)), None)); @@ -1181,7 +1174,7 @@ mod benchmarking { const MAX_CLAIMS: u32 = 10_000; const VALUE: u32 = 1_000_000; - fn create_claim(input: u32) -> DispatchResult { + fn create_claim(input: u32) -> DispatchResult { let secret_key = secp256k1::SecretKey::parse(&keccak_256(&input.encode())).unwrap(); let eth_address = eth(&secret_key); let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); @@ -1189,7 +1182,7 @@ mod benchmarking { Ok(()) } - fn create_claim_attest(input: u32) -> DispatchResult { + fn create_claim_attest(input: u32) -> DispatchResult { let secret_key = secp256k1::SecretKey::parse(&keccak_256(&input.encode())).unwrap(); let eth_address = eth(&secret_key); let vesting = Some((100_000u32.into(), 1_000u32.into(), 100u32.into())); diff --git a/runtime/common/src/crowdfund.rs b/runtime/common/src/crowdfund.rs index c3f96f17285d60840e03c8e38844cae0dc0c4a68..865dbcc6fc4dc084dab87ac02599dee40a70f790 100644 --- a/runtime/common/src/crowdfund.rs +++ b/runtime/common/src/crowdfund.rs @@ -69,7 +69,7 @@ use frame_support::{ decl_module, decl_storage, decl_event, decl_error, storage::child, ensure, traits::{ - Currency, Get, OnUnbalanced, WithdrawReason, ExistenceRequirement::AllowDeath + Currency, Get, OnUnbalanced, WithdrawReasons, ExistenceRequirement::AllowDeath }, }; use frame_system::ensure_signed; @@ -77,18 +77,18 @@ use sp_runtime::{ModuleId, traits::{AccountIdConversion, Hash, Saturating, Zero, CheckedAdd} }; use crate::slots; -use codec::{Encode, Decode}; +use parity_scale_codec::{Encode, Decode}; use sp_std::vec::Vec; use primitives::v1::{Id as ParaId, HeadData}; pub type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; + <::Currency as Currency<::AccountId>>::Balance; #[allow(dead_code)] pub type NegativeImbalanceOf = - <::Currency as Currency<::AccountId>>::NegativeImbalance; + <::Currency as Currency<::AccountId>>::NegativeImbalance; -pub trait 
Trait: slots::Trait { - type Event: From> + Into<::Event>; +pub trait Config: slots::Config { + type Event: From> + Into<::Event>; /// ModuleID for the crowdfund module. An appropriate value could be ```ModuleId(*b"py/cfund")``` type ModuleId: Get; @@ -164,7 +164,7 @@ pub struct FundInfo { } decl_storage! { - trait Store for Module as Crowdfund { + trait Store for Module as Crowdfund { /// Info on all of the funds. Funds get(fn funds): map hasher(twox_64_concat) FundIndex @@ -184,7 +184,7 @@ decl_storage! { decl_event! { pub enum Event where - ::AccountId, + ::AccountId, Balance = BalanceOf, { /// Create a new crowdfunding campaign. [fund_index] @@ -205,7 +205,7 @@ decl_event! { } decl_error! { - pub enum Error for Module { + pub enum Error for Module { /// Last slot must be greater than first slot. LastSlotBeforeFirstSlot, /// The last slot cannot be more then 3 slots after the first slot. @@ -251,7 +251,7 @@ decl_error! { } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { type Error = Error; const ModuleId: ModuleId = T::ModuleId::get(); @@ -273,7 +273,7 @@ decl_module! { ensure!(end > >::block_number(), Error::::CannotEndInPast); let deposit = T::SubmissionDeposit::get(); - let transfer = WithdrawReason::Transfer.into(); + let transfer = WithdrawReasons::TRANSFER; let imb = T::Currency::withdraw(&owner, deposit, transfer, AllowDeath)?; let index = FundCount::get(); @@ -455,7 +455,7 @@ decl_module! { // Avoid using transfer to ensure we don't pay any fees. let fund_account = &Self::fund_account_id(index); - let transfer = WithdrawReason::Transfer.into(); + let transfer = WithdrawReasons::TRANSFER; let imbalance = T::Currency::withdraw(fund_account, balance, transfer, AllowDeath)?; let _ = T::Currency::resolve_into_existing(&who, imbalance); @@ -485,7 +485,7 @@ decl_module! { let account = Self::fund_account_id(index); // Avoid using transfer to ensure we don't pay any fees. 
- let transfer = WithdrawReason::Transfer.into(); + let transfer = WithdrawReasons::TRANSFER; let imbalance = T::Currency::withdraw(&account, fund.deposit, transfer, AllowDeath)?; let _ = T::Currency::resolve_into_existing(&fund.owner, imbalance); @@ -528,7 +528,7 @@ decl_module! { } } -impl Module { +impl Module { /// The account ID of the fund pot. /// /// This actually does computation. If you need to keep using it, then make sure you cache the @@ -560,7 +560,7 @@ impl Module { } pub fn crowdfund_kill(index: FundIndex) { - child::kill_storage(&Self::id_from_index(index)); + child::kill_storage(&Self::id_from_index(index), None); } } @@ -579,7 +579,7 @@ mod tests { // The testing primitives are very useful for avoiding having to work with signatures // or public keys. `u64` is used as the `AccountId` and no `Signature`s are requried. use sp_runtime::{ - Perbill, Permill, Percent, testing::Header, DispatchResult, + Permill, Percent, testing::Header, DispatchResult, traits::{BlakeTwo256, IdentityLookup}, }; use crate::slots::Registrar; @@ -595,12 +595,13 @@ mod tests { pub struct Test; parameter_types! 
{ pub const BlockHashCount: u32 = 250; - pub const MaximumBlockWeight: u32 = 4 * 1024 * 1024; - pub const MaximumBlockLength: u32 = 4 * 1024 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); } - impl frame_system::Trait for Test { + + impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Call = (); type Index = u64; @@ -612,13 +613,6 @@ mod tests { type Header = Header; type Event = (); type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type PalletInfo = (); type AccountData = pallet_balances::AccountData; @@ -629,7 +623,7 @@ mod tests { parameter_types! { pub const ExistentialDeposit: u64 = 1; } - impl pallet_balances::Trait for Test { + impl pallet_balances::Config for Test { type Balance = u64; type Event = (); type DustRemoval = (); @@ -667,7 +661,7 @@ mod tests { fn min_len() -> usize { 0 } fn max_len() -> usize { 0 } } - impl pallet_treasury::Trait for Test { + impl pallet_treasury::Config for Test { type Currency = pallet_balances::Module; type ApproveOrigin = frame_system::EnsureRoot; type RejectOrigin = frame_system::EnsureRoot; @@ -749,7 +743,7 @@ mod tests { pub const LeasePeriod: u64 = 10; pub const EndingPeriod: u64 = 3; } - impl slots::Trait for Test { + impl slots::Config for Test { type Event = (); type Currency = Balances; type Parachains = TestParachains; @@ -763,7 +757,7 @@ mod tests { pub const RetirementPeriod: u64 = 5; pub const CrowdfundModuleId: ModuleId = ModuleId(*b"py/cfund"); } - impl Trait for Test { + impl Config for Test { type Event = (); type SubmissionDeposit = SubmissionDeposit; type MinContribution = 
MinContribution; @@ -936,7 +930,7 @@ mod tests { assert_ok!(Crowdfund::fix_deploy_data( Origin::signed(1), 0, - ::Hash::default(), + ::Hash::default(), 0, vec![0].into() )); @@ -947,7 +941,7 @@ mod tests { assert_eq!( fund.deploy_data, Some(DeployData { - code_hash: ::Hash::default(), + code_hash: ::Hash::default(), code_size: 0, initial_head_data: vec![0].into(), }), @@ -966,7 +960,7 @@ mod tests { assert_noop!(Crowdfund::fix_deploy_data( Origin::signed(2), 0, - ::Hash::default(), + ::Hash::default(), 0, vec![0].into()), Error::::InvalidOrigin @@ -976,7 +970,7 @@ mod tests { assert_noop!(Crowdfund::fix_deploy_data( Origin::signed(1), 1, - ::Hash::default(), + ::Hash::default(), 0, vec![0].into()), Error::::InvalidFundIndex @@ -986,7 +980,7 @@ mod tests { assert_ok!(Crowdfund::fix_deploy_data( Origin::signed(1), 0, - ::Hash::default(), + ::Hash::default(), 0, vec![0].into(), )); @@ -994,7 +988,7 @@ mod tests { assert_noop!(Crowdfund::fix_deploy_data( Origin::signed(1), 0, - ::Hash::default(), + ::Hash::default(), 0, vec![1].into()), Error::::ExistingDeployData @@ -1014,7 +1008,7 @@ mod tests { assert_ok!(Crowdfund::fix_deploy_data( Origin::signed(1), 0, - ::Hash::default(), + ::Hash::default(), 0, vec![0].into(), )); @@ -1060,7 +1054,7 @@ mod tests { assert_ok!(Crowdfund::fix_deploy_data( Origin::signed(1), 0, - ::Hash::default(), + ::Hash::default(), 0, vec![0].into(), )); @@ -1088,7 +1082,7 @@ mod tests { assert_ok!(Crowdfund::fix_deploy_data( Origin::signed(1), 0, - ::Hash::default(), + ::Hash::default(), 0, vec![0].into(), )); @@ -1131,7 +1125,7 @@ mod tests { assert_ok!(Crowdfund::fix_deploy_data( Origin::signed(1), 0, - ::Hash::default(), + ::Hash::default(), 0, vec![0].into(), )); @@ -1273,7 +1267,7 @@ mod tests { assert_ok!(Crowdfund::fix_deploy_data( Origin::signed(1), 0, - ::Hash::default(), + ::Hash::default(), 0, vec![0].into(), )); @@ -1302,7 +1296,7 @@ mod tests { assert_ok!(Crowdfund::fix_deploy_data( Origin::signed(1), 0, - ::Hash::default(), + 
::Hash::default(), 0, vec![0].into(), )); @@ -1341,14 +1335,14 @@ mod tests { assert_ok!(Crowdfund::fix_deploy_data( Origin::signed(1), 0, - ::Hash::default(), + ::Hash::default(), 0, vec![0].into(), )); assert_ok!(Crowdfund::fix_deploy_data( Origin::signed(2), 1, - ::Hash::default(), + ::Hash::default(), 0, vec![0].into(), )); diff --git a/runtime/common/src/impls.rs b/runtime/common/src/impls.rs index f371972ccae803c58000adea5136e476ab0fa56e..b9bace009822e56c4a5f03c5de9b53998c967862 100644 --- a/runtime/common/src/impls.rs +++ b/runtime/common/src/impls.rs @@ -21,15 +21,14 @@ use crate::NegativeImbalance; /// Logic for the author to get a portion of fees. pub struct ToAuthor(sp_std::marker::PhantomData); - impl OnUnbalanced> for ToAuthor where - R: pallet_balances::Trait + pallet_authorship::Trait, - ::AccountId: From, - ::AccountId: Into, - ::Event: From::AccountId, - ::Balance, + R: pallet_balances::Config + pallet_authorship::Config, + ::AccountId: From, + ::AccountId: Into, + ::Event: From::AccountId, + ::Balance, pallet_balances::DefaultInstance> >, { @@ -40,3 +39,188 @@ where >::deposit_event(pallet_balances::RawEvent::Deposit(author, numeric_amount)); } } + +pub struct DealWithFees(sp_std::marker::PhantomData); +impl OnUnbalanced> for DealWithFees +where + R: pallet_balances::Config + pallet_treasury::Config + pallet_authorship::Config, + pallet_treasury::Module: OnUnbalanced>, + ::AccountId: From, + ::AccountId: Into, + ::Event: From::AccountId, + ::Balance, + pallet_balances::DefaultInstance> + >, +{ + fn on_unbalanceds(mut fees_then_tips: impl Iterator>) { + if let Some(fees) = fees_then_tips.next() { + // for fees, 80% to treasury, 20% to author + let mut split = fees.ration(80, 20); + if let Some(tips) = fees_then_tips.next() { + // for tips, if any, 100% to author + tips.merge_into(&mut split.1); + } + use pallet_treasury::Module as Treasury; + as OnUnbalanced<_>>::on_unbalanced(split.0); + as OnUnbalanced<_>>::on_unbalanced(split.1); + } + } +} + + 
+#[cfg(test)] +mod tests { + use super::*; + use frame_system::limits; + use frame_support::{impl_outer_origin, parameter_types, weights::DispatchClass}; + use frame_support::traits::FindAuthor; + use sp_core::H256; + use sp_runtime::{ + testing::Header, ModuleId, + traits::{BlakeTwo256, IdentityLookup}, + Perbill, + }; + use primitives::v1::AccountId; + + #[derive(Clone, PartialEq, Eq, Debug)] + pub struct Test; + + impl_outer_origin!{ + pub enum Origin for Test {} + } + + parameter_types! { + pub const BlockHashCount: u64 = 250; + pub BlockWeights: limits::BlockWeights = limits::BlockWeights::builder() + .for_class(DispatchClass::all(), |weight| { + weight.base_extrinsic = 100; + }) + .for_class(DispatchClass::non_mandatory(), |weight| { + weight.max_total = Some(1024); + }) + .build_or_panic(); + pub BlockLength: limits::BlockLength = limits::BlockLength::max(2 * 1024); + pub const AvailableBlockRatio: Perbill = Perbill::one(); + } + + impl frame_system::Config for Test { + type BaseCallFilter = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Call = (); + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type BlockLength = BlockLength; + type BlockWeights = BlockWeights; + type DbWeight = (); + type Version = (); + type PalletInfo = (); + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + } + + impl pallet_balances::Config for Test { + type Balance = u64; + type Event = (); + type DustRemoval = (); + type ExistentialDeposit = (); + type AccountStore = System; + type MaxLocks = (); + type WeightInfo = (); + } + + pub struct Nobody; + impl frame_support::traits::Contains for Nobody { + fn contains(_: &AccountId) -> bool { false } + fn sorted_members() -> Vec { vec![] } + #[cfg(feature = 
"runtime-benchmarks")] + fn add(_: &AccountId) { unimplemented!() } + } + impl frame_support::traits::ContainsLengthBound for Nobody { + fn min_len() -> usize { 0 } + fn max_len() -> usize { 0 } + } + + parameter_types! { + pub const TreasuryModuleId: ModuleId = ModuleId(*b"py/trsry"); + } + + impl pallet_treasury::Config for Test { + type Currency = pallet_balances::Module; + type ApproveOrigin = frame_system::EnsureRoot; + type RejectOrigin = frame_system::EnsureRoot; + type Event = (); + type OnSlash = (); + type ProposalBond = (); + type ProposalBondMinimum = (); + type SpendPeriod = (); + type Burn = (); + type BurnDestination = (); + type Tippers = Nobody; + type TipCountdown = (); + type TipFindersFee = (); + type TipReportDepositBase = (); + type DataDepositPerByte = (); + type BountyDepositBase = (); + type BountyDepositPayoutDelay = (); + type BountyUpdatePeriod = (); + type MaximumReasonLength = (); + type BountyCuratorDeposit = (); + type BountyValueMinimum = (); + type ModuleId = TreasuryModuleId; + type WeightInfo = (); + } + + pub struct OneAuthor; + impl FindAuthor for OneAuthor { + fn find_author<'a, I>(_: I) -> Option + where I: 'a, + { + Some(Default::default()) + } + } + impl pallet_authorship::Config for Test { + type FindAuthor = OneAuthor; + type UncleGenerations = (); + type FilterUncle = (); + type EventHandler = (); + } + + type Treasury = pallet_treasury::Module; + type Balances = pallet_balances::Module; + type System = frame_system::Module; + + pub fn new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + // We use default for brevity, but you can configure as desired if needed. 
+ pallet_balances::GenesisConfig::::default().assimilate_storage(&mut t).unwrap(); + t.into() + } + + #[test] + fn test_fees_and_tip_split() { + new_test_ext().execute_with(|| { + let fee = Balances::issue(10); + let tip = Balances::issue(20); + + assert_eq!(Balances::free_balance(Treasury::account_id()), 0); + assert_eq!(Balances::free_balance(AccountId::default()), 0); + + DealWithFees::on_unbalanceds(vec![fee, tip].into_iter()); + + // Author gets 100% of tip and 20% of fee = 22 + assert_eq!(Balances::free_balance(AccountId::default()), 22); + // Treasury gets 80% of fee + assert_eq!(Balances::free_balance(Treasury::account_id()), 8); + }); + } +} diff --git a/runtime/common/src/lib.rs b/runtime/common/src/lib.rs index 9569412b7cb77daa26b09075b3cbb319e846a166..06475ee54e94bf0a3a9a4a186f4f33d84c876dc0 100644 --- a/runtime/common/src/lib.rs +++ b/runtime/common/src/lib.rs @@ -28,10 +28,11 @@ pub mod paras_sudo_wrapper; pub mod paras_registrar; use primitives::v1::{BlockNumber, ValidatorId}; -use sp_runtime::{Perquintill, Perbill, FixedPointNumber, traits::Saturating}; +use sp_runtime::{Perquintill, Perbill, FixedPointNumber}; +use frame_system::limits; use frame_support::{ parameter_types, traits::{Currency}, - weights::{Weight, constants::WEIGHT_PER_SECOND}, + weights::{Weight, constants::WEIGHT_PER_SECOND, DispatchClass}, }; use pallet_transaction_payment::{TargetedFeeAdjustment, Multiplier}; use static_assertions::const_assert; @@ -47,26 +48,27 @@ pub use pallet_balances::Call as BalancesCall; /// Implementations of some helper traits passed into runtime modules as associated types. pub use impls::ToAuthor; -pub type NegativeImbalance = as Currency<::AccountId>>::NegativeImbalance; +pub type NegativeImbalance = as Currency<::AccountId>>::NegativeImbalance; + +/// The sequence of bytes a valid wasm module binary always starts with. Apart from that it's also a +/// valid wasm module. 
+const WASM_MAGIC: &[u8] = &[0x00, 0x61, 0x73, 0x6d, 0x01, 0x00, 0x00, 0x00]; /// We assume that an on-initialize consumes 2.5% of the weight on average, hence a single extrinsic /// will not be allowed to consume more than `AvailableBlockRatio - 2.5%`. -pub const AVERAGE_ON_INITIALIZE_WEIGHT: Perbill = Perbill::from_perthousand(25); +pub const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_perthousand(25); +/// We allow `Normal` extrinsics to fill up the block up to 75%, the rest can be used +/// by Operational extrinsics. +const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); +/// We allow for 2 seconds of compute with a 6 second average block time. +pub const MAXIMUM_BLOCK_WEIGHT: Weight = 2 * WEIGHT_PER_SECOND; + +const_assert!(NORMAL_DISPATCH_RATIO.deconstruct() >= AVERAGE_ON_INITIALIZE_RATIO.deconstruct()); // Common constants used in all runtimes. parameter_types! { pub const BlockHashCount: BlockNumber = 2400; - /// Block time that can be used by weights. - pub const MaximumBlockWeight: Weight = 2 * WEIGHT_PER_SECOND; - /// Portion of the block available to normal class of dispatches. - pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); - /// Maximum weight that a _single_ extrinsic can take. - pub MaximumExtrinsicWeight: Weight = - AvailableBlockRatio::get().saturating_sub(AVERAGE_ON_INITIALIZE_WEIGHT) - * MaximumBlockWeight::get(); - /// Maximum length of block. 5MB. - pub const MaximumBlockLength: u32 = 5 * 1024 * 1024; - /// The portion of the `AvailableBlockRatio` that we adjust the fees with. Blocks filled less + /// The portion of the `NORMAL_DISPATCH_RATIO` that we adjust the fees with. Blocks filled less /// than this will decrease the weight and more will increase. pub const TargetBlockFullness: Perquintill = Perquintill::from_percent(25); /// The adjustment variable of the runtime. Higher values will cause `TargetBlockFullness` to @@ -76,9 +78,41 @@ parameter_types! 
{ /// that combined with `AdjustmentVariable`, we can recover from the minimum. /// See `multiplier_can_grow_from_zero`. pub MinimumMultiplier: Multiplier = Multiplier::saturating_from_rational(1, 1_000_000_000u128); + /// Maximum length of block. Up to 5MB. + pub BlockLength: limits::BlockLength = + limits::BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); + /// Block weights base values and limits. + pub BlockWeights: limits::BlockWeights = limits::BlockWeights::builder() + .base_block(BlockExecutionWeight::get()) + .for_class(DispatchClass::all(), |weights| { + weights.base_extrinsic = ExtrinsicBaseWeight::get(); + }) + .for_class(DispatchClass::Normal, |weights| { + weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); + }) + .for_class(DispatchClass::Operational, |weights| { + weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); + // Operational transactions have an extra reserved space, so that they + // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. + weights.reserved = Some( + MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT, + ); + }) + .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) + .build_or_panic(); } -const_assert!(AvailableBlockRatio::get().deconstruct() >= AVERAGE_ON_INITIALIZE_WEIGHT.deconstruct()); +parameter_types! { + /// A limit for off-chain phragmen unsigned solution submission. + /// + /// We want to keep it as high as possible, but can't risk having it reject, + /// so we always subtract the base block execution weight. 
+ pub OffchainSolutionWeightLimit: Weight = BlockWeights::get() + .get(DispatchClass::Normal) + .max_extrinsic + .expect("Normal extrinsics have weight limit configured by default; qed") + .saturating_sub(BlockExecutionWeight::get()); +} /// Parameterized slow adjusting fee updated based on /// https://w3f-research.readthedocs.io/en/latest/polkadot/Token%20Economics.html#-2.-slow-adjusting-mechanism @@ -102,7 +136,7 @@ impl sp_runtime::BoundToRuntimeAppPublic for ParachainSessionKeyPlaceholder +impl pallet_session::OneSessionHandler for ParachainSessionKeyPlaceholder { type Key = ValidatorId; @@ -144,14 +178,18 @@ mod multiplier_tests { parameter_types! { pub const BlockHashCount: u64 = 250; - pub const ExtrinsicBaseWeight: u64 = 100; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub BlockLength: frame_system::limits::BlockLength = + frame_system::limits::BlockLength::max(2 * 1024); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); } - impl frame_system::Trait for Runtime { + impl frame_system::Config for Runtime { type BaseCallFilter = (); + type BlockWeights = BlockWeights; + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type BlockNumber = u64; @@ -163,13 +201,6 @@ mod multiplier_tests { type Header = Header; type Event = (); type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = ExtrinsicBaseWeight; - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type PalletInfo = (); type AccountData = (); @@ -184,7 +215,7 @@ mod multiplier_tests { let mut t: sp_io::TestExternalities = 
frame_system::GenesisConfig::default().build_storage::().unwrap().into(); t.execute_with(|| { - System::set_block_limits(w, 0); + System::set_block_consumed_resources(w, 0); assertions() }); } @@ -192,7 +223,8 @@ mod multiplier_tests { #[test] fn multiplier_can_grow_from_zero() { let minimum_multiplier = MinimumMultiplier::get(); - let target = TargetBlockFullness::get() * (AvailableBlockRatio::get() * MaximumBlockWeight::get()); + let target = TargetBlockFullness::get() * + BlockWeights::get().get(DispatchClass::Normal).max_total.unwrap(); // if the min is too small, then this will not change, and we are doomed forever. // the weight is 1/10th bigger than target. run_with_system_weight(target * 101 / 100, || { diff --git a/runtime/common/src/paras_registrar.rs b/runtime/common/src/paras_registrar.rs index 7c9ed36a9751d2f52f1c251e03eb0cf42a4e9a32..a813f070604aa942a148c7876ef4e2d9ed6c4d2d 100644 --- a/runtime/common/src/paras_registrar.rs +++ b/runtime/common/src/paras_registrar.rs @@ -17,8 +17,8 @@ //! Module to handle parathread/parachain registration and related fund management. //! In essence this is a simple wrapper around `paras`. +use crate::WASM_MAGIC; use sp_std::{prelude::*, result}; - use frame_support::{ decl_storage, decl_module, decl_error, ensure, dispatch::DispatchResult, @@ -33,21 +33,21 @@ use runtime_parachains::{ self, ParaGenesisArgs, }, - router, + dmp, ump, hrmp, ensure_parachain, Origin, }; type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; + <::Currency as Currency<::AccountId>>::Balance; -pub trait Trait: paras::Trait + router::Trait { +pub trait Config: paras::Config + dmp::Config + ump::Config + hrmp::Config { /// The aggregated origin type must support the `parachains` origin. We require that we can /// infallibly convert between this origin and the system origin, but in reality, they're the /// same type, we just can't express that to the Rust type system without writing a `where` /// clause everywhere. 
- type Origin: From<::Origin> - + Into::Origin>>; + type Origin: From<::Origin> + + Into::Origin>>; /// The system's currency for parathread payment. type Currency: ReservableCurrency; @@ -57,7 +57,7 @@ pub trait Trait: paras::Trait + router::Trait { } decl_storage! { - trait Store for Module as Registrar { + trait Store for Module as Registrar { /// Whether parathreads are enabled or not. ParathreadsRegistrationEnabled: bool; @@ -73,7 +73,7 @@ decl_storage! { } decl_error! { - pub enum Error for Module { + pub enum Error for Module { /// Parachain already exists. ParaAlreadyExists, /// Invalid parachain ID. @@ -86,17 +86,19 @@ decl_error! { HeadDataTooLarge, /// Parathreads registration is disabled. ParathreadsRegistrationDisabled, + /// The validation code provided doesn't start with the Wasm file magic string. + DefinitelyNotWasm, } } decl_module! { - pub struct Module for enum Call where origin: ::Origin { + pub struct Module for enum Call where origin: ::Origin { type Error = Error; /// Register a parathread with given code for immediate use. /// /// Must be sent from a Signed origin that is able to have `ParathreadDeposit` reserved. - /// `gensis_head` and `validation_code` are used to initalize the parathread's state. + /// `genesis_head` and `validation_code` are used to initalize the parathread's state. #[weight = 0] fn register_parathread( origin, @@ -107,6 +109,7 @@ decl_module! { let who = ensure_signed(origin)?; ensure!(ParathreadsRegistrationEnabled::get(), Error::::ParathreadsRegistrationDisabled); + ensure!(validation_code.0.starts_with(WASM_MAGIC), Error::::DefinitelyNotWasm); ensure!(!Paras::contains_key(id), Error::::ParaAlreadyExists); @@ -114,7 +117,7 @@ decl_module! { ensure!(outgoing.binary_search(&id).is_err(), Error::::ParaAlreadyExists); - ::Currency::reserve(&who, T::ParathreadDeposit::get())?; + ::Currency::reserve(&who, T::ParathreadDeposit::get())?; >::insert(id, who); Paras::insert(id, false); @@ -125,7 +128,7 @@ decl_module! 
{ parachain: false, }; - >::schedule_para_initialize(id, genesis); + runtime_parachains::schedule_para_initialize::(id, genesis); Ok(()) } @@ -139,7 +142,7 @@ decl_module! { /// governance intervention). #[weight = 0] fn deregister_parathread(origin) -> DispatchResult { - let id = ensure_parachain(::Origin::from(origin))?; + let id = ensure_parachain(::Origin::from(origin))?; ensure!(ParathreadsRegistrationEnabled::get(), Error::::ParathreadsRegistrationDisabled); @@ -148,10 +151,9 @@ decl_module! { ensure!(!is_parachain, Error::::InvalidThreadId); let debtor = >::take(id); - let _ = ::Currency::unreserve(&debtor, T::ParathreadDeposit::get()); + let _ = ::Currency::unreserve(&debtor, T::ParathreadDeposit::get()); - >::schedule_para_cleanup(id); - >::schedule_para_cleanup(id); + runtime_parachains::schedule_para_cleanup::(id); Ok(()) } @@ -185,7 +187,7 @@ decl_module! { /// and the auction deposit are switched. #[weight = 0] fn swap(origin, other: ParaId) { - let id = ensure_parachain(::Origin::from(origin))?; + let id = ensure_parachain(::Origin::from(origin))?; if PendingSwap::get(other) == Some(id) { // Remove intention to swap. @@ -209,7 +211,7 @@ decl_module! { } } -impl Module { +impl Module { /// Register a parachain with given code. Must be called by root. /// Fails if given ID is already used. 
pub fn register_parachain( @@ -218,6 +220,7 @@ impl Module { validation_code: ValidationCode, ) -> DispatchResult { ensure!(!Paras::contains_key(id), Error::::ParaAlreadyExists); + ensure!(validation_code.0.starts_with(WASM_MAGIC), Error::::DefinitelyNotWasm); let outgoing = >::outgoing_paras(); @@ -231,7 +234,7 @@ impl Module { parachain: true, }; - >::schedule_para_initialize(id, genesis); + runtime_parachains::schedule_para_initialize::(id, genesis); Ok(()) } @@ -242,8 +245,7 @@ impl Module { ensure!(is_parachain, Error::::InvalidChainId); - >::schedule_para_cleanup(id); - >::schedule_para_cleanup(id); + runtime_parachains::schedule_para_cleanup::(id); Ok(()) } @@ -260,14 +262,15 @@ mod tests { }, testing::{UintAuthorityId, TestXt}, Perbill, curve::PiecewiseLinear, }; use primitives::v1::{ - Balance, BlockNumber, Header, Signature, + Balance, BlockNumber, Header, Signature, AuthorityDiscoveryId, }; + use frame_system::limits; use frame_support::{ traits::{Randomness, OnInitialize, OnFinalize}, impl_outer_origin, impl_outer_dispatch, assert_ok, parameter_types, }; use keyring::Sr25519Keyring; - use runtime_parachains::{initializer, configuration, inclusion, router, scheduler}; + use runtime_parachains::{initializer, configuration, inclusion, session_info, scheduler, dmp, ump, hrmp}; use pallet_session::OneSessionHandler; impl_outer_origin! { @@ -297,14 +300,16 @@ mod tests { #[derive(Clone, Eq, PartialEq)] pub struct Test; + const NORMAL_RATIO: Perbill = Perbill::from_percent(75); parameter_types! 
{ pub const BlockHashCount: u32 = 250; - pub const MaximumBlockWeight: u32 = 4 * 1024 * 1024; - pub const MaximumBlockLength: u32 = 4 * 1024 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); + pub BlockWeights: limits::BlockWeights = + limits::BlockWeights::with_sensible_defaults(4 * 1024 * 1024, NORMAL_RATIO); + pub BlockLength: limits::BlockLength = + limits::BlockLength::max_with_normal_ratio(4 * 1024 * 1024, NORMAL_RATIO); } - impl frame_system::Trait for Test { + impl frame_system::Config for Test { type BaseCallFilter = (); type Origin = Origin; type Call = Call; @@ -317,13 +322,9 @@ mod tests { type Header = Header; type Event = (); type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; + type BlockWeights = BlockWeights; + type BlockLength = BlockLength; type Version = (); type PalletInfo = (); type AccountData = pallet_balances::AccountData; @@ -343,7 +344,7 @@ mod tests { pub const ExistentialDeposit: Balance = 1; } - impl pallet_balances::Trait for Test { + impl pallet_balances::Config for Test { type Balance = u128; type DustRemoval = (); type Event = (); @@ -369,7 +370,7 @@ mod tests { pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; } - impl pallet_session::Trait for Test { + impl pallet_session::Config for Test { type SessionManager = (); type Keys = UintAuthorityId; type ShouldEndSession = pallet_session::PeriodicSessions; @@ -393,7 +394,7 @@ mod tests { pub const StakingUnsignedPriority: u64 = u64::max_value() / 2; } - impl pallet_staking::Trait for Test { + impl pallet_staking::Config for Test { type RewardRemainder = (); type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; type Event = (); @@ -414,20 +415,28 @@ 
mod tests { type UnsignedPriority = StakingUnsignedPriority; type MaxIterations = (); type MinSolutionScoreBump = (); - type OffchainSolutionWeightLimit = MaximumBlockWeight; + type OffchainSolutionWeightLimit = (); type WeightInfo = (); } - impl pallet_timestamp::Trait for Test { + impl pallet_timestamp::Config for Test { type Moment = u64; type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; type WeightInfo = (); } - impl router::Trait for Test { } + impl dmp::Config for Test {} + + impl ump::Config for Test { + type UmpSink = (); + } - impl pallet_session::historical::Trait for Test { + impl hrmp::Config for Test { + type Origin = Origin; + } + + impl pallet_session::historical::Config for Test { type FullIdentification = pallet_staking::Exposure; type FullIdentificationOf = pallet_staking::ExposureOf; } @@ -457,16 +466,24 @@ mod tests { pub type ReporterId = app::Public; } - impl paras::Trait for Test { + impl paras::Config for Test { type Origin = Origin; } - impl configuration::Trait for Test { } + impl configuration::Config for Test { } - impl inclusion::Trait for Test { + impl inclusion::Config for Test { type Event = (); } + impl session_info::AuthorityDiscoveryConfig for Test { + fn authorities() -> Vec { + Vec::new() + } + } + + impl session_info::Config for Test { } + pub struct TestRandomness; impl Randomness for TestRandomness { @@ -475,11 +492,11 @@ mod tests { } } - impl initializer::Trait for Test { + impl initializer::Config for Test { type Randomness = TestRandomness; } - impl scheduler::Trait for Test { } + impl scheduler::Config for Test { } type Extrinsic = TestXt; @@ -489,8 +506,8 @@ mod tests { fn create_transaction>( call: Call, _public: test_keys::ReporterId, - _account: ::AccountId, - nonce: ::Index, + _account: ::AccountId, + nonce: ::Index, ) -> Option<(Call, ::SignaturePayload)> { Some((call, (nonce, ()))) } @@ -507,7 +524,7 @@ mod tests { pub const MaxRetries: u32 = 3; } - impl Trait for Test { + impl Config for Test { type 
Origin = Origin; type Currency = pallet_balances::Module; type ParathreadDeposit = ParathreadDeposit; @@ -603,7 +620,7 @@ mod tests { assert_ok!(Registrar::register_parachain( 2u32.into(), vec![3; 3].into(), - vec![3; 3].into(), + WASM_MAGIC.to_vec().into(), )); let orig_bal = Balances::free_balance(&3u64); @@ -613,7 +630,7 @@ mod tests { Origin::signed(3u64), 8u32.into(), vec![3; 3].into(), - vec![3; 3].into(), + WASM_MAGIC.to_vec().into(), )); // deposit should be taken (reserved) @@ -652,13 +669,13 @@ mod tests { Origin::signed(1), 8u32.into(), vec![1; 3].into(), - vec![1; 3].into(), + WASM_MAGIC.to_vec().into(), )); assert_ok!(Registrar::register_parachain( 2u32.into(), vec![1; 3].into(), - vec![1; 3].into(), + WASM_MAGIC.to_vec().into(), )); run_to_block(9); @@ -686,7 +703,7 @@ mod tests { assert_ok!(Registrar::register_parachain( 1u32.into(), vec![1; 3].into(), - vec![1; 3].into(), + WASM_MAGIC.to_vec().into(), )); run_to_block(4); @@ -697,7 +714,7 @@ mod tests { assert!(Registrar::register_parachain( 1u32.into(), vec![1; 3].into(), - vec![1; 3].into(), + WASM_MAGIC.to_vec().into(), ).is_err()); run_to_block(6); @@ -705,7 +722,7 @@ mod tests { assert_ok!(Registrar::register_parachain( 1u32.into(), vec![1; 3].into(), - vec![1; 3].into(), + WASM_MAGIC.to_vec().into(), )); }); } diff --git a/runtime/common/src/paras_sudo_wrapper.rs b/runtime/common/src/paras_sudo_wrapper.rs index 80f64bf1718bbcc3cd10254311a8cac3d035bb29..c0eb9426fc285868bd727be5d7e2c62a139b82f0 100644 --- a/runtime/common/src/paras_sudo_wrapper.rs +++ b/runtime/common/src/paras_sudo_wrapper.rs @@ -16,28 +16,40 @@ //! A simple wrapper allowing `Sudo` to call into `paras` routines. 
+use crate::WASM_MAGIC; +use sp_std::prelude::*; use frame_support::{ - decl_error, decl_module, + decl_error, decl_module, ensure, dispatch::DispatchResult, weights::DispatchClass, }; use frame_system::ensure_root; use runtime_parachains::{ - router, - paras::{self, ParaGenesisArgs}, + configuration, dmp, ump, hrmp, paras::{self, ParaGenesisArgs}, }; use primitives::v1::Id as ParaId; /// The module's configuration trait. -pub trait Trait: paras::Trait + router::Trait { } +pub trait Config: + configuration::Config + paras::Config + dmp::Config + ump::Config + hrmp::Config +{ +} decl_error! { - pub enum Error for Module { } + pub enum Error for Module { + /// The specified parachain or parathread is not registered. + ParaDoesntExist, + /// A DMP message couldn't be sent because it exceeds the maximum size allowed for a downward + /// message. + ExceedsMaxMessageSize, + /// The validation code provided doesn't start with the Wasm file magic string. + DefinitelyNotWasm, + } } decl_module! { /// A sudo wrapper to call into v1 paras module. - pub struct Module for enum Call where origin: ::Origin { + pub struct Module for enum Call where origin: ::Origin { type Error = Error; /// Schedule a para to be initialized at the start of the next session. @@ -48,7 +60,8 @@ decl_module! { genesis: ParaGenesisArgs, ) -> DispatchResult { ensure_root(origin)?; - paras::Module::::schedule_para_initialize(id, genesis); + ensure!(genesis.validation_code.0.starts_with(WASM_MAGIC), Error::::DefinitelyNotWasm); + runtime_parachains::schedule_para_initialize::(id, genesis); Ok(()) } @@ -56,8 +69,47 @@ decl_module! { #[weight = (1_000, DispatchClass::Operational)] pub fn sudo_schedule_para_cleanup(origin, id: ParaId) -> DispatchResult { ensure_root(origin)?; - paras::Module::::schedule_para_cleanup(id); - router::Module::::schedule_para_cleanup(id); + runtime_parachains::schedule_para_cleanup::(id); + Ok(()) + } + + /// Send a downward message to the given para. 
+ /// + /// The given parachain should exist and the payload should not exceed the preconfigured size + /// `config.max_downward_message_size`. + #[weight = (1_000, DispatchClass::Operational)] + pub fn sudo_queue_downward_message(origin, id: ParaId, payload: Vec) -> DispatchResult { + ensure_root(origin)?; + ensure!(>::is_valid_para(id), Error::::ParaDoesntExist); + let config = >::config(); + >::queue_downward_message(&config, id, payload) + .map_err(|e| match e { + dmp::QueueDownwardMessageError::ExceedsMaxMessageSize => + Error::::ExceedsMaxMessageSize.into(), + }) + } + + /// Forcefully establish a channel from the sender to the recipient. + /// + /// This is equivalent to sending an `Hrmp::hrmp_init_open_channel` extrinsic followed by + /// `Hrmp::hrmp_accept_open_channel`. + #[weight = (1_000, DispatchClass::Operational)] + pub fn sudo_establish_hrmp_channel( + origin, + sender: ParaId, + recipient: ParaId, + max_capacity: u32, + max_message_size: u32, + ) -> DispatchResult { + ensure_root(origin)?; + + >::init_open_channel( + sender, + recipient, + max_capacity, + max_message_size, + )?; + >::accept_open_channel(recipient, sender)?; Ok(()) } } diff --git a/runtime/common/src/purchase.rs b/runtime/common/src/purchase.rs index 876ac695ac229ee2abd833a699715dc3636e7575..2efb61ae4aa6d663ad4de0bb37846625cb8ac78c 100644 --- a/runtime/common/src/purchase.rs +++ b/runtime/common/src/purchase.rs @@ -16,7 +16,7 @@ //! Module to process purchase of DOTs. -use codec::{Encode, Decode}; +use parity_scale_codec::{Encode, Decode}; use sp_runtime::{Permill, RuntimeDebug, DispatchResult, DispatchError, AnySignature}; use sp_runtime::traits::{Zero, CheckedAdd, Verify, Saturating}; use frame_support::{decl_event, decl_storage, decl_module, decl_error, ensure}; @@ -28,9 +28,9 @@ use sp_core::sr25519; use sp_std::prelude::*; /// Configuration trait. -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The overarching event type. 
- type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// Balances Pallet type Currency: Currency; /// Vesting Pallet @@ -47,7 +47,7 @@ pub trait Trait: frame_system::Trait { type MaxUnlocked: Get>; } -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; /// The kind of a statement an account needs to make for a claim to be valid. #[derive(Encode, Decode, Clone, Copy, Eq, PartialEq, RuntimeDebug)] @@ -103,9 +103,9 @@ pub struct AccountStatus { decl_event!( pub enum Event where - AccountId = ::AccountId, + AccountId = ::AccountId, Balance = BalanceOf, - BlockNumber = ::BlockNumber, + BlockNumber = ::BlockNumber, { /// A [new] account was created. AccountCreated(AccountId), @@ -125,7 +125,7 @@ decl_event!( ); decl_error! { - pub enum Error for Module { + pub enum Error for Module { /// Account is not currently valid to use. InvalidAccount, /// Account used in the purchase already exists. @@ -146,7 +146,7 @@ decl_error! { } decl_storage! { - trait Store for Module as Purchase { + trait Store for Module as Purchase { // A map of all participants in the DOT purchase process. Accounts: map hasher(blake2_128_concat) T::AccountId => AccountStatus>; // The account that will be used to payout participants of the DOT purchase process. @@ -159,7 +159,7 @@ decl_storage! { } decl_module! { - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { type Error = Error; /// The maximum statement length for the statement users to sign when creating an account. @@ -340,7 +340,7 @@ decl_module! { } } -impl Module { +impl Module { fn verify_signature(who: &T::AccountId, signature: &[u8]) -> Result<(), DispatchError> { // sr25519 always expects a 64 byte signature. 
ensure!(signature.len() == 64, Error::::InvalidSignature); @@ -374,7 +374,7 @@ fn account_to_bytes(account: &AccountId) -> Result<[u8; 32], Dispatch /// WARNING: Executing this function will clear all storage used by this pallet. /// Be sure this is what you want... pub fn remove_pallet() -> frame_support::weights::Weight - where T: frame_system::Trait + where T: frame_system::Config { use frame_support::migration::remove_storage_prefix; remove_storage_prefix(b"Purchase", b"Accounts", b""); @@ -382,7 +382,7 @@ pub fn remove_pallet() -> frame_support::weights::Weight remove_storage_prefix(b"Purchase", b"Statement", b""); remove_storage_prefix(b"Purchase", b"UnlockBlock", b""); - T::MaximumBlockWeight::get() + ::BlockWeights::get().max_block } #[cfg(test)] @@ -393,7 +393,7 @@ mod tests { // The testing primitives are very useful for avoiding having to work with signatures // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. use sp_runtime::{ - Perbill, MultiSignature, + MultiSignature, traits::{BlakeTwo256, IdentityLookup, Identity, Verify, IdentifyAccount, Dispatchable}, testing::Header }; @@ -424,12 +424,12 @@ mod tests { pub struct Test; parameter_types! 
{ pub const BlockHashCount: u32 = 250; - pub const MaximumBlockWeight: u32 = 4 * 1024 * 1024; - pub const MaximumBlockLength: u32 = 4 * 1024 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); } - impl frame_system::Trait for Test { + impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Call = Call; type Index = u64; @@ -441,13 +441,6 @@ mod tests { type Header = Header; type Event = (); type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type PalletInfo = (); type AccountData = pallet_balances::AccountData; @@ -460,7 +453,7 @@ mod tests { pub const ExistentialDeposit: u64 = 1; } - impl pallet_balances::Trait for Test { + impl pallet_balances::Config for Test { type Balance = u64; type Event = (); type DustRemoval = (); @@ -474,7 +467,7 @@ mod tests { pub const MinVestedTransfer: u64 = 0; } - impl pallet_vesting::Trait for Test { + impl pallet_vesting::Config for Test { type Event = (); type Currency = Balances; type BlockNumberToBalance = Identity; @@ -494,7 +487,7 @@ mod tests { pub const ConfigurationOrigin: AccountId = AccountId32::from([2u8; 32]); } - impl Trait for Test { + impl Config for Test { type Event = (); type Currency = Balances; type VestingSchedule = Vesting; @@ -692,7 +685,7 @@ mod tests { ); // Account with vesting - assert_ok!(::VestingSchedule::add_vesting_schedule( + assert_ok!(::VestingSchedule::add_vesting_schedule( &alice(), 100, 1, @@ -933,13 +926,13 @@ mod tests { bob(), )); // Payment is made. 
- assert_eq!(::Currency::free_balance(&payment_account()), 99_650); - assert_eq!(::Currency::free_balance(&alice()), 100); + assert_eq!(::Currency::free_balance(&payment_account()), 99_650); + assert_eq!(::Currency::free_balance(&alice()), 100); // 10% of the 50 units is unlocked automatically for Alice - assert_eq!(::VestingSchedule::vesting_balance(&alice()), Some(45)); - assert_eq!(::Currency::free_balance(&bob()), 250); + assert_eq!(::VestingSchedule::vesting_balance(&alice()), Some(45)); + assert_eq!(::Currency::free_balance(&bob()), 250); // A max of 10 units is unlocked automatically for Bob - assert_eq!(::VestingSchedule::vesting_balance(&bob()), Some(140)); + assert_eq!(::VestingSchedule::vesting_balance(&bob()), Some(140)); // Status is completed. assert_eq!( Accounts::::get(alice()), @@ -966,13 +959,13 @@ mod tests { let vest_call = Call::Vesting(pallet_vesting::Call::::vest()); assert_ok!(vest_call.clone().dispatch(Origin::signed(alice()))); assert_ok!(vest_call.clone().dispatch(Origin::signed(bob()))); - assert_eq!(::VestingSchedule::vesting_balance(&alice()), Some(45)); - assert_eq!(::VestingSchedule::vesting_balance(&bob()), Some(140)); + assert_eq!(::VestingSchedule::vesting_balance(&alice()), Some(45)); + assert_eq!(::VestingSchedule::vesting_balance(&bob()), Some(140)); System::set_block_number(101); assert_ok!(vest_call.clone().dispatch(Origin::signed(alice()))); assert_ok!(vest_call.clone().dispatch(Origin::signed(bob()))); - assert_eq!(::VestingSchedule::vesting_balance(&alice()), None); - assert_eq!(::VestingSchedule::vesting_balance(&bob()), None); + assert_eq!(::VestingSchedule::vesting_balance(&alice()), None); + assert_eq!(::VestingSchedule::vesting_balance(&bob()), None); }); } @@ -985,7 +978,7 @@ mod tests { alice(), ), BadOrigin); // Account with Existing Vesting Schedule - assert_ok!(::VestingSchedule::add_vesting_schedule( + assert_ok!(::VestingSchedule::add_vesting_schedule( &bob(), 100, 1, 50, )); assert_noop!(Purchase::payout( diff 
--git a/runtime/common/src/slot_range.rs b/runtime/common/src/slot_range.rs index 23855c4acc11e11d3b7579f402d5f5e3d66a3ec9..b9751e18efb2c33a65d9a97fda752910962bfa92 100644 --- a/runtime/common/src/slot_range.rs +++ b/runtime/common/src/slot_range.rs @@ -19,7 +19,7 @@ use sp_std::{result, ops::Add, convert::{TryFrom, TryInto}}; use sp_runtime::traits::CheckedSub; -use codec::{Encode, Decode}; +use parity_scale_codec::{Encode, Decode}; /// Total number of possible sub ranges of slots. pub const SLOT_RANGE_COUNT: usize = 10; diff --git a/runtime/common/src/slots.rs b/runtime/common/src/slots.rs index 2e38a94fbc839bd4a471cb653ce8223f67ee3d57..284f1afa52beb4653cb225fe6a694d2906d6cfca 100644 --- a/runtime/common/src/slots.rs +++ b/runtime/common/src/slots.rs @@ -22,10 +22,10 @@ use sp_std::{prelude::*, mem::swap, convert::TryInto}; use sp_runtime::traits::{ CheckedSub, StaticLookup, Zero, One, CheckedConversion, Hash, AccountIdConversion, }; -use codec::{Encode, Decode, Codec}; +use parity_scale_codec::{Encode, Decode, Codec}; use frame_support::{ decl_module, decl_storage, decl_event, decl_error, ensure, dispatch::DispatchResult, - traits::{Currency, ReservableCurrency, WithdrawReason, ExistenceRequirement, Get, Randomness}, + traits::{Currency, ReservableCurrency, WithdrawReasons, ExistenceRequirement, Get, Randomness}, weights::{DispatchClass, Weight}, }; use primitives::v1::{ @@ -34,12 +34,12 @@ use primitives::v1::{ use frame_system::{ensure_signed, ensure_root}; use crate::slot_range::{SlotRange, SLOT_RANGE_COUNT}; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; /// The module's configuration trait. -pub trait Trait: frame_system::Trait { +pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; /// The currency type used for bidding. 
type Currency: ReservableCurrency; @@ -161,18 +161,18 @@ pub enum IncomingParachain { Deploy { code: ValidationCode, initial_head_data: HeadData }, } -type LeasePeriodOf = ::BlockNumber; +type LeasePeriodOf = ::BlockNumber; // Winning data type. This encodes the top bidders of each range together with their bid. type WinningData = - [Option<(Bidder<::AccountId>, BalanceOf)>; SLOT_RANGE_COUNT]; + [Option<(Bidder<::AccountId>, BalanceOf)>; SLOT_RANGE_COUNT]; // Winners data type. This encodes each of the final winners of a parachain auction, the parachain // index assigned to them, their winning bid and the range that they won. type WinnersData = - Vec<(Option::AccountId>>, ParaId, BalanceOf, SlotRange)>; + Vec<(Option::AccountId>>, ParaId, BalanceOf, SlotRange)>; // This module's storage items. decl_storage! { - trait Store for Module as Slots { + trait Store for Module as Slots { /// The number of auctions that have been started so far. pub AuctionCounter get(fn auction_counter): AuctionIndex; @@ -245,7 +245,7 @@ fn swap_ordered_existence(ids: &mut [T], one: T, oth ids.sort(); } -impl SwapAux for Module { +impl SwapAux for Module { fn ensure_can_swap(one: ParaId, other: ParaId) -> Result<(), &'static str> { if >::contains_key(one) || >::contains_key(other) { Err("can't swap an undeployed parachain")? @@ -262,8 +262,8 @@ impl SwapAux for Module { decl_event!( pub enum Event where - AccountId = ::AccountId, - BlockNumber = ::BlockNumber, + AccountId = ::AccountId, + BlockNumber = ::BlockNumber, LeasePeriod = LeasePeriodOf, ParaId = ParaId, Balance = BalanceOf, @@ -292,7 +292,7 @@ decl_event!( ); decl_error! { - pub enum Error for Module { + pub enum Error for Module { /// This auction is already in progress. AuctionInProgress, /// The lease period is in the past. @@ -323,7 +323,7 @@ decl_error! { } decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin { + pub struct Module for enum Call where origin: T::Origin { type Error = Error; fn deposit_event() = default; @@ -520,7 +520,7 @@ decl_module! { .ok_or(Error::::ParaNotOnboarding)?; if let IncomingParachain::Fixed{code_hash, code_size, initial_head_data} = details { ensure!(code.0.len() as u32 == code_size, Error::::InvalidCode); - ensure!(::Hashing::hash(&code.0) == code_hash, Error::::InvalidCode); + ensure!(::Hashing::hash(&code.0) == code_hash, Error::::InvalidCode); if starts > Self::lease_period_index() { // Hasn't yet begun. Replace the on-boarding entry with the new information. @@ -542,7 +542,7 @@ decl_module! { } } -impl Module { +impl Module { /// Deposit currently held for a particular parachain that we administer. fn deposit_held(para_id: &ParaId) -> BalanceOf { >::get(para_id).into_iter().max().unwrap_or_else(Zero::zero) @@ -630,7 +630,7 @@ impl Module { if T::Currency::withdraw( &bidder.who, amount, - WithdrawReason::Fee.into(), + WithdrawReasons::FEE, ExistenceRequirement::AllowDeath ).is_err() { continue; @@ -667,7 +667,7 @@ impl Module { if T::Currency::withdraw( ¶_id.into_account(), additional, - WithdrawReason::Fee.into(), + WithdrawReasons::FEE, ExistenceRequirement::AllowDeath ).is_err() { continue; @@ -942,10 +942,7 @@ mod tests { use std::{collections::HashMap, cell::RefCell}; use sp_core::H256; - use sp_runtime::{ - Perbill, - traits::{BlakeTwo256, Hash, IdentityLookup}, - }; + use sp_runtime::traits::{BlakeTwo256, Hash, IdentityLookup}; use frame_support::{ impl_outer_origin, parameter_types, assert_ok, assert_noop, traits::{OnInitialize, OnFinalize} @@ -964,12 +961,12 @@ mod tests { pub struct Test; parameter_types! 
{ pub const BlockHashCount: u32 = 250; - pub const MaximumBlockWeight: u32 = 4 * 1024 * 1024; - pub const MaximumBlockLength: u32 = 4 * 1024 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); } - impl frame_system::Trait for Test { + impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Call = (); type Index = u64; @@ -981,13 +978,6 @@ mod tests { type Header = Header; type Event = (); type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type PalletInfo = (); type AccountData = pallet_balances::AccountData; @@ -1000,7 +990,7 @@ mod tests { pub const ExistentialDeposit: u64 = 1; } - impl pallet_balances::Trait for Test { + impl pallet_balances::Config for Test { type Balance = u64; type Event = (); type DustRemoval = (); @@ -1074,7 +1064,7 @@ mod tests { pub const EndingPeriod: BlockNumber = 3; } - impl Trait for Test { + impl Config for Test { type Event = (); type Currency = Balances; type Parachains = TestParachains; diff --git a/runtime/kusama/Cargo.toml b/runtime/kusama/Cargo.toml index ccc77ca92536cc086c6c2ecb44f1d31997f73ed0..4822b4ba0100f137d6b71494e926221a7f7ba07d 100644 --- a/runtime/kusama/Cargo.toml +++ b/runtime/kusama/Cargo.toml @@ -1,89 +1,89 @@ [package] name = "kusama-runtime" -version = "0.8.26" +version = "0.8.27" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" [dependencies] bitvec = { version = "0.17.4", default-features = false, features = ["alloc"] } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -log = { version = "0.3.9", optional = 
true } -rustc-hex = { version = "2.0.1", default-features = false } -serde = { version = "1.0.102", default-features = false } -serde_derive = { version = "1.0.102", optional = true } +parity-scale-codec = { version = "1.3.5", default-features = false, features = ["derive"] } +log = { version = "0.4.11", optional = true } +rustc-hex = { version = "2.1.0", default-features = false } +serde = { version = "1.0.118", default-features = false } +serde_derive = { version = "1.0.117", optional = true } static_assertions = "1.1.0" -smallvec = "1.4.1" +smallvec = "1.5.1" -authority-discovery-primitives = { package = "sp-authority-discovery", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -babe-primitives = { package = "sp-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -inherents = { package = "sp-inherents", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -offchain-primitives = { package = "sp-offchain", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { package = "sp-std", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-version = { git = 
"https://github.com/paritytech/substrate", branch = "master", default-features = false } -tx-pool-api = { package = "sp-transaction-pool", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -block-builder-api = { package = "sp-block-builder", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +authority-discovery-primitives = { package = "sp-authority-discovery", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +babe-primitives = { package = "sp-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +inherents = { package = "sp-inherents", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +offchain-primitives = { package = "sp-offchain", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-std = { package = "sp-std", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-staking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-session = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-version = { git = 
"https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +tx-pool-api = { package = "sp-transaction-pool", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +block-builder-api = { package = "sp-block-builder", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } -pallet-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-authorship = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-collective = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-democracy = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-elections-phragmen = { package = "pallet-elections-phragmen", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-executive = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-identity = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-im-online = { git = "https://github.com/paritytech/substrate", 
branch = "master", default-features = false } -pallet-indices = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-membership = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-multisig = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-nicks = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-offences = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-proxy = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-recovery = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-scheduler = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-society = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-staking-reward-curve = { package = "pallet-staking-reward-curve", git = "https://github.com/paritytech/substrate", branch = "master" } -frame-system = {git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-timestamp = { git = 
"https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-treasury = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-utility = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-vesting = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-authorship = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-collective = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-democracy = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-elections-phragmen = { package = "pallet-elections-phragmen", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +frame-executive = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-grandpa = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = 
false } +pallet-identity = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-im-online = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-indices = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-membership = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-multisig = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-nicks = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-offences = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-proxy = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-recovery = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-scheduler = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-session = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-society = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-staking = { git = "https://github.com/paritytech/substrate", branch = 
"polkadot-release-v0.8.27", default-features = false } +pallet-staking-reward-curve = { package = "pallet-staking-reward-curve", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +frame-system = {git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-treasury = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-utility = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-vesting = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } -pallet-offences-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } -pallet-session-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } -frame-system-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } -hex-literal = { version = "0.2.1", optional = true } +frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false, optional = true } +pallet-offences-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false, optional = true } +pallet-session-benchmarking 
= { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false, optional = true } +frame-system-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false, optional = true } +hex-literal = { version = "0.3.1", optional = true } runtime-common = { package = "polkadot-runtime-common", path = "../common", default-features = false } primitives = { package = "polkadot-primitives", path = "../../primitives", default-features = false } [dev-dependencies] -hex-literal = "0.2.1" -libsecp256k1 = "0.3.2" -tiny-keccak = "1.5.0" -keyring = { package = "sp-keyring", git = "https://github.com/paritytech/substrate", branch = "master" } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" } +hex-literal = "0.3.1" +libsecp256k1 = "0.3.5" +tiny-keccak = "2.0.2" +keyring = { package = "sp-keyring", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } separator = "0.4.1" -serde_json = "1.0.41" +serde_json = "1.0.60" [build-dependencies] -wasm-builder-runner = { package = "substrate-wasm-builder-runner", version = "2.0.0" } +substrate-wasm-builder = "3.0.0" [features] default = ["std"] @@ -95,7 +95,7 @@ std = [ "bitvec/std", "primitives/std", "rustc-hex/std", - "codec/std", + "parity-scale-codec/std", "inherents/std", "sp-core/std", "sp-api/std", diff --git a/runtime/kusama/build.rs b/runtime/kusama/build.rs index e0c89e5649323fe970ccf449a2182ccdf3cb9ac7..a75ebb4edbe1b3c55e23a2700a5d48efcaae1e54 100644 --- a/runtime/kusama/build.rs +++ b/runtime/kusama/build.rs @@ -14,12 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
-use wasm_builder_runner::WasmBuilder; +use substrate_wasm_builder::WasmBuilder; fn main() { WasmBuilder::new() .with_current_project() - .with_wasm_builder_from_crates("2.0.1") .import_memory() .export_heap_base() .build() diff --git a/runtime/kusama/src/constants.rs b/runtime/kusama/src/constants.rs index 5d81cf0bb853881906ce71473ddea03f11ec2c12..91d6b354522571079e8339b04a4184027592cb05 100644 --- a/runtime/kusama/src/constants.rs +++ b/runtime/kusama/src/constants.rs @@ -69,7 +69,7 @@ pub mod fee { /// node's balance type. /// /// This should typically create a mapping between the following ranges: - /// - [0, frame_system::MaximumBlockWeight] + /// - [0, MAXIMUM_BLOCK_WEIGHT] /// - [Balance::min, Balance::max] /// /// Yet, it can be used for any other sort of change to weight-fee. Some examples being: @@ -95,16 +95,16 @@ pub mod fee { #[cfg(test)] mod tests { use frame_support::weights::WeightToFeePolynomial; - use runtime_common::{MaximumBlockWeight, ExtrinsicBaseWeight}; + use runtime_common::{MAXIMUM_BLOCK_WEIGHT, ExtrinsicBaseWeight}; use super::fee::WeightToFee; use super::currency::{CENTS, DOLLARS, MILLICENTS}; #[test] - // This function tests that the fee for `MaximumBlockWeight` of weight is correct + // This function tests that the fee for `MAXIMUM_BLOCK_WEIGHT` of weight is correct fn full_block_fee_is_correct() { // A full block should cost 16 DOLLARS println!("Base: {}", ExtrinsicBaseWeight::get()); - let x = WeightToFee::calc(&MaximumBlockWeight::get()); + let x = WeightToFee::calc(&MAXIMUM_BLOCK_WEIGHT); let y = 16 * DOLLARS; assert!(x.max(y) - x.min(y) < MILLICENTS); } diff --git a/runtime/kusama/src/lib.rs b/runtime/kusama/src/lib.rs index 54ae7a1625047ef647594cd5765156b10d7c8b40..b6d4b8512c5c5817a4a51868254337512bf64f5d 100644 --- a/runtime/kusama/src/lib.rs +++ b/runtime/kusama/src/lib.rs @@ -20,20 +20,22 @@ // `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. 
#![recursion_limit = "256"] +use pallet_transaction_payment::CurrencyAdapter; use sp_std::prelude::*; -use sp_core::u32_trait::{_1, _2, _3, _4, _5}; -use codec::{Encode, Decode}; +use sp_std::collections::btree_map::BTreeMap; +use sp_core::u32_trait::{_1, _2, _3, _5}; +use parity_scale_codec::{Encode, Decode}; use primitives::v1::{ AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CommittedCandidateReceipt, CoreState, GroupRotationInfo, Hash, Id, Moment, Nonce, OccupiedCoreAssumption, PersistedValidationData, Signature, ValidationCode, ValidationData, ValidatorId, ValidatorIndex, + InboundDownwardMessage, InboundHrmpMessage, SessionInfo, }; use runtime_common::{ claims, SlowAdjustingFeeUpdate, CurrencyToVote, - impls::ToAuthor, - NegativeImbalance, BlockHashCount, MaximumBlockWeight, AvailableBlockRatio, - MaximumBlockLength, BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, - MaximumExtrinsicWeight, ParachainSessionKeyPlaceholder, + impls::DealWithFees, + BlockHashCount, RocksDbWeight, BlockWeights, BlockLength, OffchainSolutionWeightLimit, + ParachainSessionKeyPlaceholder, }; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, ModuleId, @@ -55,7 +57,7 @@ use sp_core::OpaqueMetadata; use sp_staking::SessionIndex; use frame_support::{ parameter_types, construct_runtime, debug, RuntimeDebug, - traits::{KeyOwnerProofSystem, SplitTwoWays, Randomness, LockIdentifier, Filter, InstanceFilter}, + traits::{KeyOwnerProofSystem, Randomness, LockIdentifier, Filter, InstanceFilter}, weights::Weight, }; use frame_system::{EnsureRoot, EnsureOneOf}; @@ -88,7 +90,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("kusama"), impl_name: create_runtime_str!("parity-kusama"), authoring_version: 2, - spec_version: 2026, + spec_version: 2027, impl_version: 0, #[cfg(not(feature = "disable-runtime-api"))] apis: RUNTIME_API_VERSIONS, @@ -124,8 +126,10 @@ parameter_types! 
{ pub const Version: RuntimeVersion = VERSION; } -impl frame_system::Trait for Runtime { +impl frame_system::Config for Runtime { type BaseCallFilter = BaseFilter; + type BlockWeights = BlockWeights; + type BlockLength = BlockLength; type Origin = Origin; type Call = Call; type Index = Nonce; @@ -137,13 +141,7 @@ impl frame_system::Trait for Runtime { type Header = generic::Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; type DbWeight = RocksDbWeight; - type BlockExecutionWeight = BlockExecutionWeight; - type ExtrinsicBaseWeight = ExtrinsicBaseWeight; - type MaximumExtrinsicWeight = MaximumExtrinsicWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = Version; type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; @@ -153,15 +151,17 @@ impl frame_system::Trait for Runtime { } parameter_types! { + pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * + BlockWeights::get().max_block; pub const MaxScheduledPerBlock: u32 = 50; } -impl pallet_scheduler::Trait for Runtime { +impl pallet_scheduler::Config for Runtime { type Event = Event; type Origin = Origin; type PalletsOrigin = OriginCaller; type Call = Call; - type MaximumWeight = MaximumBlockWeight; + type MaximumWeight = MaximumSchedulerWeight; type ScheduleOrigin = EnsureRoot; type MaxScheduledPerBlock = MaxScheduledPerBlock; type WeightInfo = weights::pallet_scheduler::WeightInfo; @@ -172,7 +172,7 @@ parameter_types! { pub const ExpectedBlockTime: Moment = MILLISECS_PER_BLOCK; } -impl pallet_babe::Trait for Runtime { +impl pallet_babe::Config for Runtime { type EpochDuration = EpochDuration; type ExpectedBlockTime = ExpectedBlockTime; @@ -201,7 +201,7 @@ parameter_types! 
{ pub const IndexDeposit: Balance = 1 * DOLLARS; } -impl pallet_indices::Trait for Runtime { +impl pallet_indices::Config for Runtime { type AccountIndex = AccountIndex; type Currency = Balances; type Deposit = IndexDeposit; @@ -214,15 +214,7 @@ parameter_types! { pub const MaxLocks: u32 = 50; } -/// Splits fees 80/20 between treasury and block author. -pub type DealWithFees = SplitTwoWays< - Balance, - NegativeImbalance, - _4, Treasury, // 4 parts (80%) goes to the treasury. - _1, ToAuthor, // 1 part (20%) goes to the block author. ->; - -impl pallet_balances::Trait for Runtime { +impl pallet_balances::Config for Runtime { type Balance = Balance; type DustRemoval = (); type Event = Event; @@ -236,9 +228,8 @@ parameter_types! { pub const TransactionByteFee: Balance = 10 * MILLICENTS; } -impl pallet_transaction_payment::Trait for Runtime { - type Currency = Balances; - type OnTransactionPayment = DealWithFees; +impl pallet_transaction_payment::Config for Runtime { + type OnChargeTransaction = CurrencyAdapter>; type TransactionByteFee = TransactionByteFee; type WeightToFee = WeightToFee; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; @@ -247,7 +238,7 @@ impl pallet_transaction_payment::Trait for Runtime { parameter_types! { pub const MinimumPeriod: u64 = SLOT_DURATION / 2; } -impl pallet_timestamp::Trait for Runtime { +impl pallet_timestamp::Config for Runtime { type Moment = u64; type OnTimestampSet = Babe; type MinimumPeriod = MinimumPeriod; @@ -259,7 +250,7 @@ parameter_types! { } // TODO: substrate#2986 implement this properly -impl pallet_authorship::Trait for Runtime { +impl pallet_authorship::Config for Runtime { type FindAuthor = pallet_session::FindAccountFromAuthorIndex; type UncleGenerations = UncleGenerations; type FilterUncle = (); @@ -285,7 +276,7 @@ parameter_types! 
{ pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(17); } -impl pallet_session::Trait for Runtime { +impl pallet_session::Config for Runtime { type Event = Event; type ValidatorId = AccountId; type ValidatorIdOf = pallet_staking::StashOf; @@ -298,7 +289,7 @@ impl pallet_session::Trait for Runtime { type WeightInfo = weights::pallet_session::WeightInfo; } -impl pallet_session::historical::Trait for Runtime { +impl pallet_session::historical::Config for Runtime { type FullIdentification = pallet_staking::Exposure; type FullIdentificationOf = pallet_staking::ExposureOf; } @@ -332,9 +323,6 @@ parameter_types! { pub const ElectionLookahead: BlockNumber = EPOCH_DURATION_IN_BLOCKS / 4; pub const MaxIterations: u32 = 10; pub MinSolutionScoreBump: Perbill = Perbill::from_rational_approximation(5u32, 10_000); - pub OffchainSolutionWeightLimit: Weight = MaximumExtrinsicWeight::get() - .saturating_sub(BlockExecutionWeight::get()) - .saturating_sub(ExtrinsicBaseWeight::get()); } type SlashCancelOrigin = EnsureOneOf< @@ -343,7 +331,7 @@ type SlashCancelOrigin = EnsureOneOf< pallet_collective::EnsureProportionAtLeast<_1, _2, AccountId, CouncilCollective> >; -impl pallet_staking::Trait for Runtime { +impl pallet_staking::Config for Runtime { type Currency = Balances; type UnixTime = Timestamp; type CurrencyToVote = CurrencyToVote; @@ -385,7 +373,7 @@ parameter_types! { pub const MaxProposals: u32 = 100; } -impl pallet_democracy::Trait for Runtime { +impl pallet_democracy::Config for Runtime { type Proposal = Call; type Event = Event; type Currency = Balances; @@ -441,7 +429,7 @@ parameter_types! { } type CouncilCollective = pallet_collective::Instance1; -impl pallet_collective::Trait for Runtime { +impl pallet_collective::Config for Runtime { type Origin = Origin; type Proposal = Call; type Event = Event; @@ -464,7 +452,7 @@ parameter_types! { // Make sure that there are no more than MaxMembers members elected via phragmen. 
const_assert!(DesiredMembers::get() <= CouncilMaxMembers::get()); -impl pallet_elections_phragmen::Trait for Runtime { +impl pallet_elections_phragmen::Config for Runtime { type Event = Event; type Currency = Balances; type ChangeMembers = Council; @@ -489,7 +477,7 @@ parameter_types! { } type TechnicalCollective = pallet_collective::Instance2; -impl pallet_collective::Trait for Runtime { +impl pallet_collective::Config for Runtime { type Origin = Origin; type Proposal = Call; type Event = Event; @@ -500,7 +488,7 @@ impl pallet_collective::Trait for Runtime { type WeightInfo = weights::pallet_collective::WeightInfo; } -impl pallet_membership::Trait for Runtime { +impl pallet_membership::Config for Runtime { type Event = Event; type AddOrigin = MoreThanHalfCouncil; type RemoveOrigin = MoreThanHalfCouncil; @@ -536,7 +524,7 @@ type ApproveOrigin = EnsureOneOf< pallet_collective::EnsureProportionAtLeast<_3, _5, AccountId, CouncilCollective> >; -impl pallet_treasury::Trait for Runtime { +impl pallet_treasury::Config for Runtime { type ModuleId = TreasuryModuleId; type Currency = Balances; type ApproveOrigin = ApproveOrigin; @@ -563,17 +551,17 @@ impl pallet_treasury::Trait for Runtime { } parameter_types! { - pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * MaximumBlockWeight::get(); + pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * BlockWeights::get().max_block; } -impl pallet_offences::Trait for Runtime { +impl pallet_offences::Config for Runtime { type Event = Event; type IdentificationTuple = pallet_session::historical::IdentificationTuple; type OnOffenceHandler = Staking; type WeightSoftLimit = OffencesWeightSoftLimit; } -impl pallet_authority_discovery::Trait for Runtime {} +impl pallet_authority_discovery::Config for Runtime {} parameter_types! { pub const SessionDuration: BlockNumber = EPOCH_DURATION_IN_BLOCKS as _; @@ -585,7 +573,7 @@ parameter_types! 
{ pub const ImOnlineUnsignedPriority: TransactionPriority = TransactionPriority::max_value(); } -impl pallet_im_online::Trait for Runtime { +impl pallet_im_online::Config for Runtime { type AuthorityId = ImOnlineId; type Event = Event; type ReportUnresponsiveness = Offences; @@ -594,7 +582,7 @@ impl pallet_im_online::Trait for Runtime { type WeightInfo = weights::pallet_im_online::WeightInfo; } -impl pallet_grandpa::Trait for Runtime { +impl pallet_grandpa::Config for Runtime { type Event = Event; type Call = Call; @@ -622,7 +610,7 @@ impl frame_system::offchain::CreateSignedTransaction for R call: Call, public: ::Signer, account: AccountId, - nonce: ::Index, + nonce: ::Index, ) -> Option<(Call, ::SignaturePayload)> { // take the biggest period possible. let period = BlockHashCount::get() @@ -672,7 +660,7 @@ parameter_types! { pub Prefix: &'static [u8] = b"Pay KSMs to the Kusama account:"; } -impl claims::Trait for Runtime { +impl claims::Config for Runtime { type Event = Event; type VestingSchedule = Vesting; type Prefix = Prefix; @@ -689,7 +677,7 @@ parameter_types! { pub const MaxRegistrars: u32 = 20; } -impl pallet_identity::Trait for Runtime { +impl pallet_identity::Config for Runtime { type Event = Event; type Currency = Balances; type Slashed = Treasury; @@ -704,7 +692,7 @@ impl pallet_identity::Trait for Runtime { type WeightInfo = weights::pallet_identity::WeightInfo; } -impl pallet_utility::Trait for Runtime { +impl pallet_utility::Config for Runtime { type Event = Event; type Call = Call; type WeightInfo = weights::pallet_utility::WeightInfo; @@ -718,7 +706,7 @@ parameter_types! { pub const MaxSignatories: u16 = 100; } -impl pallet_multisig::Trait for Runtime { +impl pallet_multisig::Config for Runtime { type Event = Event; type Call = Call; type Currency = Balances; @@ -735,7 +723,7 @@ parameter_types! 
{ pub const RecoveryDeposit: Balance = 5 * DOLLARS; } -impl pallet_recovery::Trait for Runtime { +impl pallet_recovery::Config for Runtime { type Event = Event; type Call = Call; type Currency = Balances; @@ -756,7 +744,7 @@ parameter_types! { pub const SocietyModuleId: ModuleId = ModuleId(*b"py/socie"); } -impl pallet_society::Trait for Runtime { +impl pallet_society::Config for Runtime { type Event = Event; type Currency = Balances; type Randomness = RandomnessCollectiveFlip; @@ -777,7 +765,7 @@ parameter_types! { pub const MinVestedTransfer: Balance = 100 * DOLLARS; } -impl pallet_vesting::Trait for Runtime { +impl pallet_vesting::Config for Runtime { type Event = Event; type Currency = Balances; type BlockNumberToBalance = ConvertInto; @@ -851,15 +839,21 @@ impl InstanceFilter for ProxyType { Call::Multisig(..) ), ProxyType::Governance => matches!(c, - Call::Democracy(..) | Call::Council(..) | Call::TechnicalCommittee(..) - | Call::ElectionsPhragmen(..) | Call::Treasury(..) | Call::Utility(..) + Call::Democracy(..) | + Call::Council(..) | + Call::TechnicalCommittee(..) | + Call::ElectionsPhragmen(..) | + Call::Treasury(..) | + Call::Utility(..) ), ProxyType::Staking => matches!(c, - Call::Staking(..) | Call::Utility(..) + Call::Staking(..) | + Call::Session(..) | + Call::Utility(..) ), ProxyType::IdentityJudgement => matches!(c, - Call::Identity(pallet_identity::Call::provide_judgement(..)) - | Call::Utility(pallet_utility::Call::batch(..)) + Call::Identity(pallet_identity::Call::provide_judgement(..)) | + Call::Utility(..) 
) } } @@ -874,7 +868,7 @@ impl InstanceFilter for ProxyType { } } -impl pallet_proxy::Trait for Runtime { +impl pallet_proxy::Config for Runtime { type Event = Event; type Call = Call; type Currency = Balances; @@ -889,37 +883,232 @@ impl pallet_proxy::Trait for Runtime { type AnnouncementDepositFactor = AnnouncementDepositFactor; } +pub struct FixCouncilHistoricalVotes; +impl frame_support::traits::OnRuntimeUpgrade for FixCouncilHistoricalVotes { + fn on_runtime_upgrade() -> frame_support::weights::Weight { + use frame_support::traits::ReservableCurrency; + use sp_runtime::traits::Zero; + let mut failure: Balance = 0; + // https://github.com/paritytech/polkadot/pull/1252/files#diff-cba4e599a9fdd88fe8d33b5ed913958d63f844186b53c5cbe9bc73a2e2944857R22 + + // https://polkascan.io/kusama/runtime-module/2007-electionsphragmen + let old_bond = 50_000_000_000; + // https://polkascan.io/kusama/runtime-module/2008-electionsphragmen + let current_bond = 8_333_333_330; + let to_unreserve = old_bond - current_bond; // 41666666670 + + // source of accounts: https://github.com/paritytech/substrate/issues/7223 + vec![ + [52u8, 227, 117, 17, 229, 245, 8, 66, 43, 10, 142, 216, 196, 102, 119, 154, 34, 41, 53, 183, 37, 186, 250, 70, 247, 129, 207, 56, 2, 96, 181, 69], + [87, 71, 87, 4, 112, 230, 183, 229, 153, 158, 195, 253, 122, 165, 32, 37, 212, 105, 167, 124, 20, 165, 83, 106, 177, 214, 223, 18, 146, 184, 186, 42], + [74, 223, 81, 164, 123, 114, 121, 83, 102, 213, 34, 133, 227, 41, 34, 156, 131, 110, 167, 187, 254, 19, 157, 190, 143, 160, 112, 12, 79, 134, 252, 86], + [98, 135, 195, 120, 192, 49, 156, 220, 141, 79, 176, 216, 27, 229, 80, 37, 72, 104, 114, 242, 254, 188, 218, 156, 66, 143, 164, 131, 182, 181, 43, 27], + [22, 106, 142, 133, 251, 42, 232, 228, 187, 104, 21, 64, 122, 178, 225, 117, 115, 5, 10, 8, 14, 27, 171, 197, 2, 34, 100, 254, 249, 233, 111, 94], + [230, 17, 194, 236, 237, 27, 86, 17, 131, 248, 143, 174, 208, 221, 125, 136, 213, 250, 253, 241, 111, 88, 64, 198, 
62, 195, 109, 140, 49, 19, 111, 97], + [45, 100, 142, 202, 87, 103, 177, 184, 106, 165, 70, 32, 79, 239, 241, 127, 98, 45, 74, 19, 53, 72, 54, 34, 95, 212, 237, 10, 49, 18, 118, 11], + [78, 212, 66, 185, 0, 51, 101, 94, 134, 29, 31, 236, 213, 26, 156, 115, 199, 195, 117, 27, 34, 125, 115, 175, 37, 139, 73, 23, 110, 16, 121, 19], + [198, 17, 209, 81, 89, 27, 253, 242, 89, 118, 43, 153, 183, 128, 97, 97, 123, 89, 210, 171, 23, 66, 63, 32, 239, 233, 142, 222, 32, 184, 217, 120], + [48, 89, 157, 186, 80, 181, 243, 186, 11, 54, 248, 86, 167, 97, 235, 60, 10, 238, 97, 232, 48, 212, 190, 180, 72, 239, 148, 182, 173, 146, 190, 57], + [178, 75, 65, 145, 80, 177, 162, 44, 37, 159, 216, 50, 26, 48, 88, 234, 131, 168, 17, 141, 41, 235, 11, 196, 110, 0, 86, 230, 249, 136, 148, 39], + [0, 90, 67, 60, 142, 21, 28, 129, 174, 148, 133, 68, 244, 203, 7, 98, 43, 24, 168, 67, 4, 128, 222, 111, 198, 225, 163, 139, 196, 111, 156, 39], + [80, 10, 128, 247, 239, 148, 61, 30, 111, 173, 141, 133, 33, 169, 238, 221, 44, 22, 26, 149, 224, 64, 133, 242, 123, 198, 162, 35, 123, 47, 17, 57], + [228, 248, 227, 202, 10, 103, 4, 160, 7, 148, 69, 176, 153, 221, 192, 80, 193, 253, 39, 48, 70, 249, 58, 115, 4, 15, 66, 115, 105, 58, 184, 61], + [146, 142, 243, 123, 168, 13, 37, 253, 223, 148, 61, 229, 35, 244, 110, 88, 140, 135, 188, 134, 227, 131, 24, 149, 242, 125, 169, 157, 38, 154, 160, 18], + [12, 55, 156, 202, 114, 167, 250, 113, 52, 125, 148, 219, 103, 69, 77, 226, 216, 92, 20, 234, 202, 146, 140, 75, 76, 99, 153, 156, 27, 168, 164, 48], + [94, 105, 67, 219, 185, 200, 207, 213, 51, 119, 166, 115, 7, 41, 14, 250, 193, 175, 244, 170, 35, 242, 134, 43, 216, 100, 10, 243, 117, 111, 121, 44], + [176, 235, 16, 242, 219, 110, 35, 128, 177, 12, 46, 128, 32, 93, 131, 158, 3, 181, 150, 226, 40, 253, 141, 242, 188, 117, 191, 197, 150, 174, 171, 36], + [188, 94, 5, 123, 119, 210, 246, 167, 145, 84, 105, 228, 217, 124, 68, 191, 165, 211, 135, 133, 201, 241, 211, 8, 146, 250, 25, 231, 234, 206, 57, 57], + 
[190, 109, 228, 0, 24, 21, 61, 124, 206, 0, 67, 246, 131, 206, 237, 153, 207, 59, 48, 135, 152, 89, 96, 151, 169, 64, 107, 186, 201, 145, 144, 21], + [168, 176, 158, 34, 73, 77, 195, 235, 190, 198, 231, 174, 81, 174, 202, 99, 219, 183, 220, 4, 216, 95, 64, 254, 135, 161, 130, 228, 157, 18, 205, 122], + [58, 175, 247, 7, 11, 38, 34, 147, 124, 193, 15, 99, 218, 12, 92, 232, 75, 72, 123, 210, 200, 62, 174, 59, 183, 5, 78, 112, 137, 169, 221, 5], + [38, 132, 41, 39, 201, 138, 80, 171, 29, 67, 154, 180, 95, 33, 197, 190, 182, 151, 5, 86, 225, 253, 123, 82, 223, 68, 151, 126, 67, 68, 177, 72], + [160, 50, 214, 174, 242, 243, 162, 74, 49, 196, 28, 253, 251, 33, 243, 155, 163, 253, 207, 201, 237, 31, 56, 185, 22, 125, 172, 178, 228, 61, 116, 124], + [94, 237, 179, 116, 143, 73, 1, 160, 48, 111, 172, 136, 170, 109, 127, 28, 131, 61, 146, 143, 219, 236, 250, 236, 67, 247, 90, 172, 31, 95, 125, 122], + [136, 143, 102, 104, 40, 232, 50, 138, 51, 100, 122, 71, 188, 151, 87, 74, 106, 86, 113, 129, 146, 112, 204, 1, 230, 108, 113, 57, 161, 166, 145, 26], + [41, 76, 90, 193, 202, 37, 94, 199, 50, 139, 43, 253, 174, 91, 152, 164, 163, 181, 13, 201, 149, 100, 7, 183, 161, 145, 13, 143, 215, 229, 129, 232], + [16, 252, 67, 246, 61, 252, 235, 195, 3, 194, 11, 182, 243, 47, 162, 8, 197, 85, 240, 183, 52, 85, 172, 246, 161, 197, 65, 200, 79, 219, 177, 104], + [160, 87, 16, 231, 9, 55, 108, 216, 216, 28, 145, 235, 37, 92, 96, 16, 52, 194, 45, 134, 150, 78, 181, 46, 183, 229, 201, 35, 45, 19, 176, 94], + [134, 135, 73, 95, 235, 234, 33, 222, 68, 159, 242, 115, 129, 249, 48, 141, 166, 241, 92, 229, 217, 211, 20, 98, 97, 39, 93, 236, 24, 205, 86, 111], + [251, 174, 188, 92, 115, 39, 20, 75, 229, 29, 243, 91, 181, 15, 248, 97, 44, 140, 154, 215, 63, 199, 182, 11, 67, 130, 185, 121, 86, 61, 226, 15], + [190, 224, 239, 104, 232, 185, 30, 26, 131, 177, 69, 35, 42, 159, 216, 68, 170, 200, 161, 101, 95, 61, 114, 21, 61, 99, 221, 132, 47, 71, 6, 100], + [132, 237, 28, 134, 11, 165, 89, 21, 143, 
203, 78, 152, 122, 33, 213, 210, 155, 117, 79, 248, 141, 180, 215, 75, 125, 214, 64, 79, 188, 233, 114, 22], + [203, 124, 199, 178, 246, 36, 201, 44, 111, 173, 142, 231, 116, 88, 163, 92, 122, 202, 173, 226, 176, 62, 95, 6, 52, 80, 156, 239, 29, 183, 206, 9], + [178, 38, 5, 179, 106, 208, 161, 253, 17, 62, 16, 224, 250, 91, 72, 135, 21, 160, 113, 252, 152, 33, 173, 20, 68, 167, 33, 102, 67, 28, 30, 21], + [0, 85, 93, 35, 172, 249, 206, 242, 240, 251, 36, 168, 255, 45, 70, 79, 228, 161, 147, 137, 98, 46, 36, 1, 38, 15, 73, 36, 114, 171, 123, 70], + [198, 88, 98, 42, 56, 161, 58, 36, 180, 89, 254, 109, 16, 255, 214, 120, 192, 204, 248, 245, 145, 124, 72, 217, 139, 9, 182, 116, 98, 86, 9, 26], + [178, 219, 195, 92, 207, 8, 98, 148, 160, 210, 78, 16, 145, 208, 140, 163, 181, 194, 164, 135, 7, 28, 79, 181, 64, 112, 230, 102, 204, 153, 224, 45], + [118, 253, 161, 198, 240, 206, 6, 239, 41, 107, 105, 123, 178, 23, 249, 142, 69, 146, 242, 95, 20, 113, 228, 97, 146, 148, 115, 55, 146, 48, 147, 173], + [171, 42, 226, 38, 198, 62, 131, 93, 136, 64, 239, 182, 111, 170, 191, 132, 59, 203, 110, 239, 70, 42, 12, 117, 248, 87, 48, 58, 24, 193, 214, 207], + [226, 156, 174, 201, 243, 176, 175, 214, 64, 12, 186, 43, 40, 42, 230, 20, 41, 71, 218, 167, 131, 80, 249, 155, 42, 116, 123, 52, 44, 42, 25, 64], + [38, 233, 51, 113, 227, 226, 183, 195, 139, 229, 42, 201, 30, 142, 166, 33, 165, 173, 117, 24, 213, 88, 15, 167, 179, 109, 37, 11, 158, 211, 87, 26], + [28, 82, 239, 62, 195, 223, 46, 66, 201, 184, 90, 253, 224, 20, 86, 231, 70, 19, 20, 166, 143, 22, 94, 166, 11, 34, 2, 175, 87, 13, 17, 20], + [6, 121, 215, 46, 243, 76, 78, 115, 130, 220, 90, 195, 3, 135, 100, 66, 46, 201, 243, 74, 103, 244, 214, 70, 253, 30, 228, 245, 93, 182, 92, 27], + [56, 242, 67, 184, 105, 96, 247, 25, 150, 176, 97, 251, 46, 223, 29, 42, 114, 79, 82, 223, 42, 165, 104, 95, 225, 132, 222, 222, 236, 237, 180, 70], + [206, 163, 218, 190, 82, 178, 166, 101, 177, 225, 155, 248, 198, 145, 58, 93, 84, 224, 109, 100, 
19, 202, 61, 219, 236, 143, 154, 34, 65, 94, 196, 119], + [32, 51, 169, 66, 133, 238, 5, 16, 36, 249, 231, 26, 132, 203, 51, 48, 85, 127, 124, 4, 154, 5, 45, 96, 136, 44, 186, 14, 212, 82, 209, 45], + [136, 87, 179, 203, 183, 159, 117, 238, 119, 98, 216, 164, 49, 132, 57, 146, 127, 210, 181, 22, 67, 156, 89, 113, 52, 195, 208, 159, 224, 227, 241, 3], + [58, 69, 248, 95, 254, 189, 177, 143, 25, 199, 92, 139, 237, 97, 234, 17, 219, 250, 40, 132, 41, 202, 235, 238, 203, 35, 33, 26, 73, 237, 165, 32], + [146, 24, 163, 171, 202, 106, 170, 124, 218, 48, 242, 73, 62, 87, 229, 38, 27, 6, 15, 95, 57, 47, 45, 76, 221, 154, 171, 55, 19, 227, 61, 60], + [60, 58, 195, 101, 58, 75, 249, 167, 40, 117, 131, 147, 187, 201, 189, 197, 202, 49, 226, 154, 237, 70, 161, 88, 95, 211, 212, 145, 2, 87, 200, 33], + [230, 153, 129, 0, 226, 30, 98, 227, 216, 119, 32, 200, 72, 8, 114, 41, 148, 250, 98, 95, 100, 23, 108, 158, 149, 236, 85, 106, 118, 13, 64, 78], + [208, 159, 158, 0, 216, 253, 73, 87, 0, 248, 236, 76, 249, 90, 162, 232, 39, 227, 251, 183, 239, 0, 130, 254, 46, 202, 75, 146, 104, 48, 250, 29], + [206, 65, 0, 132, 231, 167, 48, 145, 37, 141, 211, 98, 59, 98, 217, 50, 157, 101, 135, 114, 63, 194, 96, 210, 142, 85, 21, 144, 133, 63, 93, 88], + [58, 34, 87, 220, 204, 157, 71, 5, 126, 215, 168, 184, 84, 75, 160, 45, 84, 172, 6, 243, 13, 119, 230, 88, 140, 30, 21, 137, 150, 229, 20, 38], + [202, 91, 193, 145, 93, 167, 74, 186, 58, 173, 215, 206, 123, 128, 144, 69, 213, 235, 91, 115, 85, 146, 89, 117, 95, 220, 216, 90, 64, 165, 220, 110], + [10, 58, 158, 3, 226, 253, 136, 14, 137, 63, 60, 210, 253, 3, 181, 124, 125, 40, 29, 43, 70, 105, 185, 59, 16, 42, 148, 5, 43, 227, 101, 98], + [172, 150, 113, 140, 115, 71, 210, 56, 57, 84, 225, 178, 82, 233, 29, 155, 84, 156, 238, 44, 60, 146, 176, 166, 170, 54, 96, 170, 124, 201, 81, 56], + [158, 190, 208, 112, 142, 212, 167, 220, 247, 24, 86, 187, 83, 134, 53, 201, 255, 190, 70, 99, 40, 99, 7, 223, 197, 166, 14, 154, 188, 223, 70, 30], + [60, 67, 
92, 98, 149, 98, 142, 28, 126, 136, 184, 249, 235, 75, 188, 61, 96, 166, 59, 25, 140, 13, 201, 175, 192, 130, 4, 170, 74, 190, 195, 113], + [78, 203, 3, 76, 75, 78, 165, 166, 103, 0, 12, 191, 228, 137, 234, 15, 122, 162, 12, 197, 222, 180, 111, 152, 25, 187, 100, 17, 157, 252, 83, 39], + [146, 250, 178, 111, 64, 184, 149, 164, 242, 68, 16, 85, 67, 135, 47, 22, 85, 142, 224, 194, 245, 114, 165, 219, 48, 131, 56, 230, 241, 205, 118, 35], + [111, 136, 30, 180, 158, 175, 45, 159, 88, 34, 172, 160, 141, 149, 18, 237, 72, 43, 243, 95, 36, 70, 169, 253, 20, 102, 134, 46, 122, 117, 94, 40], + [230, 224, 55, 10, 146, 36, 6, 46, 185, 8, 5, 58, 133, 127, 124, 142, 115, 39, 215, 94, 175, 55, 41, 148, 133, 70, 80, 119, 188, 168, 103, 26], + [88, 134, 227, 88, 24, 157, 191, 87, 39, 23, 227, 3, 155, 129, 197, 229, 132, 243, 115, 46, 114, 152, 182, 251, 24, 162, 203, 14, 223, 70, 110, 18], + [78, 192, 56, 30, 68, 39, 237, 101, 103, 247, 165, 195, 40, 40, 140, 237, 54, 195, 59, 236, 234, 110, 206, 205, 129, 69, 0, 31, 66, 48, 172, 27], + [188, 110, 18, 215, 171, 112, 171, 234, 76, 8, 219, 112, 85, 232, 79, 22, 186, 184, 23, 181, 251, 53, 144, 136, 173, 81, 144, 66, 45, 249, 221, 29], + [184, 134, 3, 172, 197, 123, 71, 84, 219, 125, 44, 26, 224, 165, 217, 103, 32, 108, 191, 22, 216, 108, 41, 133, 56, 89, 83, 174, 178, 5, 143, 5], + [10, 216, 180, 249, 77, 200, 230, 34, 158, 44, 68, 141, 153, 80, 148, 205, 193, 189, 53, 109, 193, 76, 97, 85, 70, 122, 192, 126, 222, 24, 184, 114], + [26, 170, 217, 19, 57, 86, 181, 16, 1, 80, 222, 130, 169, 29, 138, 87, 109, 207, 182, 63, 199, 221, 13, 83, 54, 8, 57, 131, 149, 198, 208, 83], + [96, 138, 24, 198, 63, 184, 175, 138, 213, 226, 226, 154, 248, 15, 23, 237, 238, 81, 195, 43, 137, 19, 196, 103, 238, 168, 38, 237, 103, 102, 37, 40], + [52, 128, 169, 39, 185, 38, 19, 53, 116, 172, 54, 108, 87, 60, 188, 116, 37, 164, 126, 195, 94, 206, 39, 89, 153, 179, 209, 240, 131, 82, 156, 46], + [246, 4, 145, 84, 210, 56, 187, 133, 217, 118, 194, 157, 220, 
55, 43, 88, 228, 254, 223, 5, 126, 65, 104, 125, 12, 250, 57, 241, 71, 113, 171, 83], + [86, 173, 152, 172, 190, 131, 221, 21, 171, 209, 16, 17, 30, 220, 112, 220, 192, 162, 19, 36, 91, 45, 44, 192, 169, 65, 10, 9, 51, 57, 255, 70], + [64, 123, 211, 149, 104, 201, 8, 6, 47, 202, 49, 232, 8, 152, 189, 202, 190, 237, 160, 117, 1, 51, 131, 240, 249, 166, 158, 208, 126, 177, 38, 38], + [2, 57, 183, 234, 172, 195, 234, 64, 151, 134, 240, 51, 106, 137, 118, 7, 86, 35, 172, 239, 49, 159, 197, 119, 124, 118, 3, 61, 213, 133, 184, 64], + [96, 254, 164, 33, 61, 85, 200, 104, 191, 200, 140, 122, 127, 80, 64, 175, 89, 63, 213, 255, 88, 154, 127, 26, 93, 114, 70, 81, 223, 37, 5, 95], + [72, 35, 54, 126, 94, 99, 159, 33, 213, 118, 137, 168, 157, 235, 63, 72, 148, 114, 187, 16, 4, 122, 103, 117, 103, 88, 162, 148, 218, 167, 159, 21], + [232, 206, 1, 108, 146, 138, 182, 169, 95, 61, 218, 93, 127, 149, 24, 50, 55, 80, 176, 2, 18, 205, 131, 111, 249, 163, 241, 242, 126, 178, 193, 33], + [248, 254, 82, 84, 191, 224, 104, 1, 129, 7, 9, 121, 239, 231, 44, 94, 176, 153, 4, 59, 48, 7, 79, 48, 221, 12, 21, 168, 74, 188, 68, 92], + [2, 156, 106, 91, 42, 221, 67, 178, 36, 110, 31, 47, 8, 233, 169, 131, 255, 102, 80, 228, 186, 141, 9, 32, 35, 145, 198, 162, 141, 60, 223, 54], + [0, 95, 174, 86, 79, 8, 222, 91, 181, 144, 141, 255, 246, 191, 240, 249, 80, 123, 116, 75, 33, 215, 1, 125, 71, 138, 167, 239, 92, 135, 249, 124], + [4, 198, 135, 31, 33, 23, 62, 34, 187, 204, 153, 2, 161, 186, 65, 165, 19, 204, 95, 255, 121, 124, 148, 138, 54, 146, 124, 239, 112, 20, 140, 48], + [146, 46, 66, 112, 210, 142, 32, 160, 129, 86, 195, 218, 234, 150, 130, 77, 79, 69, 30, 232, 224, 12, 77, 254, 7, 81, 203, 63, 65, 228, 187, 74], + [52, 234, 22, 159, 11, 191, 106, 184, 97, 55, 123, 62, 156, 195, 78, 82, 255, 163, 241, 103, 79, 136, 123, 113, 177, 75, 50, 64, 66, 33, 177, 53], + [10, 122, 197, 190, 105, 168, 36, 63, 136, 128, 213, 253, 1, 91, 46, 143, 143, 48, 206, 108, 113, 98, 248, 188, 181, 173, 26, 31, 
164, 36, 109, 50], + [10, 91, 84, 200, 115, 95, 146, 200, 152, 137, 149, 161, 91, 207, 61, 17, 192, 46, 232, 218, 103, 99, 52, 168, 162, 144, 252, 116, 63, 99, 73, 40], + [36, 123, 240, 229, 60, 125, 242, 213, 41, 87, 26, 15, 48, 180, 88, 19, 205, 151, 252, 208, 8, 248, 210, 15, 180, 43, 68, 160, 205, 95, 28, 119], + [142, 57, 249, 121, 182, 35, 220, 93, 141, 234, 130, 249, 187, 90, 126, 152, 100, 181, 181, 61, 85, 2, 201, 139, 200, 140, 14, 115, 199, 49, 192, 14], + [132, 70, 235, 131, 233, 186, 168, 74, 114, 31, 172, 138, 150, 168, 7, 117, 176, 86, 48, 31, 223, 126, 113, 95, 57, 141, 125, 203, 37, 249, 174, 114], + [164, 213, 85, 73, 205, 119, 18, 200, 239, 149, 51, 108, 167, 171, 251, 28, 232, 84, 51, 51, 30, 72, 84, 172, 255, 170, 232, 72, 135, 12, 105, 6], + [214, 194, 236, 50, 109, 31, 114, 151, 96, 221, 23, 131, 234, 33, 109, 164, 43, 212, 147, 65, 13, 192, 151, 171, 47, 139, 85, 207, 241, 109, 226, 37], + [25, 148, 223, 91, 240, 244, 67, 66, 177, 113, 155, 251, 177, 86, 18, 134, 189, 129, 182, 216, 79, 87, 127, 85, 239, 69, 254, 122, 214, 245, 14, 74], + [68, 16, 115, 21, 34, 226, 104, 3, 184, 230, 235, 110, 84, 103, 215, 122, 170, 5, 6, 132, 185, 87, 34, 187, 166, 96, 136, 44, 144, 169, 208, 21], + [92, 143, 180, 46, 128, 189, 71, 207, 86, 229, 246, 37, 92, 23, 88, 25, 163, 73, 234, 107, 147, 239, 18, 125, 118, 57, 132, 179, 253, 113, 79, 49], + [152, 97, 132, 18, 9, 74, 115, 6, 101, 205, 185, 117, 139, 71, 65, 181, 84, 53, 3, 174, 8, 178, 181, 247, 154, 70, 3, 147, 89, 138, 183, 54], + [117, 159, 129, 181, 10, 57, 31, 216, 133, 197, 227, 207, 216, 106, 49, 242, 18, 70, 125, 101, 88, 44, 149, 1, 10, 72, 187, 48, 210, 126, 209, 231], + [230, 213, 178, 217, 236, 22, 235, 17, 122, 106, 200, 208, 125, 215, 17, 51, 126, 87, 75, 194, 187, 122, 246, 10, 57, 213, 62, 197, 108, 139, 115, 89], + [56, 85, 62, 17, 98, 50, 252, 144, 165, 195, 142, 14, 85, 228, 46, 97, 195, 219, 204, 67, 197, 178, 64, 234, 124, 62, 50, 179, 125, 103, 201, 81], + [184, 253, 244, 203, 
162, 173, 242, 65, 221, 223, 194, 0, 136, 194, 60, 114, 56, 128, 185, 125, 197, 65, 244, 137, 5, 217, 158, 177, 186, 14, 92, 39], + [160, 76, 27, 164, 78, 128, 105, 139, 142, 143, 248, 18, 107, 138, 77, 120, 70, 196, 126, 223, 48, 55, 194, 172, 131, 28, 239, 131, 36, 2, 89, 28], + [186, 25, 173, 248, 171, 133, 40, 201, 245, 48, 88, 180, 148, 182, 21, 77, 222, 15, 173, 254, 43, 222, 179, 169, 185, 200, 119, 97, 205, 203, 180, 65], + [12, 76, 85, 245, 143, 131, 207, 130, 43, 102, 255, 202, 240, 87, 249, 239, 185, 252, 101, 71, 87, 85, 3, 232, 17, 88, 172, 202, 13, 145, 101, 27], + [113, 153, 171, 173, 152, 127, 178, 8, 186, 128, 74, 4, 122, 115, 23, 37, 195, 7, 45, 117, 37, 238, 162, 188, 223, 217, 127, 168, 193, 76, 138, 119], + [12, 206, 158, 33, 12, 71, 63, 209, 242, 1, 120, 254, 136, 156, 23, 137, 86, 234, 28, 243, 37, 197, 75, 26, 67, 154, 136, 188, 98, 254, 120, 81], + [134, 213, 134, 159, 7, 115, 242, 48, 151, 43, 141, 107, 62, 252, 233, 210, 189, 93, 155, 169, 218, 86, 103, 181, 166, 136, 166, 251, 103, 252, 201, 36], + [156, 152, 138, 156, 80, 10, 196, 114, 228, 177, 236, 190, 171, 59, 16, 81, 77, 203, 139, 205, 80, 8, 183, 26, 32, 234, 161, 191, 40, 29, 168, 15], + [96, 132, 24, 217, 54, 66, 26, 130, 142, 118, 240, 102, 152, 105, 47, 47, 66, 53, 132, 35, 4, 42, 239, 229, 119, 171, 238, 44, 33, 41, 228, 187], + [38, 43, 59, 107, 223, 253, 235, 155, 48, 76, 96, 233, 143, 87, 248, 107, 239, 214, 130, 34, 67, 94, 60, 243, 23, 172, 32, 79, 79, 55, 112, 78], + [246, 178, 29, 98, 72, 50, 9, 75, 3, 170, 103, 46, 1, 100, 98, 160, 32, 226, 23, 204, 103, 177, 67, 71, 133, 185, 145, 20, 162, 180, 250, 90], + [138, 152, 73, 84, 229, 126, 123, 240, 75, 163, 140, 241, 166, 30, 215, 71, 131, 212, 202, 118, 116, 76, 63, 169, 246, 220, 10, 253, 85, 217, 23, 71], + [38, 207, 39, 144, 245, 25, 234, 121, 233, 220, 11, 81, 64, 16, 219, 209, 75, 187, 207, 106, 139, 84, 32, 107, 108, 178, 68, 20, 3, 5, 236, 112], + [64, 255, 129, 147, 44, 86, 190, 113, 168, 32, 124, 138, 153, 50, 
141, 96, 165, 162, 176, 111, 212, 14, 208, 94, 196, 178, 214, 106, 235, 202, 255, 104], + [44, 25, 247, 67, 149, 0, 166, 187, 208, 78, 125, 185, 236, 25, 139, 4, 89, 160, 4, 196, 128, 47, 39, 229, 0, 254, 77, 248, 122, 61, 227, 27], + [174, 206, 85, 8, 225, 55, 152, 52, 175, 47, 168, 28, 167, 138, 137, 244, 103, 82, 129, 11, 37, 53, 123, 150, 243, 158, 203, 190, 18, 195, 200, 55], + [190, 243, 241, 170, 113, 179, 43, 186, 119, 91, 56, 134, 185, 0, 162, 227, 251, 79, 65, 99, 213, 140, 27, 206, 10, 174, 207, 224, 181, 92, 27, 95], + [218, 214, 230, 25, 76, 32, 165, 14, 194, 19, 56, 71, 77, 52, 110, 93, 38, 112, 237, 19, 172, 17, 68, 117, 145, 189, 5, 133, 201, 124, 200, 101], + [146, 73, 247, 0, 26, 190, 182, 82, 240, 43, 224, 199, 223, 167, 173, 151, 130, 188, 113, 208, 86, 81, 255, 20, 235, 214, 89, 225, 229, 159, 130, 126], + [204, 88, 161, 4, 79, 211, 105, 244, 82, 11, 187, 174, 226, 18, 241, 32, 61, 124, 179, 97, 27, 84, 80, 153, 243, 137, 134, 27, 145, 28, 2, 90], + [178, 33, 243, 211, 58, 219, 171, 225, 105, 91, 109, 239, 143, 159, 179, 179, 10, 51, 201, 238, 226, 231, 176, 36, 52, 17, 82, 213, 253, 187, 226, 51], + [172, 29, 45, 130, 196, 166, 155, 22, 195, 206, 158, 181, 208, 182, 243, 79, 148, 138, 52, 239, 230, 36, 136, 135, 154, 81, 75, 188, 131, 126, 14, 80], + [126, 194, 148, 162, 173, 83, 41, 233, 36, 136, 220, 29, 232, 46, 77, 165, 208, 239, 112, 206, 133, 36, 44, 15, 93, 22, 174, 219, 36, 96, 0, 125], + [182, 191, 157, 11, 214, 231, 26, 222, 121, 107, 197, 21, 181, 99, 44, 71, 187, 157, 143, 154, 229, 81, 95, 52, 45, 55, 23, 134, 255, 110, 90, 30], + [162, 160, 236, 188, 172, 133, 147, 194, 200, 66, 108, 85, 218, 66, 110, 32, 41, 3, 162, 118, 183, 33, 255, 117, 139, 139, 110, 108, 2, 96, 52, 5], + [218, 18, 91, 123, 235, 68, 15, 182, 161, 69, 168, 24, 157, 227, 50, 42, 108, 168, 226, 83, 193, 19, 39, 128, 139, 41, 198, 42, 232, 118, 176, 13], + [218, 214, 145, 46, 29, 34, 180, 161, 82, 185, 48, 163, 42, 136, 88, 162, 4, 109, 16, 187, 21, 166, 51, 
211, 124, 151, 142, 222, 173, 110, 119, 46], + [94, 215, 163, 23, 159, 65, 29, 10, 174, 240, 104, 130, 69, 139, 87, 245, 27, 53, 80, 145, 184, 70, 187, 54, 96, 153, 66, 109, 80, 25, 162, 82], + [104, 214, 130, 92, 100, 194, 124, 40, 175, 70, 14, 143, 173, 49, 59, 178, 254, 215, 90, 255, 89, 232, 223, 153, 179, 237, 202, 237, 236, 150, 216, 102], + [166, 101, 158, 76, 63, 34, 194, 170, 151, 213, 74, 54, 227, 26, 181, 122, 97, 122, 246, 43, 212, 62, 198, 46, 213, 112, 119, 20, 146, 6, 146, 112], + [20, 229, 93, 235, 203, 26, 151, 13, 177, 181, 31, 83, 86, 1, 8, 13, 18, 141, 245, 223, 242, 89, 63, 238, 30, 51, 105, 19, 157, 81, 192, 114], + [44, 36, 100, 44, 239, 20, 231, 115, 21, 191, 70, 124, 0, 145, 124, 116, 154, 25, 195, 229, 166, 223, 112, 85, 72, 166, 122, 167, 173, 10, 209, 56], + [142, 133, 30, 217, 146, 34, 143, 34, 104, 238, 140, 97, 79, 230, 7, 93, 56, 0, 6, 10, 225, 64, 152, 224, 48, 148, 19, 160, 168, 28, 68, 112], + [84, 94, 128, 100, 248, 137, 138, 41, 212, 129, 30, 9, 178, 7, 207, 51, 2, 229, 206, 254, 241, 102, 21, 248, 88, 15, 205, 143, 166, 58, 98, 78], + [218, 47, 127, 176, 63, 207, 248, 72, 142, 2, 155, 189, 98, 249, 82, 112, 244, 5, 195, 2, 137, 92, 194, 133, 100, 166, 158, 6, 144, 50, 230, 116], + [42, 138, 54, 49, 198, 224, 120, 197, 217, 30, 242, 215, 114, 10, 252, 175, 64, 173, 186, 66, 90, 100, 138, 128, 130, 66, 13, 125, 7, 140, 71, 58], + [156, 120, 182, 33, 219, 174, 128, 170, 103, 151, 162, 143, 117, 32, 89, 238, 241, 171, 215, 99, 218, 189, 163, 89, 85, 96, 160, 52, 143, 248, 46, 57], + [232, 139, 71, 107, 182, 41, 146, 230, 64, 3, 205, 166, 216, 146, 173, 149, 225, 180, 93, 128, 227, 254, 240, 29, 10, 65, 25, 225, 235, 227, 163, 6], + [121, 91, 9, 166, 254, 68, 24, 31, 178, 252, 33, 186, 252, 39, 149, 139, 185, 99, 188, 188, 73, 107, 169, 0, 92, 176, 6, 44, 242, 122, 240, 145], + [18, 52, 99, 140, 43, 150, 145, 119, 163, 23, 246, 218, 246, 253, 90, 40, 104, 207, 68, 132, 217, 142, 158, 174, 83, 255, 207, 181, 178, 229, 182, 95], + 
[64, 164, 10, 249, 72, 67, 69, 141, 42, 50, 223, 253, 168, 193, 19, 20, 60, 76, 38, 59, 104, 159, 178, 47, 235, 40, 23, 212, 75, 85, 116, 71], + [90, 135, 58, 121, 143, 143, 110, 100, 254, 215, 107, 203, 160, 199, 182, 86, 86, 161, 81, 93, 144, 199, 51, 190, 175, 173, 102, 139, 228, 4, 116, 109], + [62, 30, 163, 156, 6, 70, 240, 232, 22, 213, 96, 56, 232, 180, 57, 15, 60, 179, 203, 155, 153, 72, 62, 189, 153, 198, 5, 207, 52, 135, 38, 117], + [44, 112, 144, 18, 248, 7, 175, 143, 195, 240, 210, 171, 176, 197, 28, 169, 168, 141, 78, 242, 77, 26, 9, 43, 248, 157, 172, 245, 206, 99, 234, 29], + [28, 46, 116, 60, 92, 209, 172, 126, 74, 248, 247, 204, 141, 211, 239, 86, 31, 116, 155, 112, 215, 44, 170, 215, 182, 233, 212, 116, 28, 124, 47, 56], + [250, 97, 238, 17, 124, 244, 135, 220, 57, 98, 15, 172, 108, 62, 133, 81, 17, 246, 132, 53, 130, 122, 28, 100, 104, 164, 91, 138, 183, 59, 122, 147], + [58, 13, 33, 166, 234, 193, 159, 44, 11, 84, 97, 158, 123, 225, 71, 8, 234, 35, 71, 206, 84, 152, 118, 183, 248, 102, 3, 149, 189, 13, 86, 168], + [210, 150, 179, 95, 208, 49, 151, 66, 83, 55, 119, 53, 143, 48, 183, 8, 170, 246, 179, 135, 9, 210, 90, 89, 246, 87, 110, 88, 22, 108, 209, 77], + [78, 21, 80, 146, 0, 103, 4, 128, 134, 169, 243, 15, 121, 154, 23, 73, 80, 142, 34, 42, 209, 169, 217, 153, 245, 134, 230, 243, 231, 130, 201, 50], + [172, 29, 49, 23, 191, 6, 255, 232, 145, 41, 74, 11, 29, 19, 218, 87, 78, 212, 129, 65, 9, 0, 161, 70, 196, 152, 211, 120, 21, 216, 97, 107], + [171, 172, 81, 10, 126, 40, 213, 246, 82, 66, 253, 253, 50, 154, 112, 117, 40, 245, 162, 134, 93, 237, 142, 52, 41, 104, 176, 27, 1, 79, 238, 84], + [122, 115, 159, 88, 227, 223, 95, 8, 10, 209, 71, 155, 19, 244, 39, 151, 221, 160, 232, 147, 185, 17, 168, 33, 30, 80, 97, 94, 111, 90, 145, 23], + [60, 63, 34, 232, 97, 176, 18, 120, 81, 178, 12, 69, 219, 238, 113, 125, 26, 228, 253, 183, 174, 26, 138, 208, 111, 64, 64, 41, 244, 124, 121, 67], + [40, 148, 254, 89, 9, 137, 110, 100, 156, 123, 146, 165, 201, 
220, 254, 199, 164, 120, 52, 58, 234, 170, 210, 158, 121, 241, 68, 27, 79, 59, 113, 37], + [10, 80, 189, 80, 152, 191, 196, 83, 56, 254, 215, 66, 252, 122, 147, 90, 255, 158, 208, 88, 197, 55, 123, 32, 17, 101, 133, 144, 127, 16, 98, 1], + [46, 105, 172, 145, 220, 43, 62, 84, 175, 210, 215, 71, 54, 231, 223, 217, 95, 170, 30, 115, 141, 171, 6, 108, 128, 50, 137, 128, 199, 201, 7, 110], + [34, 67, 70, 237, 99, 250, 41, 140, 128, 100, 237, 222, 206, 7, 18, 51, 3, 66, 165, 15, 47, 21, 42, 95, 175, 180, 84, 240, 9, 165, 104, 85], + [86, 15, 189, 117, 179, 219, 150, 239, 113, 227, 59, 97, 96, 14, 63, 55, 169, 38, 64, 8, 135, 218, 170, 174, 56, 13, 54, 54, 148, 156, 7, 103], + [106, 217, 75, 166, 62, 43, 95, 39, 205, 242, 178, 147, 7, 109, 3, 214, 253, 255, 44, 20, 164, 97, 54, 104, 211, 243, 117, 150, 167, 140, 152, 71], + [54, 149, 171, 208, 232, 116, 221, 99, 156, 141, 102, 199, 185, 226, 175, 117, 139, 91, 54, 222, 54, 187, 1, 240, 233, 80, 72, 207, 181, 224, 15, 104], + [50, 174, 189, 199, 130, 120, 182, 27, 121, 74, 196, 214, 54, 179, 189, 241, 91, 1, 232, 195, 235, 11, 118, 71, 106, 115, 21, 53, 107, 92, 173, 13], + [248, 50, 93, 17, 160, 222, 207, 148, 89, 28, 188, 52, 219, 39, 38, 73, 24, 224, 147, 207, 156, 221, 0, 146, 208, 108, 78, 134, 97, 111, 28, 41], + [196, 252, 84, 183, 173, 5, 166, 238, 111, 47, 225, 171, 174, 86, 2, 197, 161, 240, 88, 149, 207, 167, 191, 117, 184, 97, 188, 245, 46, 62, 24, 99], + [152, 102, 212, 80, 61, 5, 186, 40, 174, 224, 52, 123, 31, 99, 129, 168, 38, 158, 80, 205, 38, 8, 190, 75, 155, 233, 112, 115, 234, 155, 158, 5], + [58, 244, 16, 159, 67, 195, 93, 65, 105, 111, 153, 149, 45, 112, 230, 188, 137, 80, 77, 197, 83, 61, 191, 24, 151, 55, 187, 203, 215, 135, 96, 97], + [188, 163, 103, 207, 165, 67, 118, 65, 78, 154, 254, 205, 53, 215, 163, 42, 23, 1, 31, 210, 108, 134, 202, 237, 146, 247, 187, 188, 11, 238, 11, 127], + [70, 125, 148, 246, 12, 162, 254, 200, 189, 252, 132, 57, 38, 8, 141, 245, 173, 39, 79, 235, 74, 140, 44, 208, 70, 
92, 168, 203, 120, 245, 76, 114], + [44, 240, 131, 139, 5, 251, 24, 39, 24, 222, 133, 149, 37, 250, 30, 109, 83, 213, 87, 229, 252, 246, 49, 238, 159, 244, 76, 97, 152, 16, 212, 59], + [138, 247, 46, 8, 175, 253, 239, 75, 125, 166, 137, 80, 188, 72, 94, 147, 57, 41, 40, 23, 129, 252, 18, 213, 36, 233, 140, 140, 30, 144, 164, 29], + [20, 70, 102, 216, 66, 208, 224, 194, 67, 50, 123, 232, 185, 119, 254, 139, 64, 238, 36, 57, 24, 65, 14, 129, 107, 127, 195, 178, 199, 159, 116, 102], + [200, 131, 83, 61, 79, 131, 122, 150, 17, 115, 59, 190, 222, 176, 212, 32, 178, 87, 61, 28, 144, 55, 39, 59, 72, 181, 35, 55, 104, 248, 95, 8], + [124, 213, 155, 165, 255, 79, 185, 97, 149, 226, 204, 185, 234, 204, 215, 139, 255, 152, 46, 15, 21, 219, 126, 148, 45, 114, 209, 185, 87, 162, 252, 10], + [112, 189, 233, 173, 82, 193, 14, 226, 75, 136, 20, 76, 97, 47, 14, 86, 208, 211, 183, 153, 91, 217, 224, 84, 17, 112, 224, 111, 46, 127, 199, 8], + [56, 15, 250, 13, 153, 166, 81, 158, 10, 180, 216, 160, 140, 45, 96, 255, 90, 140, 119, 98, 199, 158, 20, 138, 230, 238, 137, 145, 112, 1, 0, 68], + [62, 110, 138, 244, 155, 221, 46, 135, 254, 143, 195, 97, 196, 10, 114, 182, 95, 193, 193, 238, 177, 161, 79, 135, 6, 67, 54, 244, 45, 223, 231, 3], + [190, 126, 211, 122, 134, 233, 155, 156, 17, 151, 255, 143, 163, 165, 228, 182, 64, 59, 84, 1, 150, 246, 205, 9, 175, 47, 188, 67, 234, 154, 87, 115], + [156, 8, 170, 109, 173, 183, 172, 39, 165, 150, 128, 2, 57, 201, 163, 99, 200, 160, 148, 206, 213, 196, 98, 132, 153, 72, 241, 15, 81, 45, 158, 27], + [238, 158, 10, 156, 237, 29, 152, 9, 5, 107, 74, 220, 168, 210, 36, 234, 60, 53, 154, 185, 175, 31, 182, 152, 96, 40, 254, 129, 110, 55, 102, 90], + [222, 136, 73, 91, 148, 50, 65, 218, 20, 17, 179, 20, 86, 14, 220, 181, 27, 201, 144, 98, 219, 220, 77, 207, 144, 107, 172, 12, 72, 82, 244, 52], + [20, 188, 115, 8, 240, 253, 101, 118, 31, 236, 245, 236, 16, 75, 180, 56, 238, 70, 125, 153, 10, 248, 72, 55, 204, 56, 122, 105, 222, 73, 168, 95], + [134, 140, 
213, 79, 174, 161, 160, 228, 88, 54, 99, 91, 43, 246, 88, 115, 52, 54, 236, 105, 197, 86, 125, 101, 27, 229, 146, 57, 44, 187, 105, 220], + [100, 103, 253, 78, 112, 56, 185, 37, 194, 66, 35, 87, 56, 13, 140, 192, 197, 241, 125, 39, 47, 99, 154, 248, 252, 253, 31, 17, 86, 222, 112, 64], + [176, 119, 188, 76, 14, 254, 156, 6, 250, 209, 36, 141, 91, 39, 90, 121, 157, 44, 229, 114, 204, 187, 146, 96, 27, 172, 36, 104, 210, 159, 228, 75], + [212, 72, 42, 216, 212, 46, 156, 252, 128, 249, 248, 10, 9, 55, 100, 74, 36, 62, 89, 139, 239, 130, 62, 59, 33, 68, 68, 84, 53, 197, 54, 35], + [94, 229, 69, 146, 105, 249, 76, 245, 52, 214, 99, 26, 51, 45, 212, 153, 4, 169, 75, 56, 71, 104, 117, 103, 206, 172, 77, 215, 76, 187, 37, 18], + [168, 7, 6, 72, 246, 228, 59, 125, 138, 143, 16, 65, 139, 105, 97, 48, 210, 4, 108, 16, 100, 95, 16, 8, 93, 232, 14, 96, 152, 184, 95, 9], + [12, 30, 86, 186, 160, 124, 128, 173, 10, 212, 212, 241, 151, 236, 105, 29, 17, 4, 103, 1, 12, 168, 194, 86, 71, 57, 145, 157, 113, 209, 9, 124], + [162, 97, 27, 101, 196, 115, 166, 134, 30, 13, 237, 211, 142, 107, 20, 138, 87, 77, 165, 10, 133, 77, 181, 60, 105, 241, 234, 73, 65, 240, 214, 40], + [168, 243, 128, 29, 140, 120, 224, 144, 194, 1, 238, 189, 86, 169, 82, 167, 233, 13, 83, 92, 237, 86, 132, 253, 211, 253, 103, 106, 154, 207, 75, 68], + [76, 228, 33, 55, 12, 240, 37, 125, 134, 150, 24, 236, 37, 195, 36, 237, 76, 108, 127, 101, 40, 146, 151, 163, 193, 52, 51, 44, 33, 46, 53, 11], + [60, 51, 69, 125, 109, 17, 237, 123, 60, 82, 245, 245, 89, 208, 48, 121, 2, 208, 151, 80, 79, 101, 160, 185, 87, 194, 175, 234, 146, 246, 63, 28], + [186, 80, 165, 140, 50, 132, 33, 151, 29, 245, 67, 142, 199, 59, 10, 187, 95, 78, 69, 71, 166, 254, 108, 31, 9, 9, 6, 230, 11, 71, 49, 67], + [166, 148, 132, 242, 177, 14, 194, 241, 222, 161, 147, 148, 66, 61, 87, 111, 145, 198, 181, 171, 35, 21, 179, 137, 244, 225, 8, 188, 240, 170, 40, 64], + ] + .into_iter() + .map(|acc| + AccountId::from(acc) + ).for_each(|acc| { + if 
!Balances::unreserve(&acc, to_unreserve).is_zero() { + failure += 1; + }; + }); + frame_support::debug::info!("Migration to fix voters happened. Accounts with inaccurate reserved amount: {}", failure); + ::BlockWeights::get().max_block + } +} + pub struct CustomOnRuntimeUpgrade; impl frame_support::traits::OnRuntimeUpgrade for CustomOnRuntimeUpgrade { fn on_runtime_upgrade() -> frame_support::weights::Weight { - // Update scheduler origin usage - #[derive(Encode, Decode)] - #[allow(non_camel_case_types)] - pub enum OldOriginCaller { - system(frame_system::Origin), - pallet_collective_Instance1( - pallet_collective::Origin - ), - pallet_collective_Instance2( - pallet_collective::Origin - ), - } - - impl Into for OldOriginCaller { - fn into(self) -> OriginCaller { - match self { - OldOriginCaller::system(o) => OriginCaller::system(o), - OldOriginCaller::pallet_collective_Instance1(o) => - OriginCaller::pallet_collective_Instance1(o), - OldOriginCaller::pallet_collective_Instance2(o) => - OriginCaller::pallet_collective_Instance2(o), - } - } - } - - pallet_scheduler::Module::::migrate_origin::(); - - ::MaximumBlockWeight::get() + 0 } } @@ -1019,7 +1208,7 @@ pub type Executive = frame_executive::Executive< frame_system::ChainContext, Runtime, AllModules, - CustomOnRuntimeUpgrade + FixCouncilHistoricalVotes, >; /// The payload being signed in the transactions. pub type SignedPayload = generic::SignedPayload; @@ -1110,7 +1299,7 @@ sp_api::impl_runtime_apis! { } fn check_validation_outputs( _: Id, - _: primitives::v1::ValidationOutputs, + _: primitives::v1::CandidateCommitments, ) -> bool { false } @@ -1119,27 +1308,37 @@ sp_api::impl_runtime_apis! 
{ 0 } + fn session_info(_: SessionIndex) -> Option { + None + } + fn validation_code(_: Id, _: OccupiedCoreAssumption) -> Option { None } - fn candidate_pending_availability(_: Id) -> Option> { + fn historical_validation_code(_: Id, _: BlockNumber) -> Option { None } - fn candidate_events() -> Vec> { - Vec::new() + fn candidate_pending_availability(_: Id) -> Option> { + None } - fn validator_discovery(_: Vec) -> Vec> { + fn candidate_events() -> Vec> { Vec::new() } fn dmq_contents( _recipient: Id, - ) -> Vec> { + ) -> Vec> { Vec::new() } + + fn inbound_hrmp_channels_contents( + _recipient: Id + ) -> BTreeMap>> { + BTreeMap::new() + } } impl fg_primitives::GrandpaApi for Runtime { @@ -1166,7 +1365,7 @@ sp_api::impl_runtime_apis! { _set_id: fg_primitives::SetId, authority_id: fg_primitives::AuthorityId, ) -> Option { - use codec::Encode; + use parity_scale_codec::Encode; Historical::prove((fg_primitives::KEY_TYPE, authority_id)) .map(|p| p.encode()) @@ -1199,7 +1398,7 @@ sp_api::impl_runtime_apis! { _slot_number: babe_primitives::SlotNumber, authority_id: babe_primitives::AuthorityId, ) -> Option { - use codec::Encode; + use parity_scale_codec::Encode; Historical::prove((babe_primitives::KEY_TYPE, authority_id)) .map(|p| p.encode()) @@ -1265,9 +1464,9 @@ sp_api::impl_runtime_apis! 
{ use pallet_offences_benchmarking::Module as OffencesBench; use frame_system_benchmarking::Module as SystemBench; - impl pallet_session_benchmarking::Trait for Runtime {} - impl pallet_offences_benchmarking::Trait for Runtime {} - impl frame_system_benchmarking::Trait for Runtime {} + impl pallet_session_benchmarking::Config for Runtime {} + impl pallet_offences_benchmarking::Config for Runtime {} + impl frame_system_benchmarking::Config for Runtime {} let whitelist: Vec = vec![ // Block Number @@ -1321,7 +1520,7 @@ mod test_fees { use frame_support::storage::StorageValue; use sp_runtime::FixedPointNumber; use frame_support::weights::GetDispatchInfo; - use codec::Encode; + use parity_scale_codec::Encode; use pallet_transaction_payment::Multiplier; use separator::Separatable; @@ -1329,11 +1528,12 @@ mod test_fees { #[test] #[ignore] fn block_cost() { - let raw_fee = WeightToFee::calc(&MaximumBlockWeight::get()); + let max_block_weight = BlockWeights::get().max_block; + let raw_fee = WeightToFee::calc(&max_block_weight); println!( "Full Block weight == {} // WeightToFee(full_block) == {} plank", - MaximumBlockWeight::get(), + max_block_weight, raw_fee.separated_string(), ); } diff --git a/runtime/kusama/src/weights/frame_system.rs b/runtime/kusama/src/weights/frame_system.rs index 482ca0767c7737b3168523a1207f2dab8b520314..b44c68542e73cc5c6595ef424c5c81adbdcfcadb 100644 --- a/runtime/kusama/src/weights/frame_system.rs +++ b/runtime/kusama/src/weights/frame_system.rs @@ -15,7 +15,25 @@ // along with Polkadot. If not, see . //! Weights for frame_system //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-10-30, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 128 + +// Executed Command: +// ./target/release/polkadot +// benchmark +// --chain +// kusama-dev +// --steps=50 +// --repeat=20 +// --pallet=frame_system +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header +// ./file_header.txt +// --output=./runtime/kusama/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,36 +41,37 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for frame_system. pub struct WeightInfo(PhantomData); -impl frame_system::WeightInfo for WeightInfo { - fn remark(_b: u32) -> Weight { - (1_861_000 as Weight) +impl frame_system::WeightInfo for WeightInfo { + fn remark(_b: u32, ) -> Weight { + (1_815_000 as Weight) } fn set_heap_pages() -> Weight { - (2_431_000 as Weight) + (2_463_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_changes_trie_config() -> Weight { - (9_680_000 as Weight) + (11_280_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn set_storage(i: u32, ) -> Weight { (0 as Weight) - .saturating_add((793_000 as Weight).saturating_mul(i as Weight)) + .saturating_add((821_000 as Weight).saturating_mul(i as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) } fn kill_storage(i: u32, ) -> Weight { (0 as Weight) - .saturating_add((552_000 as Weight).saturating_mul(i as Weight)) + .saturating_add((549_000 as Weight).saturating_mul(i as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) } fn kill_prefix(p: u32, ) -> Weight { (0 as Weight) - .saturating_add((865_000 as Weight).saturating_mul(p as Weight)) + .saturating_add((872_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().writes((1 as 
Weight).saturating_mul(p as Weight))) } fn suicide() -> Weight { - (35_363_000 as Weight) + (35_050_000 as Weight) } } diff --git a/runtime/kusama/src/weights/pallet_balances.rs b/runtime/kusama/src/weights/pallet_balances.rs index 66ea7a83621bdd314dc2bf830af8c023e15b0d2a..7e6848c2fb3ac39382ceb245fab79dd452da83d4 100644 --- a/runtime/kusama/src/weights/pallet_balances.rs +++ b/runtime/kusama/src/weights/pallet_balances.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_balances +//! Autogenerated weights for pallet_balances +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=kusama-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_balances +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/kusama/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,30 +40,31 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_balances. 
pub struct WeightInfo(PhantomData); -impl pallet_balances::WeightInfo for WeightInfo { +impl pallet_balances::WeightInfo for WeightInfo { fn transfer() -> Weight { - (90_276_000 as Weight) + (93_087_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn transfer_keep_alive() -> Weight { - (61_636_000 as Weight) + (63_971_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_balance_creating() -> Weight { - (33_748_000 as Weight) + (35_018_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_balance_killing() -> Weight { - (42_563_000 as Weight) + (44_144_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_transfer() -> Weight { - (88_694_000 as Weight) + (91_707_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } diff --git a/runtime/kusama/src/weights/pallet_collective.rs b/runtime/kusama/src/weights/pallet_collective.rs index 860f9b46c5ffe371332309fe3e42e4c6698c7966..678d3c316811486d060ddeccc09c13116b5091aa 100644 --- a/runtime/kusama/src/weights/pallet_collective.rs +++ b/runtime/kusama/src/weights/pallet_collective.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_collective +//! Autogenerated weights for pallet_collective +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=kusama-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_collective +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/kusama/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,77 +40,100 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_collective. pub struct WeightInfo(PhantomData); -impl pallet_collective::WeightInfo for WeightInfo { +impl pallet_collective::WeightInfo for WeightInfo { fn set_members(m: u32, n: u32, p: u32, ) -> Weight { (0 as Weight) - .saturating_add((20_311_000 as Weight).saturating_mul(m as Weight)) - .saturating_add((109_000 as Weight).saturating_mul(n as Weight)) - .saturating_add((27_275_000 as Weight).saturating_mul(p as Weight)) + // Standard Error: 9_000 + .saturating_add((20_739_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 9_000 + .saturating_add((50_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 9_000 + .saturating_add((28_199_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(p as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) } fn execute(b: u32, m: u32, ) -> Weight { - (29_660_000 as Weight) - .saturating_add((4_000 as Weight).saturating_mul(b as Weight)) - .saturating_add((112_000 as Weight).saturating_mul(m as Weight)) + (30_949_000 as Weight) + // Standard Error: 0 + .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 0 + .saturating_add((111_000 as Weight).saturating_mul(m as Weight)) 
.saturating_add(T::DbWeight::get().reads(1 as Weight)) } fn propose_execute(b: u32, m: u32, ) -> Weight { - (37_004_000 as Weight) - .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) + (37_904_000 as Weight) + // Standard Error: 0 + .saturating_add((4_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 0 .saturating_add((220_000 as Weight).saturating_mul(m as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) } fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { - (57_580_000 as Weight) - .saturating_add((6_000 as Weight).saturating_mul(b as Weight)) - .saturating_add((128_000 as Weight).saturating_mul(m as Weight)) - .saturating_add((577_000 as Weight).saturating_mul(p as Weight)) + (62_075_000 as Weight) + // Standard Error: 0 + .saturating_add((5_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 0 + .saturating_add((115_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 0 + .saturating_add((588_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn vote(m: u32, ) -> Weight { - (43_150_000 as Weight) - .saturating_add((277_000 as Weight).saturating_mul(m as Weight)) + (43_811_000 as Weight) + // Standard Error: 0 + .saturating_add((281_000 as Weight).saturating_mul(m as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn close_early_disapproved(m: u32, p: u32, ) -> Weight { - (57_293_000 as Weight) - .saturating_add((214_000 as Weight).saturating_mul(m as Weight)) - .saturating_add((523_000 as Weight).saturating_mul(p as Weight)) + (59_086_000 as Weight) + // Standard Error: 1_000 + .saturating_add((222_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 1_000 + .saturating_add((542_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) 
.saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { - (82_624_000 as Weight) - .saturating_add((4_000 as Weight).saturating_mul(b as Weight)) - .saturating_add((219_000 as Weight).saturating_mul(m as Weight)) - .saturating_add((531_000 as Weight).saturating_mul(p as Weight)) + (84_535_000 as Weight) + // Standard Error: 0 + .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 0 + .saturating_add((221_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 0 + .saturating_add((557_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn close_disapproved(m: u32, p: u32, ) -> Weight { - (63_161_000 as Weight) + (65_098_000 as Weight) + // Standard Error: 0 .saturating_add((221_000 as Weight).saturating_mul(m as Weight)) - .saturating_add((531_000 as Weight).saturating_mul(p as Weight)) + // Standard Error: 0 + .saturating_add((552_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { - (88_758_000 as Weight) + (90_884_000 as Weight) + // Standard Error: 0 .saturating_add((4_000 as Weight).saturating_mul(b as Weight)) - .saturating_add((219_000 as Weight).saturating_mul(m as Weight)) - .saturating_add((536_000 as Weight).saturating_mul(p as Weight)) + // Standard Error: 0 + .saturating_add((221_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 0 + .saturating_add((558_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn disapprove_proposal(p: u32, ) -> Weight { - (33_841_000 as Weight) - .saturating_add((530_000 as Weight).saturating_mul(p as Weight)) + (34_674_000 as Weight) + // 
Standard Error: 0 + .saturating_add((552_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } diff --git a/runtime/kusama/src/weights/pallet_democracy.rs b/runtime/kusama/src/weights/pallet_democracy.rs index 46b2398718c0912123085065d1e1a6ab97cd92a9..d6ee82eb959a172c45080a36490716bfdc3b97c8 100644 --- a/runtime/kusama/src/weights/pallet_democracy.rs +++ b/runtime/kusama/src/weights/pallet_democracy.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_democracy +//! Autogenerated weights for pallet_democracy +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=kusama-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_democracy +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/kusama/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,148 +40,167 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_democracy. 
pub struct WeightInfo(PhantomData); -impl pallet_democracy::WeightInfo for WeightInfo { +impl pallet_democracy::WeightInfo for WeightInfo { fn propose() -> Weight { - (72_831_000 as Weight) + (76_513_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn second(s: u32, ) -> Weight { - (48_631_000 as Weight) - .saturating_add((185_000 as Weight).saturating_mul(s as Weight)) + (50_536_000 as Weight) + // Standard Error: 0 + .saturating_add((194_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn vote_new(r: u32, ) -> Weight { - (58_555_000 as Weight) - .saturating_add((222_000 as Weight).saturating_mul(r as Weight)) + (60_328_000 as Weight) + // Standard Error: 0 + .saturating_add((227_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn vote_existing(r: u32, ) -> Weight { - (58_385_000 as Weight) - .saturating_add((229_000 as Weight).saturating_mul(r as Weight)) + (60_063_000 as Weight) + // Standard Error: 0 + .saturating_add((232_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn emergency_cancel() -> Weight { - (35_882_000 as Weight) + (37_941_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn blacklist(p: u32, ) -> Weight { - (116_728_000 as Weight) - .saturating_add((791_000 as Weight).saturating_mul(p as Weight)) + (121_082_000 as Weight) + // Standard Error: 7_000 + .saturating_add((816_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(6 as Weight)) } fn external_propose(v: u32, ) -> Weight { - 
(17_952_000 as Weight) - .saturating_add((105_000 as Weight).saturating_mul(v as Weight)) + (18_656_000 as Weight) + // Standard Error: 0 + .saturating_add((107_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn external_propose_majority() -> Weight { - (4_114_000 as Weight) + (4_291_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn external_propose_default() -> Weight { - (4_110_000 as Weight) + (4_484_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn fast_track() -> Weight { - (36_363_000 as Weight) + (38_722_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn veto_external(v: u32, ) -> Weight { - (37_302_000 as Weight) - .saturating_add((175_000 as Weight).saturating_mul(v as Weight)) + (39_271_000 as Weight) + // Standard Error: 0 + .saturating_add((187_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn cancel_proposal(p: u32, ) -> Weight { - (80_683_000 as Weight) - .saturating_add((868_000 as Weight).saturating_mul(p as Weight)) + (84_923_000 as Weight) + // Standard Error: 0 + .saturating_add((879_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn cancel_referendum() -> Weight { - (21_590_000 as Weight) + (22_591_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn cancel_queued(r: u32, ) -> Weight { - (40_401_000 as Weight) - .saturating_add((3_379_000 as Weight).saturating_mul(r as Weight)) + (42_351_000 as Weight) + // Standard Error: 1_000 + .saturating_add((3_421_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) 
.saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn on_initialize_base(r: u32, ) -> Weight { - (13_303_000 as Weight) - .saturating_add((6_439_000 as Weight).saturating_mul(r as Weight)) + (16_859_000 as Weight) + // Standard Error: 3_000 + .saturating_add((6_940_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) } fn delegate(r: u32, ) -> Weight { - (76_954_000 as Weight) - .saturating_add((9_459_000 as Weight).saturating_mul(r as Weight)) + (81_043_000 as Weight) + // Standard Error: 2_000 + .saturating_add((10_032_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(4 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(r as Weight))) } fn undelegate(r: u32, ) -> Weight { - (38_797_000 as Weight) - .saturating_add((9_393_000 as Weight).saturating_mul(r as Weight)) + (40_537_000 as Weight) + // Standard Error: 2_000 + .saturating_add((10_019_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(r as Weight))) } fn clear_public_proposals() -> Weight { - (3_582_000 as Weight) + (3_649_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn note_preimage(b: u32, ) -> Weight { - (55_700_000 as Weight) + (57_601_000 as Weight) + // Standard Error: 0 .saturating_add((4_000 as Weight).saturating_mul(b as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn note_imminent_preimage(b: u32, ) 
-> Weight { - (37_682_000 as Weight) + (39_448_000 as Weight) + // Standard Error: 0 .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn reap_preimage(b: u32, ) -> Weight { - (50_749_000 as Weight) + (53_441_000 as Weight) + // Standard Error: 0 .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn unlock_remove(r: u32, ) -> Weight { - (49_131_000 as Weight) - .saturating_add((38_000 as Weight).saturating_mul(r as Weight)) + (50_814_000 as Weight) + // Standard Error: 0 + .saturating_add((39_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn unlock_set(r: u32, ) -> Weight { - (44_952_000 as Weight) - .saturating_add((219_000 as Weight).saturating_mul(r as Weight)) + (46_171_000 as Weight) + // Standard Error: 0 + .saturating_add((222_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn remove_vote(r: u32, ) -> Weight { - (26_662_000 as Weight) - .saturating_add((215_000 as Weight).saturating_mul(r as Weight)) + (28_134_000 as Weight) + // Standard Error: 0 + .saturating_add((217_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn remove_other_vote(r: u32, ) -> Weight { - (26_998_000 as Weight) - .saturating_add((216_000 as Weight).saturating_mul(r as Weight)) + (28_233_000 as Weight) + // Standard Error: 0 + .saturating_add((219_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } diff --git 
a/runtime/kusama/src/weights/pallet_elections_phragmen.rs b/runtime/kusama/src/weights/pallet_elections_phragmen.rs index 6f455973d202092249d54c43fcf4ba8ac4ca0bbe..93342c226600d0b91e0488fbd142145d3bc39b59 100644 --- a/runtime/kusama/src/weights/pallet_elections_phragmen.rs +++ b/runtime/kusama/src/weights/pallet_elections_phragmen.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_elections_phragmen +//! Autogenerated weights for pallet_elections_phragmen +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=kusama-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_elections_phragmen +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/kusama/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,68 +40,77 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_elections_phragmen. 
pub struct WeightInfo(PhantomData); -impl pallet_elections_phragmen::WeightInfo for WeightInfo { +impl pallet_elections_phragmen::WeightInfo for WeightInfo { fn vote(v: u32, ) -> Weight { - (82_513_000 as Weight) - .saturating_add((120_000 as Weight).saturating_mul(v as Weight)) + (86_473_000 as Weight) + // Standard Error: 9_000 + .saturating_add((199_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn vote_update(v: u32, ) -> Weight { - (51_149_000 as Weight) - .saturating_add((102_000 as Weight).saturating_mul(v as Weight)) + (53_531_000 as Weight) + // Standard Error: 8_000 + .saturating_add((126_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn remove_voter() -> Weight { - (67_398_000 as Weight) + (69_725_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn report_defunct_voter_correct(c: u32, v: u32, ) -> Weight { (0 as Weight) - .saturating_add((1_676_000 as Weight).saturating_mul(c as Weight)) - .saturating_add((33_438_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 2_000 + .saturating_add((1_673_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 53_000 + .saturating_add((33_921_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(7 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn report_defunct_voter_incorrect(c: u32, v: u32, ) -> Weight { (0 as Weight) - .saturating_add((1_678_000 as Weight).saturating_mul(c as Weight)) - .saturating_add((33_333_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 0 + .saturating_add((1_696_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 12_000 + .saturating_add((33_906_000 as Weight).saturating_mul(v as Weight)) 
.saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn submit_candidacy(c: u32, ) -> Weight { - (67_154_000 as Weight) - .saturating_add((273_000 as Weight).saturating_mul(c as Weight)) + (70_603_000 as Weight) + // Standard Error: 0 + .saturating_add((276_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn renounce_candidacy_candidate(c: u32, ) -> Weight { - (40_784_000 as Weight) - .saturating_add((141_000 as Weight).saturating_mul(c as Weight)) + (42_985_000 as Weight) + // Standard Error: 0 + .saturating_add((140_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn renounce_candidacy_members() -> Weight { - (73_194_000 as Weight) + (76_320_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn renounce_candidacy_runners_up() -> Weight { - (44_186_000 as Weight) + (46_198_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn remove_member_with_replacement() -> Weight { - (110_232_000 as Weight) + (115_357_000 as Weight) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(5 as Weight)) } fn remove_member_wrong_refund() -> Weight { - (8_411_000 as Weight) + (8_869_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) } } diff --git a/runtime/kusama/src/weights/pallet_identity.rs b/runtime/kusama/src/weights/pallet_identity.rs index 724dab95754c523efe7f142cbf3af1a049295ad5..86d5e5261ccd820975d7410cc31ff3fd397b0d42 100644 --- a/runtime/kusama/src/weights/pallet_identity.rs +++ b/runtime/kusama/src/weights/pallet_identity.rs @@ -13,9 +13,26 @@ // You should have received a copy of the 
GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_identity +//! Autogenerated weights for pallet_identity +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=kusama-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_identity +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/kusama/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,114 +40,139 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_identity. pub struct WeightInfo(PhantomData); -impl pallet_identity::WeightInfo for WeightInfo { +impl pallet_identity::WeightInfo for WeightInfo { fn add_registrar(r: u32, ) -> Weight { - (26_389_000 as Weight) - .saturating_add((287_000 as Weight).saturating_mul(r as Weight)) + (28_419_000 as Weight) + // Standard Error: 2_000 + .saturating_add((289_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_identity(r: u32, x: u32, ) -> Weight { - (70_587_000 as Weight) - .saturating_add((238_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((1_729_000 as Weight).saturating_mul(x as Weight)) + (73_891_000 as Weight) + // Standard Error: 19_000 + .saturating_add((279_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 2_000 + .saturating_add((1_819_000 as Weight).saturating_mul(x as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) 
.saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_subs_new(s: u32, ) -> Weight { - (50_147_000 as Weight) - .saturating_add((9_266_000 as Weight).saturating_mul(s as Weight)) + (52_415_000 as Weight) + // Standard Error: 1_000 + .saturating_add((9_876_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn set_subs_old(p: u32, ) -> Weight { - (46_920_000 as Weight) - .saturating_add((3_242_000 as Weight).saturating_mul(p as Weight)) + (48_406_000 as Weight) + // Standard Error: 0 + .saturating_add((3_392_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) } fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight { - (60_595_000 as Weight) - .saturating_add((147_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((3_267_000 as Weight).saturating_mul(s as Weight)) - .saturating_add((1_033_000 as Weight).saturating_mul(x as Weight)) + (61_817_000 as Weight) + // Standard Error: 8_000 + .saturating_add((202_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 1_000 + .saturating_add((3_417_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 1_000 + .saturating_add((1_075_000 as Weight).saturating_mul(x as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn request_judgement(r: u32, x: u32, ) -> Weight { - (70_072_000 as Weight) - .saturating_add((355_000 as Weight).saturating_mul(r as Weight)) - 
.saturating_add((2_011_000 as Weight).saturating_mul(x as Weight)) + (73_843_000 as Weight) + // Standard Error: 9_000 + .saturating_add((348_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 1_000 + .saturating_add((2_085_000 as Weight).saturating_mul(x as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn cancel_request(r: u32, x: u32, ) -> Weight { - (60_845_000 as Weight) - .saturating_add((220_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((1_993_000 as Weight).saturating_mul(x as Weight)) + (63_423_000 as Weight) + // Standard Error: 11_000 + .saturating_add((237_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 1_000 + .saturating_add((2_067_000 as Weight).saturating_mul(x as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_fee(r: u32, ) -> Weight { - (10_250_000 as Weight) + (10_954_000 as Weight) + // Standard Error: 1_000 .saturating_add((255_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_account_id(r: u32, ) -> Weight { - (11_514_000 as Weight) - .saturating_add((259_000 as Weight).saturating_mul(r as Weight)) + (12_327_000 as Weight) + // Standard Error: 1_000 + .saturating_add((263_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_fields(r: u32, ) -> Weight { - (10_238_000 as Weight) - .saturating_add((253_000 as Weight).saturating_mul(r as Weight)) + (11_006_000 as Weight) + // Standard Error: 1_000 + .saturating_add((255_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn provide_judgement(r: u32, x: u32, ) -> Weight { - 
(47_747_000 as Weight) - .saturating_add((273_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((2_000_000 as Weight).saturating_mul(x as Weight)) + (49_635_000 as Weight) + // Standard Error: 9_000 + .saturating_add((296_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 1_000 + .saturating_add((2_075_000 as Weight).saturating_mul(x as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn kill_identity(r: u32, s: u32, x: u32, ) -> Weight { - (98_604_000 as Weight) - .saturating_add((111_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((3_280_000 as Weight).saturating_mul(s as Weight)) - .saturating_add((1_000 as Weight).saturating_mul(x as Weight)) + (101_563_000 as Weight) + // Standard Error: 6_000 + .saturating_add((207_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 0 + .saturating_add((3_404_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 + .saturating_add((8_000 as Weight).saturating_mul(x as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn add_sub(s: u32, ) -> Weight { - (70_485_000 as Weight) + (73_298_000 as Weight) + // Standard Error: 0 .saturating_add((183_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn rename_sub(s: u32, ) -> Weight { - (23_378_000 as Weight) - .saturating_add((26_000 as Weight).saturating_mul(s as Weight)) + (23_667_000 as Weight) + // Standard Error: 0 + .saturating_add((25_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn remove_sub(s: u32, ) -> Weight { - (66_778_000 as Weight) - .saturating_add((156_000 as 
Weight).saturating_mul(s as Weight)) + (69_636_000 as Weight) + // Standard Error: 0 + .saturating_add((160_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn quit_sub(s: u32, ) -> Weight { - (44_017_000 as Weight) - .saturating_add((154_000 as Weight).saturating_mul(s as Weight)) + (45_890_000 as Weight) + // Standard Error: 0 + .saturating_add((156_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } diff --git a/runtime/kusama/src/weights/pallet_im_online.rs b/runtime/kusama/src/weights/pallet_im_online.rs index 4638da2375c30a1cad573e8b16c49ee43792be62..9281292314b05017b521be320eeb7a05451494c1 100644 --- a/runtime/kusama/src/weights/pallet_im_online.rs +++ b/runtime/kusama/src/weights/pallet_im_online.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_im_online +//! Autogenerated weights for pallet_im_online +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=kusama-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_im_online +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/kusama/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,11 +40,14 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_im_online. 
pub struct WeightInfo(PhantomData); -impl pallet_im_online::WeightInfo for WeightInfo { +impl pallet_im_online::WeightInfo for WeightInfo { fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight { - (107_607_000 as Weight) - .saturating_add((210_000 as Weight).saturating_mul(k as Weight)) + (112_814_000 as Weight) + // Standard Error: 0 + .saturating_add((215_000 as Weight).saturating_mul(k as Weight)) + // Standard Error: 2_000 .saturating_add((491_000 as Weight).saturating_mul(e as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) diff --git a/runtime/kusama/src/weights/pallet_indices.rs b/runtime/kusama/src/weights/pallet_indices.rs index eb990ea4f6adf23db1cd696dc2300b1e5af3a262..1f5274946142d1e2abf10be9e011c13d6d24a9e8 100644 --- a/runtime/kusama/src/weights/pallet_indices.rs +++ b/runtime/kusama/src/weights/pallet_indices.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_indices +//! Autogenerated weights for pallet_indices +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=kusama-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_indices +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/kusama/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,30 +40,31 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_indices. 
pub struct WeightInfo(PhantomData); -impl pallet_indices::WeightInfo for WeightInfo { +impl pallet_indices::WeightInfo for WeightInfo { fn claim() -> Weight { - (50_405_000 as Weight) + (53_201_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn transfer() -> Weight { - (56_712_000 as Weight) + (59_579_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn free() -> Weight { - (45_117_000 as Weight) + (47_496_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_transfer() -> Weight { - (46_491_000 as Weight) + (49_084_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn freeze() -> Weight { - (42_038_000 as Weight) + (44_478_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } diff --git a/runtime/kusama/src/weights/pallet_multisig.rs b/runtime/kusama/src/weights/pallet_multisig.rs index 2486b51963470a69c7749937d737a329a28ee85a..0cd3cf1968c25290a7c0041ef2bde5949988d4fb 100644 --- a/runtime/kusama/src/weights/pallet_multisig.rs +++ b/runtime/kusama/src/weights/pallet_multisig.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_multisig +//! Autogenerated weights for pallet_multisig +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=kusama-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_multisig +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/kusama/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,68 +40,84 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_multisig. pub struct WeightInfo(PhantomData); -impl pallet_multisig::WeightInfo for WeightInfo { +impl pallet_multisig::WeightInfo for WeightInfo { fn as_multi_threshold_1(z: u32, ) -> Weight { - (12_114_000 as Weight) + (12_476_000 as Weight) + // Standard Error: 0 .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) } fn as_multi_create(s: u32, z: u32, ) -> Weight { - (64_959_000 as Weight) + (69_580_000 as Weight) + // Standard Error: 0 .saturating_add((89_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn as_multi_create_store(s: u32, z: u32, ) -> Weight { - (73_539_000 as Weight) + (78_436_000 as Weight) + // Standard Error: 0 .saturating_add((92_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 .saturating_add((3_000 as Weight).saturating_mul(z as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn as_multi_approve(s: u32, z: u32, ) -> Weight { - (39_655_000 as Weight) + (41_554_000 as Weight) + // Standard Error: 0 .saturating_add((108_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) 
.saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn as_multi_approve_store(s: u32, z: u32, ) -> Weight { - (70_971_000 as Weight) - .saturating_add((125_000 as Weight).saturating_mul(s as Weight)) + (74_444_000 as Weight) + // Standard Error: 0 + .saturating_add((124_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 .saturating_add((3_000 as Weight).saturating_mul(z as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn as_multi_complete(s: u32, z: u32, ) -> Weight { - (81_735_000 as Weight) - .saturating_add((247_000 as Weight).saturating_mul(s as Weight)) + (85_497_000 as Weight) + // Standard Error: 0 + .saturating_add((245_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 .saturating_add((5_000 as Weight).saturating_mul(z as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn approve_as_multi_create(s: u32, ) -> Weight { - (64_141_000 as Weight) - .saturating_add((91_000 as Weight).saturating_mul(s as Weight)) + (69_232_000 as Weight) + // Standard Error: 0 + .saturating_add((86_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn approve_as_multi_approve(s: u32, ) -> Weight { - (38_382_000 as Weight) - .saturating_add((110_000 as Weight).saturating_mul(s as Weight)) + (40_932_000 as Weight) + // Standard Error: 0 + .saturating_add((107_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn approve_as_multi_complete(s: u32, ) -> Weight { - (152_683_000 as Weight) - .saturating_add((253_000 as Weight).saturating_mul(s as Weight)) + (157_594_000 as Weight) + // Standard Error: 0 + .saturating_add((245_000 as 
Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn cancel_as_multi(s: u32, ) -> Weight { - (106_136_000 as Weight) - .saturating_add((94_000 as Weight).saturating_mul(s as Weight)) + (109_613_000 as Weight) + // Standard Error: 0 + .saturating_add((89_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } diff --git a/runtime/kusama/src/weights/pallet_proxy.rs b/runtime/kusama/src/weights/pallet_proxy.rs index 46e87e3446c370f70459e6e5e94841be73354d76..8943f9ef74375c808e92731cb81f070336b521df 100644 --- a/runtime/kusama/src/weights/pallet_proxy.rs +++ b/runtime/kusama/src/weights/pallet_proxy.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_proxy +//! Autogenerated weights for pallet_proxy +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=kusama-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_proxy +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/kusama/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,68 +40,83 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_proxy. 
pub struct WeightInfo(PhantomData); -impl pallet_proxy::WeightInfo for WeightInfo { +impl pallet_proxy::WeightInfo for WeightInfo { fn proxy(p: u32, ) -> Weight { - (29_879_000 as Weight) - .saturating_add((190_000 as Weight).saturating_mul(p as Weight)) + (30_904_000 as Weight) + // Standard Error: 1_000 + .saturating_add((196_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) } fn proxy_announced(a: u32, p: u32, ) -> Weight { - (62_619_000 as Weight) - .saturating_add((816_000 as Weight).saturating_mul(a as Weight)) - .saturating_add((193_000 as Weight).saturating_mul(p as Weight)) + (65_146_000 as Weight) + // Standard Error: 1_000 + .saturating_add((825_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 1_000 + .saturating_add((185_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn remove_announcement(a: u32, p: u32, ) -> Weight { - (39_975_000 as Weight) - .saturating_add((809_000 as Weight).saturating_mul(a as Weight)) - .saturating_add((10_000 as Weight).saturating_mul(p as Weight)) + (41_395_000 as Weight) + // Standard Error: 1_000 + .saturating_add((818_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 1_000 + .saturating_add((11_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn reject_announcement(a: u32, p: u32, ) -> Weight { - (39_897_000 as Weight) - .saturating_add((807_000 as Weight).saturating_mul(a as Weight)) - .saturating_add((15_000 as Weight).saturating_mul(p as Weight)) + (41_431_000 as Weight) + // Standard Error: 1_000 + .saturating_add((820_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 1_000 + .saturating_add((13_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) 
.saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn announce(a: u32, p: u32, ) -> Weight { - (63_697_000 as Weight) - .saturating_add((694_000 as Weight).saturating_mul(a as Weight)) - .saturating_add((188_000 as Weight).saturating_mul(p as Weight)) + (65_751_000 as Weight) + // Standard Error: 1_000 + .saturating_add((703_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 1_000 + .saturating_add((186_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn add_proxy(p: u32, ) -> Weight { - (42_291_000 as Weight) + (44_708_000 as Weight) + // Standard Error: 1_000 .saturating_add((196_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn remove_proxy(p: u32, ) -> Weight { - (37_963_000 as Weight) - .saturating_add((234_000 as Weight).saturating_mul(p as Weight)) + (40_043_000 as Weight) + // Standard Error: 1_000 + .saturating_add((235_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn remove_proxies(p: u32, ) -> Weight { - (36_357_000 as Weight) - .saturating_add((187_000 as Weight).saturating_mul(p as Weight)) + (38_286_000 as Weight) + // Standard Error: 1_000 + .saturating_add((189_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn anonymous(p: u32, ) -> Weight { - (60_547_000 as Weight) - .saturating_add((30_000 as Weight).saturating_mul(p as Weight)) + (63_581_000 as Weight) + // Standard Error: 1_000 + .saturating_add((25_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn kill_anonymous(p: u32, ) -> Weight { - 
(39_210_000 as Weight) - .saturating_add((180_000 as Weight).saturating_mul(p as Weight)) + (41_113_000 as Weight) + // Standard Error: 1_000 + .saturating_add((187_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } diff --git a/runtime/kusama/src/weights/pallet_scheduler.rs b/runtime/kusama/src/weights/pallet_scheduler.rs index 2caa59e66a45756186febf6d25aaac809ea4980b..f4bdbbfaf70d83bdc1ae65eec8d23b1f8c64407a 100644 --- a/runtime/kusama/src/weights/pallet_scheduler.rs +++ b/runtime/kusama/src/weights/pallet_scheduler.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_scheduler +//! Autogenerated weights for pallet_scheduler +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=kusama-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_scheduler +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/kusama/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,29 +40,34 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_scheduler. 
pub struct WeightInfo(PhantomData); -impl pallet_scheduler::WeightInfo for WeightInfo { +impl pallet_scheduler::WeightInfo for WeightInfo { fn schedule(s: u32, ) -> Weight { - (32_493_000 as Weight) - .saturating_add((49_000 as Weight).saturating_mul(s as Weight)) + (34_006_000 as Weight) + // Standard Error: 0 + .saturating_add((47_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn cancel(s: u32, ) -> Weight { - (29_719_000 as Weight) - .saturating_add((3_071_000 as Weight).saturating_mul(s as Weight)) + (30_954_000 as Weight) + // Standard Error: 6_000 + .saturating_add((3_073_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn schedule_named(s: u32, ) -> Weight { - (41_811_000 as Weight) - .saturating_add((61_000 as Weight).saturating_mul(s as Weight)) + (44_217_000 as Weight) + // Standard Error: 1_000 + .saturating_add((66_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn cancel_named(s: u32, ) -> Weight { - (33_725_000 as Weight) - .saturating_add((3_082_000 as Weight).saturating_mul(s as Weight)) + (35_521_000 as Weight) + // Standard Error: 6_000 + .saturating_add((3_084_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } diff --git a/runtime/kusama/src/weights/pallet_session.rs b/runtime/kusama/src/weights/pallet_session.rs index de4518005ddcc2d5850380e612d432c40b9c9544..1304deb4457e05817462b65ff4964e6723b75fb0 100644 --- a/runtime/kusama/src/weights/pallet_session.rs +++ b/runtime/kusama/src/weights/pallet_session.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. 
If not, see . -//! Weights for pallet_session +//! Autogenerated weights for pallet_session +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=kusama-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_session +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/kusama/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,15 +40,16 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_session. pub struct WeightInfo(PhantomData); -impl pallet_session::WeightInfo for WeightInfo { +impl pallet_session::WeightInfo for WeightInfo { fn set_keys() -> Weight { - (87_465_000 as Weight) + (91_470_000 as Weight) .saturating_add(T::DbWeight::get().reads(7 as Weight)) .saturating_add(T::DbWeight::get().writes(6 as Weight)) } fn purge_keys() -> Weight { - (52_357_000 as Weight) + (53_966_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(6 as Weight)) } diff --git a/runtime/kusama/src/weights/pallet_staking.rs b/runtime/kusama/src/weights/pallet_staking.rs index 35e2ebe2a861c575e27b46799b62c844daea4b86..7f19219ac50224926f22c270f594531bac3e6cfe 100644 --- a/runtime/kusama/src/weights/pallet_staking.rs +++ b/runtime/kusama/src/weights/pallet_staking.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_staking +//! Autogenerated weights for pallet_staking +//! //! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-30, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=kusama-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_staking +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/kusama/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,137 +40,151 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_staking. pub struct WeightInfo(PhantomData); -impl pallet_staking::WeightInfo for WeightInfo { +impl pallet_staking::WeightInfo for WeightInfo { fn bond() -> Weight { - (95_041_000 as Weight) + (97_060_000 as Weight) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn bond_extra() -> Weight { - (73_981_000 as Weight) + (76_691_000 as Weight) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn unbond() -> Weight { - (67_107_000 as Weight) + (69_501_000 as Weight) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn withdraw_unbonded_update(s: u32, ) -> Weight { - (67_795_000 as Weight) - .saturating_add((32_000 as Weight).saturating_mul(s as Weight)) + (69_487_000 as Weight) + // Standard Error: 0 + .saturating_add((28_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn withdraw_unbonded_kill(s: u32, ) -> Weight { - (110_537_000 as Weight) - 
.saturating_add((3_879_000 as Weight).saturating_mul(s as Weight)) + (113_859_000 as Weight) + // Standard Error: 1_000 + .saturating_add((3_977_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(7 as Weight)) .saturating_add(T::DbWeight::get().writes(8 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn validate() -> Weight { - (22_802_000 as Weight) + (23_991_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn nominate(n: u32, ) -> Weight { - (29_784_000 as Weight) - .saturating_add((401_000 as Weight).saturating_mul(n as Weight)) + (31_051_000 as Weight) + // Standard Error: 12_000 + .saturating_add((398_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn chill() -> Weight { - (22_719_000 as Weight) + (23_608_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn set_payee() -> Weight { - (15_354_000 as Weight) + (16_106_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_controller() -> Weight { - (33_783_000 as Weight) + (35_097_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn set_validator_count() -> Weight { - (3_126_000 as Weight) + (3_247_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_no_eras() -> Weight { - (3_548_000 as Weight) + (3_667_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_new_era() -> Weight { - (3_474_000 as Weight) + (3_661_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_new_era_always() -> Weight { - (3_518_000 as Weight) + (3_619_000 
as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_invulnerables(v: u32, ) -> Weight { - (3_639_000 as Weight) + (3_787_000 as Weight) + // Standard Error: 0 .saturating_add((9_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_unstake(s: u32, ) -> Weight { - (75_900_000 as Weight) - .saturating_add((3_891_000 as Weight).saturating_mul(s as Weight)) + (77_193_000 as Weight) + // Standard Error: 1_000 + .saturating_add((3_980_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(8 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn cancel_deferred_slash(s: u32, ) -> Weight { - (5_844_744_000 as Weight) - .saturating_add((34_644_000 as Weight).saturating_mul(s as Weight)) + (5_838_529_000 as Weight) + // Standard Error: 388_000 + .saturating_add((34_638_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn payout_stakers_dead_controller(n: u32, ) -> Weight { - (145_214_000 as Weight) - .saturating_add((57_875_000 as Weight).saturating_mul(n as Weight)) + (134_866_000 as Weight) + // Standard Error: 13_000 + .saturating_add((59_407_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(11 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) } fn payout_stakers_alive_staked(n: u32, ) -> Weight { - (173_115_000 as Weight) - .saturating_add((76_912_000 as Weight).saturating_mul(n as Weight)) + (169_692_000 as Weight) + // Standard Error: 14_000 + .saturating_add((77_518_000 as Weight).saturating_mul(n as Weight)) 
.saturating_add(T::DbWeight::get().reads(12 as Weight)) .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(3 as Weight)) .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(n as Weight))) } fn rebond(l: u32, ) -> Weight { - (46_569_000 as Weight) - .saturating_add((104_000 as Weight).saturating_mul(l as Weight)) + (47_084_000 as Weight) + // Standard Error: 2_000 + .saturating_add((103_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn set_history_depth(e: u32, ) -> Weight { (0 as Weight) - .saturating_add((36_641_000 as Weight).saturating_mul(e as Weight)) + // Standard Error: 63_000 + .saturating_add((38_667_000 as Weight).saturating_mul(e as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) .saturating_add(T::DbWeight::get().writes((7 as Weight).saturating_mul(e as Weight))) } fn reap_stash(s: u32, ) -> Weight { - (95_718_000 as Weight) - .saturating_add((3_875_000 as Weight).saturating_mul(s as Weight)) + (97_545_000 as Weight) + // Standard Error: 0 + .saturating_add((3_988_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(8 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn new_era(v: u32, n: u32, ) -> Weight { (0 as Weight) - .saturating_add((1_262_144_000 as Weight).saturating_mul(v as Weight)) - .saturating_add((143_471_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(10 as Weight)) + // Standard Error: 672_000 + .saturating_add((735_440_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 33_000 + .saturating_add((104_408_000 as Weight).saturating_mul(n as Weight)) + 
.saturating_add(T::DbWeight::get().reads(7 as Weight)) .saturating_add(T::DbWeight::get().reads((4 as Weight).saturating_mul(v as Weight))) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(8 as Weight)) @@ -161,10 +192,14 @@ impl pallet_staking::WeightInfo for WeightInfo { } fn submit_solution_better(v: u32, n: u32, a: u32, w: u32, ) -> Weight { (0 as Weight) - .saturating_add((879_000 as Weight).saturating_mul(v as Weight)) - .saturating_add((488_000 as Weight).saturating_mul(n as Weight)) - .saturating_add((133_102_000 as Weight).saturating_mul(a as Weight)) - .saturating_add((8_073_000 as Weight).saturating_mul(w as Weight)) + // Standard Error: 45_000 + .saturating_add((1_479_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 18_000 + .saturating_add((630_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 45_000 + .saturating_add((99_647_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 94_000 + .saturating_add((8_674_000 as Weight).saturating_mul(w as Weight)) .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().reads((4 as Weight).saturating_mul(a as Weight))) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(w as Weight))) diff --git a/runtime/kusama/src/weights/pallet_timestamp.rs b/runtime/kusama/src/weights/pallet_timestamp.rs index e3948c7c5fad5da2b69a15a3a4c6efc66968cd29..e4173ab5f960011d8156f3e5f59bbd9d672bf661 100644 --- a/runtime/kusama/src/weights/pallet_timestamp.rs +++ b/runtime/kusama/src/weights/pallet_timestamp.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_timestamp +//! Autogenerated weights for pallet_timestamp +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! 
DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=kusama-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_timestamp +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/kusama/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,14 +40,15 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_timestamp. pub struct WeightInfo(PhantomData); -impl pallet_timestamp::WeightInfo for WeightInfo { +impl pallet_timestamp::WeightInfo for WeightInfo { fn set() -> Weight { - (10_860_000 as Weight) + (11_338_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn on_finalize() -> Weight { - (6_055_000 as Weight) + (6_080_000 as Weight) } } diff --git a/runtime/kusama/src/weights/pallet_treasury.rs b/runtime/kusama/src/weights/pallet_treasury.rs index d8fe9b578b2750f5b2edb9750f81b1a1be5d7e57..73e1b6a82a35d379e3857baa343279f38e0ecd3c 100644 --- a/runtime/kusama/src/weights/pallet_treasury.rs +++ b/runtime/kusama/src/weights/pallet_treasury.rs @@ -1,21 +1,38 @@ -// This file is part of Substrate. +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. -// Copyright (C) 2020 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
-// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . +//! Autogenerated weights for pallet_treasury +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=kusama-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_treasury +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/kusama/src/weights/ -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0-rc6 #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,118 +40,127 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_treasury. 
pub struct WeightInfo(PhantomData); -impl pallet_treasury::WeightInfo for WeightInfo { +impl pallet_treasury::WeightInfo for WeightInfo { fn propose_spend() -> Weight { - (79604000 as Weight) + (53_899_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn reject_proposal() -> Weight { - (61001000 as Weight) + (84_208_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn approve_proposal() -> Weight { - (17835000 as Weight) + (11_576_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn report_awesome(r: u32, ) -> Weight { - (101602000 as Weight) - .saturating_add((2000 as Weight).saturating_mul(r as Weight)) + (66_679_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn retract_tip() -> Weight { - (82970000 as Weight) + (56_703_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn tip_new(r: u32, t: u32, ) -> Weight { - (63995000 as Weight) - .saturating_add((2000 as Weight).saturating_mul(r as Weight)) - .saturating_add((153000 as Weight).saturating_mul(t as Weight)) + (42_139_000 as Weight) + // Standard Error: 0 + .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 0 + .saturating_add((144_000 as Weight).saturating_mul(t as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn tip(t: u32, ) -> Weight { - (46765000 as Weight) - .saturating_add((711000 as Weight).saturating_mul(t as Weight)) + (27_980_000 as Weight) + // Standard Error: 1_000 + .saturating_add((691_000 as Weight).saturating_mul(t 
as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn close_tip(t: u32, ) -> Weight { - (160874000 as Weight) - .saturating_add((379000 as Weight).saturating_mul(t as Weight)) + (105_039_000 as Weight) + // Standard Error: 0 + .saturating_add((360_000 as Weight).saturating_mul(t as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn propose_bounty(d: u32, ) -> Weight { - (86198000 as Weight) - .saturating_add((1000 as Weight).saturating_mul(d as Weight)) + (57_452_000 as Weight) + // Standard Error: 0 + .saturating_add((1_000 as Weight).saturating_mul(d as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn approve_bounty() -> Weight { - (23063000 as Weight) + (15_509_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn propose_curator() -> Weight { - (18890000 as Weight) + (12_403_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn unassign_curator() -> Weight { - (66768000 as Weight) + (68_535_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn accept_curator() -> Weight { - (69131000 as Weight) + (48_001_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn award_bounty() -> Weight { - (48184000 as Weight) + (33_291_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn claim_bounty() -> Weight { - (243104000 as Weight) + (159_266_000 as Weight) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(5 as Weight)) } fn 
close_bounty_proposed() -> Weight { - (65917000 as Weight) + (67_667_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn close_bounty_active() -> Weight { - (157232000 as Weight) + (105_482_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn extend_bounty_expiry() -> Weight { - (46216000 as Weight) + (32_502_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn on_initialize_proposals(p: u32, ) -> Weight { - (119765000 as Weight) - .saturating_add((108368000 as Weight).saturating_mul(p as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) + (108_437_000 as Weight) + // Standard Error: 16_000 + .saturating_add((70_932_000 as Weight).saturating_mul(p as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(p as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(p as Weight))) } fn on_initialize_bounties(b: u32, ) -> Weight { - (112536000 as Weight) - .saturating_add((107132000 as Weight).saturating_mul(b as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) + (105_767_000 as Weight) + // Standard Error: 17_000 + .saturating_add((70_197_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(b as Weight))) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(b as Weight))) } } diff --git a/runtime/kusama/src/weights/pallet_utility.rs 
b/runtime/kusama/src/weights/pallet_utility.rs index 1ccd9750ed590ccb00ff52027d0e5f2d2360d6f8..10d6f0ac5aadbb4635bb2292afff69124960dd34 100644 --- a/runtime/kusama/src/weights/pallet_utility.rs +++ b/runtime/kusama/src/weights/pallet_utility.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_utility +//! Autogenerated weights for pallet_utility +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=kusama-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_utility +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/kusama/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,17 +40,20 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_utility. 
pub struct WeightInfo(PhantomData); -impl pallet_utility::WeightInfo for WeightInfo { +impl pallet_utility::WeightInfo for WeightInfo { fn batch(c: u32, ) -> Weight { - (18_450_000 as Weight) - .saturating_add((1_730_000 as Weight).saturating_mul(c as Weight)) + (19_612_000 as Weight) + // Standard Error: 0 + .saturating_add((1_988_000 as Weight).saturating_mul(c as Weight)) } fn as_derivative() -> Weight { - (5_360_000 as Weight) + (5_849_000 as Weight) } fn batch_all(c: u32, ) -> Weight { - (21_104_000 as Weight) - .saturating_add((1_509_000 as Weight).saturating_mul(c as Weight)) + (21_934_000 as Weight) + // Standard Error: 0 + .saturating_add((1_503_000 as Weight).saturating_mul(c as Weight)) } } diff --git a/runtime/kusama/src/weights/pallet_vesting.rs b/runtime/kusama/src/weights/pallet_vesting.rs index 9351e377ba43532d2e5e307a8f00f9e6d7fb90cb..398b4da6782de6eb243fd0c8102c6ee4b5c39c94 100644 --- a/runtime/kusama/src/weights/pallet_vesting.rs +++ b/runtime/kusama/src/weights/pallet_vesting.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_vesting +//! Autogenerated weights for pallet_vesting +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=kusama-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_vesting +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/kusama/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,41 +40,48 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_vesting. pub struct WeightInfo(PhantomData); -impl pallet_vesting::WeightInfo for WeightInfo { +impl pallet_vesting::WeightInfo for WeightInfo { fn vest_locked(l: u32, ) -> Weight { - (53_650_000 as Weight) - .saturating_add((208_000 as Weight).saturating_mul(l as Weight)) + (54_809_000 as Weight) + // Standard Error: 0 + .saturating_add((133_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn vest_unlocked(l: u32, ) -> Weight { - (56_936_000 as Weight) - .saturating_add((106_000 as Weight).saturating_mul(l as Weight)) + (59_001_000 as Weight) + // Standard Error: 2_000 + .saturating_add((107_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn vest_other_locked(l: u32, ) -> Weight { - (53_483_000 as Weight) - .saturating_add((209_000 as Weight).saturating_mul(l as Weight)) + (54_779_000 as Weight) + // Standard Error: 0 + .saturating_add((130_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn vest_other_unlocked(l: u32, ) -> Weight { - (56_512_000 as Weight) - .saturating_add((106_000 as Weight).saturating_mul(l as Weight)) + (58_762_000 as Weight) + // Standard 
Error: 2_000 + .saturating_add((109_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn vested_transfer(l: u32, ) -> Weight { - (117_377_000 as Weight) - .saturating_add((247_000 as Weight).saturating_mul(l as Weight)) + (119_976_000 as Weight) + // Standard Error: 8_000 + .saturating_add((174_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn force_vested_transfer(l: u32, ) -> Weight { - (116_623_000 as Weight) - .saturating_add((249_000 as Weight).saturating_mul(l as Weight)) + (119_342_000 as Weight) + // Standard Error: 8_000 + .saturating_add((168_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } diff --git a/runtime/parachains/Cargo.toml b/runtime/parachains/Cargo.toml index 25d65b8da34c3fce3707b0c1e7cd4ba9b3a47685..93475b4d0a4c6ca17a379bd1c798cebde6110a67 100644 --- a/runtime/parachains/Cargo.toml +++ b/runtime/parachains/Cargo.toml @@ -6,54 +6,55 @@ edition = "2018" [dependencies] bitvec = { version = "0.17.4", default-features = false, features = ["alloc"] } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -log = { version = "0.3.9", optional = true } -rustc-hex = { version = "2.0.1", default-features = false } -serde = { version = "1.0.102", default-features = false } -serde_derive = { version = "1.0.102", optional = true } +parity-scale-codec = { version = "1.3.5", default-features = false, features = ["derive"] } +log = "0.4.11" +rustc-hex = { version = "2.1.0", default-features = false } +serde = { version = "1.0.118", features = [ "derive" ], optional = true } +derive_more = "0.99.11" -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", 
default-features = false } -inherents = { package = "sp-inherents", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { package = "sp-std", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master", optional = true } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +inherents = { package = "sp-inherents", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-std = { package = "sp-std", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-session = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-staking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false 
} +sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", optional = true } -pallet-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-authorship = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-system = {git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-vesting = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-offences = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } +pallet-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-authorship = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-session = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +frame-support = { git = 
"https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-staking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +frame-system = {git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-vesting = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-offences = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false, optional = true } +xcm = { package = "xcm", path = "../../xcm", default-features = false } primitives = { package = "polkadot-primitives", path = "../../primitives", default-features = false } -libsecp256k1 = { version = "0.3.2", default-features = false, optional = true } +libsecp256k1 = { version = "0.3.5", default-features = false, optional = true } -rand = { version = "0.7", default-features = false } +rand = { version = "0.7.3", default-features = false } rand_chacha = { version = "0.2.2", default-features = false } [dev-dependencies] -futures = "0.3.4" -hex-literal = "0.2.1" -keyring = { package = "sp-keyring", git = "https://github.com/paritytech/substrate", branch = "master" } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "master" } 
-pallet-staking-reward-curve = { git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-treasury = { git = "https://github.com/paritytech/substrate", branch = "master" } -serde_json = "1.0.41" -libsecp256k1 = "0.3.2" -sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master"} +futures = "0.3.8" +hex-literal = "0.3.1" +keyring = { package = "sp-keyring", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +pallet-staking-reward-curve = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +pallet-treasury = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +serde_json = "1.0.60" +libsecp256k1 = "0.3.5" +sp-version = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27"} [features] @@ -61,11 +62,9 @@ default = ["std"] no_std = [] std = [ "bitvec/std", - "codec/std", - "log", + "parity-scale-codec/std", "rustc-hex/std", - "serde_derive", - "serde/std", + "serde", "primitives/std", "inherents/std", "sp-core/std", @@ -84,6 +83,7 @@ std = [ "frame-system/std", "pallet-timestamp/std", "pallet-vesting/std", + "xcm/std", ] runtime-benchmarks = [ "libsecp256k1/hmac", diff --git 
a/runtime/parachains/src/configuration.rs b/runtime/parachains/src/configuration.rs index 14a57ded184edf0047de61da7e96df28b1ba47ea..af693ed67629d853bb2e9e443292786d15311c7b 100644 --- a/runtime/parachains/src/configuration.rs +++ b/runtime/parachains/src/configuration.rs @@ -19,18 +19,20 @@ //! Configuration can change only at session boundaries and is buffered until then. use sp_std::prelude::*; -use primitives::v1::ValidatorId; +use primitives::v1::{Balance, ValidatorId, SessionIndex}; use frame_support::{ decl_storage, decl_module, decl_error, + ensure, dispatch::DispatchResult, weights::{DispatchClass, Weight}, }; -use codec::{Encode, Decode}; +use parity_scale_codec::{Encode, Decode}; use frame_system::ensure_root; +use sp_runtime::traits::Zero; /// All configuration of the runtime with respect to parachains and parathreads. -#[derive(Clone, Encode, Decode, PartialEq, Default)] -#[cfg_attr(test, derive(Debug))] +#[derive(Clone, Encode, Decode, PartialEq, sp_core::RuntimeDebug)] +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub struct HostConfiguration { /// The minimum frequency at which parachains can update their validation code. pub validation_upgrade_frequency: BlockNumber, @@ -43,21 +45,55 @@ pub struct HostConfiguration { pub max_code_size: u32, /// The maximum head-data size, in bytes. pub max_head_data_size: u32, + /// The maximum POV block size, in bytes. + pub max_pov_size: u32, /// The amount of execution cores to dedicate to parathread execution. pub parathread_cores: u32, /// The number of retries that a parathread author has to submit their block. pub parathread_retries: u32, - /// How often parachain groups should be rotated across parachains. Must be non-zero. + /// How often parachain groups should be rotated across parachains. + /// + /// Must be non-zero. pub group_rotation_frequency: BlockNumber, /// The availability period, in blocks, for parachains. 
This is the amount of blocks /// after inclusion that validators have to make the block available and signal its availability to - /// the chain. Must be at least 1. + /// the chain. + /// + /// Must be at least 1. pub chain_availability_period: BlockNumber, /// The availability period, in blocks, for parathreads. Same as the `chain_availability_period`, - /// but a differing timeout due to differing requirements. Must be at least 1. + /// but a differing timeout due to differing requirements. + /// + /// Must be at least 1. pub thread_availability_period: BlockNumber, /// The amount of blocks ahead to schedule parachains and parathreads. pub scheduling_lookahead: u32, + /// The maximum number of validators to have per core. + /// + /// `None` means no maximum. + pub max_validators_per_core: Option, + /// The amount of sessions to keep for disputes. + pub dispute_period: SessionIndex, + /// The amount of consensus slots that must pass between submitting an assignment and + /// submitting an approval vote before a validator is considered a no-show. + /// + /// Must be at least 1. + pub no_show_slots: u32, + /// The number of delay tranches in total. + pub n_delay_tranches: u32, + /// The width of the zeroth delay tranche for approval assignments. This many delay tranches + /// beyond 0 are all consolidated to form a wide 0 tranche. + pub zeroth_delay_tranche_width: u32, + /// The number of validators needed to approve a block. + pub needed_approvals: u32, + /// The number of samples to do of the RelayVRFModulo approval assignment criterion. + pub relay_vrf_modulo_samples: u32, + /// Total number of individual messages allowed in the parachain -> relay-chain message queue. + pub max_upward_queue_count: u32, + /// Total size of messages allowed in the parachain -> relay-chain message queue before which + /// no further messages may be added to it. If it exceeds this then the queue may contain only + /// a single message. 
+ pub max_upward_queue_size: u32, /// The maximum size of a message that can be put in a downward message queue. /// /// Since we require receiving at least one DMP message the obvious upper bound of the size is @@ -65,26 +101,141 @@ pub struct HostConfiguration { /// decide to do with its PoV so this value in practice will be picked as a fraction of the PoV /// size. pub max_downward_message_size: u32, + /// The amount of weight we wish to devote to the processing the dispatchable upward messages + /// stage. + /// + /// NOTE that this is a soft limit and could be exceeded. + pub preferred_dispatchable_upward_messages_step_weight: Weight, + /// The maximum size of an upward message that can be sent by a candidate. + /// + /// This parameter affects the size upper bound of the `CandidateCommitments`. + pub max_upward_message_size: u32, + /// The maximum number of messages that a candidate can contain. + /// + /// This parameter affects the size upper bound of the `CandidateCommitments`. + pub max_upward_message_num_per_candidate: u32, + /// Number of sessions after which an HRMP open channel request expires. + pub hrmp_open_request_ttl: u32, + /// The deposit that the sender should provide for opening an HRMP channel. + pub hrmp_sender_deposit: Balance, + /// The deposit that the recipient should provide for accepting opening an HRMP channel. + pub hrmp_recipient_deposit: Balance, + /// The maximum number of messages allowed in an HRMP channel at once. + pub hrmp_channel_max_capacity: u32, + /// The maximum total size of messages in bytes allowed in an HRMP channel at once. + pub hrmp_channel_max_total_size: u32, + /// The maximum number of inbound HRMP channels a parachain is allowed to accept. + pub hrmp_max_parachain_inbound_channels: u32, + /// The maximum number of inbound HRMP channels a parathread is allowed to accept. + pub hrmp_max_parathread_inbound_channels: u32, + /// The maximum size of a message that could ever be put into an HRMP channel. 
+ /// + /// This parameter affects the upper bound of size of `CandidateCommitments`. + pub hrmp_channel_max_message_size: u32, + /// The maximum number of outbound HRMP channels a parachain is allowed to open. + pub hrmp_max_parachain_outbound_channels: u32, + /// The maximum number of outbound HRMP channels a parathread is allowed to open. + pub hrmp_max_parathread_outbound_channels: u32, + /// The maximum number of outbound HRMP messages can be sent by a candidate. + /// + /// This parameter affects the upper bound of size of `CandidateCommitments`. + pub hrmp_max_message_num_per_candidate: u32, } -pub trait Trait: frame_system::Trait { } +impl> Default for HostConfiguration { + fn default() -> Self { + Self { + group_rotation_frequency: 1u32.into(), + chain_availability_period: 1u32.into(), + thread_availability_period: 1u32.into(), + no_show_slots: 1u32.into(), + validation_upgrade_frequency: Default::default(), + validation_upgrade_delay: Default::default(), + acceptance_period: Default::default(), + max_code_size: Default::default(), + max_pov_size: Default::default(), + max_head_data_size: Default::default(), + parathread_cores: Default::default(), + parathread_retries: Default::default(), + scheduling_lookahead: Default::default(), + max_validators_per_core: Default::default(), + dispute_period: Default::default(), + n_delay_tranches: Default::default(), + zeroth_delay_tranche_width: Default::default(), + needed_approvals: Default::default(), + relay_vrf_modulo_samples: Default::default(), + max_upward_queue_count: Default::default(), + max_upward_queue_size: Default::default(), + max_downward_message_size: Default::default(), + preferred_dispatchable_upward_messages_step_weight: Default::default(), + max_upward_message_size: Default::default(), + max_upward_message_num_per_candidate: Default::default(), + hrmp_open_request_ttl: Default::default(), + hrmp_sender_deposit: Default::default(), + hrmp_recipient_deposit: Default::default(), + 
hrmp_channel_max_capacity: Default::default(), + hrmp_channel_max_total_size: Default::default(), + hrmp_max_parachain_inbound_channels: Default::default(), + hrmp_max_parathread_inbound_channels: Default::default(), + hrmp_channel_max_message_size: Default::default(), + hrmp_max_parachain_outbound_channels: Default::default(), + hrmp_max_parathread_outbound_channels: Default::default(), + hrmp_max_message_num_per_candidate: Default::default(), + } + } +} + +impl HostConfiguration { + /// Checks that this instance is consistent with the requirements on each individual member. + /// + /// # Panic + /// + /// This function panics if any member is not set properly. + fn check_consistency(&self) { + if self.group_rotation_frequency.is_zero() { + panic!("`group_rotation_frequency` must be non-zero!") + } + + if self.chain_availability_period.is_zero() { + panic!("`chain_availability_period` must be at least 1!") + } + + if self.thread_availability_period.is_zero() { + panic!("`thread_availability_period` must be at least 1!") + } + + if self.no_show_slots.is_zero() { + panic!("`no_show_slots` must be at least 1!") + } + } +} + +pub trait Config: frame_system::Config { } decl_storage! { - trait Store for Module as Configuration { + trait Store for Module as Configuration { /// The active configuration for the current session. - Config get(fn config) config(): HostConfiguration; + ActiveConfig get(fn config) config(): HostConfiguration; /// Pending configuration (if any) for the next session. PendingConfig: Option>; } + add_extra_genesis { + build(|config: &Self| { + config.config.check_consistency(); + }) + } } decl_error! { - pub enum Error for Module { } + pub enum Error for Module { + /// The new value for a configuration parameter is invalid. + InvalidNewValue, + } } decl_module! { /// The parachains configuration module. 
- pub struct Module for enum Call where origin: ::Origin { + pub struct Module for enum Call where origin: ::Origin { type Error = Error; /// Set the validation upgrade frequency. @@ -127,6 +278,16 @@ decl_module! { Ok(()) } + /// Set the max POV block size for incoming upgrades. + #[weight = (1_000, DispatchClass::Operational)] + pub fn set_max_pov_size(origin, new: u32) -> DispatchResult { + ensure_root(origin)?; + Self::update_config_member(|config| { + sp_std::mem::replace(&mut config.max_pov_size, new) != new + }); + Ok(()) + } + /// Set the max head data size for paras. #[weight = (1_000, DispatchClass::Operational)] pub fn set_max_head_data_size(origin, new: u32) -> DispatchResult { @@ -162,6 +323,9 @@ decl_module! { #[weight = (1_000, DispatchClass::Operational)] pub fn set_group_rotation_frequency(origin, new: T::BlockNumber) -> DispatchResult { ensure_root(origin)?; + + ensure!(!new.is_zero(), Error::::InvalidNewValue); + Self::update_config_member(|config| { sp_std::mem::replace(&mut config.group_rotation_frequency, new) != new }); @@ -172,6 +336,9 @@ decl_module! { #[weight = (1_000, DispatchClass::Operational)] pub fn set_chain_availability_period(origin, new: T::BlockNumber) -> DispatchResult { ensure_root(origin)?; + + ensure!(!new.is_zero(), Error::::InvalidNewValue); + Self::update_config_member(|config| { sp_std::mem::replace(&mut config.chain_availability_period, new) != new }); @@ -182,6 +349,9 @@ decl_module! { #[weight = (1_000, DispatchClass::Operational)] pub fn set_thread_availability_period(origin, new: T::BlockNumber) -> DispatchResult { ensure_root(origin)?; + + ensure!(!new.is_zero(), Error::::InvalidNewValue); + Self::update_config_member(|config| { sp_std::mem::replace(&mut config.thread_availability_period, new) != new }); @@ -198,6 +368,100 @@ decl_module! { Ok(()) } + /// Set the maximum number of validators to assign to any core. 
+ #[weight = (1_000, DispatchClass::Operational)] + pub fn set_max_validators_per_core(origin, new: Option) -> DispatchResult { + ensure_root(origin)?; + Self::update_config_member(|config| { + sp_std::mem::replace(&mut config.max_validators_per_core, new) != new + }); + Ok(()) + } + + /// Set the dispute period, in number of sessions to keep for disputes. + #[weight = (1_000, DispatchClass::Operational)] + pub fn set_dispute_period(origin, new: SessionIndex) -> DispatchResult { + ensure_root(origin)?; + Self::update_config_member(|config| { + sp_std::mem::replace(&mut config.dispute_period, new) != new + }); + Ok(()) + } + + /// Set the no show slots, in number of number of consensus slots. + /// Must be at least 1. + #[weight = (1_000, DispatchClass::Operational)] + pub fn set_no_show_slots(origin, new: u32) -> DispatchResult { + ensure_root(origin)?; + + ensure!(!new.is_zero(), Error::::InvalidNewValue); + + Self::update_config_member(|config| { + sp_std::mem::replace(&mut config.no_show_slots, new) != new + }); + Ok(()) + } + + /// Set the total number of delay tranches. + #[weight = (1_000, DispatchClass::Operational)] + pub fn set_n_delay_tranches(origin, new: u32) -> DispatchResult { + ensure_root(origin)?; + Self::update_config_member(|config| { + sp_std::mem::replace(&mut config.n_delay_tranches, new) != new + }); + Ok(()) + } + + /// Set the zeroth delay tranche width. + #[weight = (1_000, DispatchClass::Operational)] + pub fn set_zeroth_delay_tranche_width(origin, new: u32) -> DispatchResult { + ensure_root(origin)?; + Self::update_config_member(|config| { + sp_std::mem::replace(&mut config.zeroth_delay_tranche_width, new) != new + }); + Ok(()) + } + + /// Set the number of validators needed to approve a block. 
+ #[weight = (1_000, DispatchClass::Operational)] + pub fn set_needed_approvals(origin, new: u32) -> DispatchResult { + ensure_root(origin)?; + Self::update_config_member(|config| { + sp_std::mem::replace(&mut config.needed_approvals, new) != new + }); + Ok(()) + } + + /// Set the number of samples to do of the RelayVRFModulo approval assignment criterion. + #[weight = (1_000, DispatchClass::Operational)] + pub fn set_relay_vrf_modulo_samples(origin, new: u32) -> DispatchResult { + ensure_root(origin)?; + Self::update_config_member(|config| { + sp_std::mem::replace(&mut config.relay_vrf_modulo_samples, new) != new + }); + Ok(()) + } + + /// Sets the maximum items that can present in a upward dispatch queue at once. + #[weight = (1_000, DispatchClass::Operational)] + pub fn set_max_upward_queue_count(origin, new: u32) -> DispatchResult { + ensure_root(origin)?; + Self::update_config_member(|config| { + sp_std::mem::replace(&mut config.max_upward_queue_count, new) != new + }); + Ok(()) + } + + /// Sets the maximum total size of items that can present in a upward dispatch queue at once. + #[weight = (1_000, DispatchClass::Operational)] + pub fn set_max_upward_queue_size(origin, new: u32) -> DispatchResult { + ensure_root(origin)?; + Self::update_config_member(|config| { + sp_std::mem::replace(&mut config.max_upward_queue_size, new) != new + }); + Ok(()) + } + /// Set the critical downward message size. #[weight = (1_000, DispatchClass::Operational)] pub fn set_max_downward_message_size(origin, new: u32) -> DispatchResult { @@ -207,10 +471,151 @@ decl_module! { }); Ok(()) } + + /// Sets the soft limit for the phase of dispatching dispatchable upward messages. 
+ #[weight = (1_000, DispatchClass::Operational)] + pub fn set_preferred_dispatchable_upward_messages_step_weight(origin, new: Weight) -> DispatchResult { + ensure_root(origin)?; + Self::update_config_member(|config| { + sp_std::mem::replace(&mut config.preferred_dispatchable_upward_messages_step_weight, new) != new + }); + Ok(()) + } + + /// Sets the maximum size of an upward message that can be sent by a candidate. + #[weight = (1_000, DispatchClass::Operational)] + pub fn set_max_upward_message_size(origin, new: u32) -> DispatchResult { + ensure_root(origin)?; + Self::update_config_member(|config| { + sp_std::mem::replace(&mut config.max_upward_message_size, new) != new + }); + Ok(()) + } + + /// Sets the maximum number of messages that a candidate can contain. + #[weight = (1_000, DispatchClass::Operational)] + pub fn set_max_upward_message_num_per_candidate(origin, new: u32) -> DispatchResult { + ensure_root(origin)?; + Self::update_config_member(|config| { + sp_std::mem::replace(&mut config.max_upward_message_num_per_candidate, new) != new + }); + Ok(()) + } + + /// Sets the number of sessions after which an HRMP open channel request expires. + #[weight = (1_000, DispatchClass::Operational)] + pub fn set_hrmp_open_request_ttl(origin, new: u32) -> DispatchResult { + ensure_root(origin)?; + Self::update_config_member(|config| { + sp_std::mem::replace(&mut config.hrmp_open_request_ttl, new) != new + }); + Ok(()) + } + + /// Sets the amount of funds that the sender should provide for opening an HRMP channel. + #[weight = (1_000, DispatchClass::Operational)] + pub fn set_hrmp_sender_deposit(origin, new: Balance) -> DispatchResult { + ensure_root(origin)?; + Self::update_config_member(|config| { + sp_std::mem::replace(&mut config.hrmp_sender_deposit, new) != new + }); + Ok(()) + } + + /// Sets the amount of funds that the recipient should provide for accepting opening an HRMP + /// channel. 
+ #[weight = (1_000, DispatchClass::Operational)] + pub fn set_hrmp_recipient_deposit(origin, new: Balance) -> DispatchResult { + ensure_root(origin)?; + Self::update_config_member(|config| { + sp_std::mem::replace(&mut config.hrmp_recipient_deposit, new) != new + }); + Ok(()) + } + + /// Sets the maximum number of messages allowed in an HRMP channel at once. + #[weight = (1_000, DispatchClass::Operational)] + pub fn set_hrmp_channel_max_capacity(origin, new: u32) -> DispatchResult { + ensure_root(origin)?; + Self::update_config_member(|config| { + sp_std::mem::replace(&mut config.hrmp_channel_max_capacity, new) != new + }); + Ok(()) + } + + /// Sets the maximum total size of messages in bytes allowed in an HRMP channel at once. + #[weight = (1_000, DispatchClass::Operational)] + pub fn set_hrmp_channel_max_total_size(origin, new: u32) -> DispatchResult { + ensure_root(origin)?; + Self::update_config_member(|config| { + sp_std::mem::replace(&mut config.hrmp_channel_max_total_size, new) != new + }); + Ok(()) + } + + /// Sets the maximum number of inbound HRMP channels a parachain is allowed to accept. + #[weight = (1_000, DispatchClass::Operational)] + pub fn set_hrmp_max_parachain_inbound_channels(origin, new: u32) -> DispatchResult { + ensure_root(origin)?; + Self::update_config_member(|config| { + sp_std::mem::replace(&mut config.hrmp_max_parachain_inbound_channels, new) != new + }); + Ok(()) + } + + /// Sets the maximum number of inbound HRMP channels a parathread is allowed to accept. + #[weight = (1_000, DispatchClass::Operational)] + pub fn set_hrmp_max_parathread_inbound_channels(origin, new: u32) -> DispatchResult { + ensure_root(origin)?; + Self::update_config_member(|config| { + sp_std::mem::replace(&mut config.hrmp_max_parathread_inbound_channels, new) != new + }); + Ok(()) + } + + /// Sets the maximum size of a message that could ever be put into an HRMP channel. 
+ #[weight = (1_000, DispatchClass::Operational)] + pub fn set_hrmp_channel_max_message_size(origin, new: u32) -> DispatchResult { + ensure_root(origin)?; + Self::update_config_member(|config| { + sp_std::mem::replace(&mut config.hrmp_channel_max_message_size, new) != new + }); + Ok(()) + } + + /// Sets the maximum number of outbound HRMP channels a parachain is allowed to open. + #[weight = (1_000, DispatchClass::Operational)] + pub fn set_hrmp_max_parachain_outbound_channels(origin, new: u32) -> DispatchResult { + ensure_root(origin)?; + Self::update_config_member(|config| { + sp_std::mem::replace(&mut config.hrmp_max_parachain_outbound_channels, new) != new + }); + Ok(()) + } + + /// Sets the maximum number of outbound HRMP channels a parathread is allowed to open. + #[weight = (1_000, DispatchClass::Operational)] + pub fn set_hrmp_max_parathread_outbound_channels(origin, new: u32) -> DispatchResult { + ensure_root(origin)?; + Self::update_config_member(|config| { + sp_std::mem::replace(&mut config.hrmp_max_parathread_outbound_channels, new) != new + }); + Ok(()) + } + + /// Sets the maximum number of outbound HRMP messages can be sent by a candidate. + #[weight = (1_000, DispatchClass::Operational)] + pub fn set_hrmp_max_message_num_per_candidate(origin, new: u32) -> DispatchResult { + ensure_root(origin)?; + Self::update_config_member(|config| { + sp_std::mem::replace(&mut config.hrmp_max_message_num_per_candidate, new) != new + }); + Ok(()) + } } } -impl Module { +impl Module { /// Called by the initializer to initialize the configuration module. pub(crate) fn initializer_initialize(_now: T::BlockNumber) -> Weight { 0 @@ -222,7 +627,7 @@ impl Module { /// Called by the initializer to note that a new session has started. 
pub(crate) fn initializer_on_new_session(_validators: &[ValidatorId], _queued: &[ValidatorId]) { if let Some(pending) = ::PendingConfig::take() { - ::Config::set(pending); + ::ActiveConfig::set(pending); } } @@ -278,6 +683,7 @@ mod tests { validation_upgrade_delay: 10, acceptance_period: 5, max_code_size: 100_000, + max_pov_size: 1024, max_head_data_size: 1_000, parathread_cores: 2, parathread_retries: 5, @@ -285,7 +691,30 @@ mod tests { chain_availability_period: 10, thread_availability_period: 8, scheduling_lookahead: 3, + max_validators_per_core: None, + dispute_period: 239, + no_show_slots: 240, + n_delay_tranches: 241, + zeroth_delay_tranche_width: 242, + needed_approvals: 242, + relay_vrf_modulo_samples: 243, + max_upward_queue_count: 1337, + max_upward_queue_size: 228, max_downward_message_size: 2048, + preferred_dispatchable_upward_messages_step_weight: 20000, + max_upward_message_size: 448, + max_upward_message_num_per_candidate: 5, + hrmp_open_request_ttl: 1312, + hrmp_sender_deposit: 22, + hrmp_recipient_deposit: 4905, + hrmp_channel_max_capacity: 3921, + hrmp_channel_max_total_size: 7687, + hrmp_max_parachain_inbound_channels: 3722, + hrmp_max_parathread_inbound_channels: 1967, + hrmp_channel_max_message_size: 8192, + hrmp_max_parachain_outbound_channels: 100, + hrmp_max_parathread_outbound_channels: 200, + hrmp_max_message_num_per_candidate: 20, }; assert!(::PendingConfig::get().is_none()); @@ -302,6 +731,9 @@ mod tests { Configuration::set_max_code_size( Origin::root(), new_config.max_code_size, ).unwrap(); + Configuration::set_max_pov_size( + Origin::root(), new_config.max_pov_size, + ).unwrap(); Configuration::set_max_head_data_size( Origin::root(), new_config.max_head_data_size, ).unwrap(); @@ -323,9 +755,89 @@ mod tests { Configuration::set_scheduling_lookahead( Origin::root(), new_config.scheduling_lookahead, ).unwrap(); + Configuration::set_max_validators_per_core( + Origin::root(), new_config.max_validators_per_core, + ).unwrap(); + 
Configuration::set_dispute_period( + Origin::root(), new_config.dispute_period, + ).unwrap(); + Configuration::set_no_show_slots( + Origin::root(), new_config.no_show_slots, + ).unwrap(); + Configuration::set_n_delay_tranches( + Origin::root(), new_config.n_delay_tranches, + ).unwrap(); + Configuration::set_zeroth_delay_tranche_width( + Origin::root(), new_config.zeroth_delay_tranche_width, + ).unwrap(); + Configuration::set_needed_approvals( + Origin::root(), new_config.needed_approvals, + ).unwrap(); + Configuration::set_relay_vrf_modulo_samples( + Origin::root(), new_config.relay_vrf_modulo_samples, + ).unwrap(); + Configuration::set_max_upward_queue_count( + Origin::root(), new_config.max_upward_queue_count, + ).unwrap(); + Configuration::set_max_upward_queue_size( + Origin::root(), new_config.max_upward_queue_size, + ).unwrap(); Configuration::set_max_downward_message_size( Origin::root(), new_config.max_downward_message_size, ).unwrap(); + Configuration::set_preferred_dispatchable_upward_messages_step_weight( + Origin::root(), new_config.preferred_dispatchable_upward_messages_step_weight, + ).unwrap(); + Configuration::set_max_upward_message_size( + Origin::root(), new_config.max_upward_message_size, + ).unwrap(); + Configuration::set_max_upward_message_num_per_candidate( + Origin::root(), new_config.max_upward_message_num_per_candidate, + ).unwrap(); + Configuration::set_hrmp_open_request_ttl( + Origin::root(), + new_config.hrmp_open_request_ttl, + ).unwrap(); + Configuration::set_hrmp_sender_deposit( + Origin::root(), + new_config.hrmp_sender_deposit, + ).unwrap(); + Configuration::set_hrmp_recipient_deposit( + Origin::root(), + new_config.hrmp_recipient_deposit, + ).unwrap(); + Configuration::set_hrmp_channel_max_capacity( + Origin::root(), + new_config.hrmp_channel_max_capacity, + ).unwrap(); + Configuration::set_hrmp_channel_max_total_size( + Origin::root(), + new_config.hrmp_channel_max_total_size, + ).unwrap(); + 
Configuration::set_hrmp_max_parachain_inbound_channels( + Origin::root(), + new_config.hrmp_max_parachain_inbound_channels, + ).unwrap(); + Configuration::set_hrmp_max_parathread_inbound_channels( + Origin::root(), + new_config.hrmp_max_parathread_inbound_channels, + ).unwrap(); + Configuration::set_hrmp_channel_max_message_size( + Origin::root(), + new_config.hrmp_channel_max_message_size, + ).unwrap(); + Configuration::set_hrmp_max_parachain_outbound_channels( + Origin::root(), + new_config.hrmp_max_parachain_outbound_channels, + ).unwrap(); + Configuration::set_hrmp_max_parathread_outbound_channels( + Origin::root(), + new_config.hrmp_max_parathread_outbound_channels, + ).unwrap(); + Configuration::set_hrmp_max_message_num_per_candidate( + Origin::root(), + new_config.hrmp_max_message_num_per_candidate, + ).unwrap(); assert_eq!(::PendingConfig::get(), Some(new_config)); }) diff --git a/runtime/parachains/src/router/dmp.rs b/runtime/parachains/src/dmp.rs similarity index 53% rename from runtime/parachains/src/router/dmp.rs rename to runtime/parachains/src/dmp.rs index fa0d057c01656d7fd0be29aa209dd5e22b8b8333..49fb2f8a5732303a6973b2dbfd8b273a0bde08b7 100644 --- a/runtime/parachains/src/router/dmp.rs +++ b/runtime/parachains/src/dmp.rs @@ -14,10 +14,12 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
-use super::{Trait, Module, Store}; -use crate::configuration::HostConfiguration; -use frame_support::{StorageMap, weights::Weight, traits::Get}; -use sp_std::prelude::*; +use crate::{ + configuration::{self, HostConfiguration}, + initializer, +}; +use frame_support::{decl_module, decl_storage, StorageMap, weights::Weight, traits::Get}; +use sp_std::{fmt, prelude::*}; use sp_runtime::traits::{BlakeTwo256, Hash as HashT, SaturatedConversion}; use primitives::v1::{Id as ParaId, DownwardMessage, InboundDownwardMessage, Hash}; @@ -28,13 +30,104 @@ pub enum QueueDownwardMessageError { ExceedsMaxMessageSize, } +/// An error returned by [`check_processed_downward_messages`] that indicates an acceptance check +/// didn't pass. +pub enum ProcessedDownwardMessagesAcceptanceErr { + /// If there are pending messages then `processed_downward_messages` should be at least 1, + AdvancementRule, + /// `processed_downward_messages` should not be greater than the number of pending messages. + Underflow { + processed_downward_messages: u32, + dmq_length: u32, + }, +} + +impl fmt::Debug for ProcessedDownwardMessagesAcceptanceErr { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + use ProcessedDownwardMessagesAcceptanceErr::*; + match *self { + AdvancementRule => write!( + fmt, + "DMQ is not empty, but processed_downward_messages is 0", + ), + Underflow { + processed_downward_messages, + dmq_length, + } => write!( + fmt, + "processed_downward_messages = {}, but dmq_length is only {}", + processed_downward_messages, dmq_length, + ), + } + } +} + +pub trait Config: frame_system::Config + configuration::Config {} + +decl_storage! { + trait Store for Module as Dmp { + /// Paras that are to be cleaned up at the end of the session. + /// The entries are sorted ascending by the para id. + OutgoingParas: Vec; + + /// The downward messages addressed for a certain para. 
+ DownwardMessageQueues: map hasher(twox_64_concat) ParaId => Vec>; + /// A mapping that stores the downward message queue MQC head for each para. + /// + /// Each link in this chain has a form: + /// `(prev_head, B, H(M))`, where + /// - `prev_head`: is the previous head hash or zero if none. + /// - `B`: is the relay-chain block number in which a message was appended. + /// - `H(M)`: is the hash of the message being appended. + DownwardMessageQueueHeads: map hasher(twox_64_concat) ParaId => Hash; + } +} + +decl_module! { + /// The DMP module. + pub struct Module for enum Call where origin: ::Origin { } +} + /// Routines and getters related to downward message passing. -impl Module { - pub(crate) fn clean_dmp_after_outgoing(outgoing_para: ParaId) { +impl Module { + /// Block initialization logic, called by initializer. + pub(crate) fn initializer_initialize(_now: T::BlockNumber) -> Weight { + 0 + } + + /// Block finalization logic, called by initializer. + pub(crate) fn initializer_finalize() {} + + /// Called by the initializer to note that a new session has started. + pub(crate) fn initializer_on_new_session( + _notification: &initializer::SessionChangeNotification, + ) { + Self::perform_outgoing_para_cleanup(); + } + + /// Iterate over all paras that were registered for offboarding and remove all the data + /// associated with them. + fn perform_outgoing_para_cleanup() { + let outgoing = OutgoingParas::take(); + for outgoing_para in outgoing { + Self::clean_dmp_after_outgoing(outgoing_para); + } + } + + fn clean_dmp_after_outgoing(outgoing_para: ParaId) { ::DownwardMessageQueues::remove(&outgoing_para); ::DownwardMessageQueueHeads::remove(&outgoing_para); } + /// Schedule a para to be cleaned up at the start of the next session. + pub(crate) fn schedule_para_cleanup(id: ParaId) { + OutgoingParas::mutate(|v| { + if let Err(i) = v.binary_search(&id) { + v.insert(i, id); + } + }); + } + /// Enqueue a downward message to a specific recipient para. 
/// /// When encoded, the message should not exceed the `config.max_downward_message_size`. @@ -72,26 +165,24 @@ impl Module { Ok(()) } - /// Checks if the number of processed downward messages is valid, i.e.: - /// - /// - if there are pending messages then `processed_downward_messages` should be at least 1, - /// - `processed_downward_messages` should not be greater than the number of pending messages. - /// - /// Returns true if all checks have been passed. + /// Checks if the number of processed downward messages is valid. pub(crate) fn check_processed_downward_messages( para: ParaId, processed_downward_messages: u32, - ) -> bool { + ) -> Result<(), ProcessedDownwardMessagesAcceptanceErr> { let dmq_length = Self::dmq_length(para); if dmq_length > 0 && processed_downward_messages == 0 { - return false; + return Err(ProcessedDownwardMessagesAcceptanceErr::AdvancementRule); } if dmq_length < processed_downward_messages { - return false; + return Err(ProcessedDownwardMessagesAcceptanceErr::Underflow { + processed_downward_messages, + dmq_length, + }); } - true + Ok(()) } /// Prunes the specified number of messages from the downward message queue of the given para. 
@@ -135,19 +226,45 @@ impl Module { #[cfg(test)] mod tests { use super::*; - use crate::mock::{Configuration, Router, new_test_ext}; - use crate::router::{ - OutgoingParas, - tests::{default_genesis_config, run_to_block}, - }; + use primitives::v1::BlockNumber; use frame_support::StorageValue; - use codec::Encode; + use frame_support::traits::{OnFinalize, OnInitialize}; + use parity_scale_codec::Encode; + use crate::mock::{Configuration, new_test_ext, System, Dmp, GenesisConfig as MockGenesisConfig}; + + pub(crate) fn run_to_block(to: BlockNumber, new_session: Option>) { + while System::block_number() < to { + let b = System::block_number(); + Dmp::initializer_finalize(); + System::on_finalize(b); + + System::on_initialize(b + 1); + System::set_block_number(b + 1); + + if new_session.as_ref().map_or(false, |v| v.contains(&(b + 1))) { + Dmp::initializer_on_new_session(&Default::default()); + } + Dmp::initializer_initialize(b + 1); + } + } + + fn default_genesis_config() -> MockGenesisConfig { + MockGenesisConfig { + configuration: crate::configuration::GenesisConfig { + config: crate::configuration::HostConfiguration { + max_downward_message_size: 1024, + ..Default::default() + }, + }, + ..Default::default() + } + } fn queue_downward_message( para_id: ParaId, msg: DownwardMessage, ) -> Result<(), QueueDownwardMessageError> { - Router::queue_downward_message(&Configuration::config(), para_id, msg) + Dmp::queue_downward_message(&Configuration::config(), para_id, msg) } #[test] @@ -164,23 +281,23 @@ mod tests { queue_downward_message(b, vec![4, 5, 6]).unwrap(); queue_downward_message(c, vec![7, 8, 9]).unwrap(); - Router::schedule_para_cleanup(a); + Dmp::schedule_para_cleanup(a); // run to block without session change. 
run_to_block(2, None); - assert!(!::DownwardMessageQueues::get(&a).is_empty()); - assert!(!::DownwardMessageQueues::get(&b).is_empty()); - assert!(!::DownwardMessageQueues::get(&c).is_empty()); + assert!(!::DownwardMessageQueues::get(&a).is_empty()); + assert!(!::DownwardMessageQueues::get(&b).is_empty()); + assert!(!::DownwardMessageQueues::get(&c).is_empty()); - Router::schedule_para_cleanup(b); + Dmp::schedule_para_cleanup(b); // run to block changing the session. run_to_block(3, Some(vec![3])); - assert!(::DownwardMessageQueues::get(&a).is_empty()); - assert!(::DownwardMessageQueues::get(&b).is_empty()); - assert!(!::DownwardMessageQueues::get(&c).is_empty()); + assert!(::DownwardMessageQueues::get(&a).is_empty()); + assert!(::DownwardMessageQueues::get(&b).is_empty()); + assert!(!::DownwardMessageQueues::get(&c).is_empty()); // verify that the outgoing paras are emptied. assert!(OutgoingParas::get().is_empty()) @@ -193,15 +310,15 @@ mod tests { let b = ParaId::from(228); new_test_ext(default_genesis_config()).execute_with(|| { - assert_eq!(Router::dmq_length(a), 0); - assert_eq!(Router::dmq_length(b), 0); + assert_eq!(Dmp::dmq_length(a), 0); + assert_eq!(Dmp::dmq_length(b), 0); queue_downward_message(a, vec![1, 2, 3]).unwrap(); - assert_eq!(Router::dmq_length(a), 1); - assert_eq!(Router::dmq_length(b), 0); - assert!(!Router::dmq_mqc_head(a).is_zero()); - assert!(Router::dmq_mqc_head(b).is_zero()); + assert_eq!(Dmp::dmq_length(a), 1); + assert_eq!(Dmp::dmq_length(b), 0); + assert!(!Dmp::dmq_mqc_head(a).is_zero()); + assert!(Dmp::dmq_mqc_head(b).is_zero()); }); } @@ -211,20 +328,20 @@ mod tests { new_test_ext(default_genesis_config()).execute_with(|| { // processed_downward_messages=0 is allowed when the DMQ is empty. 
- assert!(Router::check_processed_downward_messages(a, 0)); + assert!(Dmp::check_processed_downward_messages(a, 0).is_ok()); queue_downward_message(a, vec![1, 2, 3]).unwrap(); queue_downward_message(a, vec![4, 5, 6]).unwrap(); queue_downward_message(a, vec![7, 8, 9]).unwrap(); // 0 doesn't pass if the DMQ has msgs. - assert!(!Router::check_processed_downward_messages(a, 0)); + assert!(!Dmp::check_processed_downward_messages(a, 0).is_ok()); // a candidate can consume up to 3 messages - assert!(Router::check_processed_downward_messages(a, 1)); - assert!(Router::check_processed_downward_messages(a, 2)); - assert!(Router::check_processed_downward_messages(a, 3)); + assert!(Dmp::check_processed_downward_messages(a, 1).is_ok()); + assert!(Dmp::check_processed_downward_messages(a, 2).is_ok()); + assert!(Dmp::check_processed_downward_messages(a, 3).is_ok()); // there is no 4 messages in the queue - assert!(!Router::check_processed_downward_messages(a, 4)); + assert!(!Dmp::check_processed_downward_messages(a, 4).is_ok()); }); } @@ -233,19 +350,19 @@ mod tests { let a = ParaId::from(1312); new_test_ext(default_genesis_config()).execute_with(|| { - assert_eq!(Router::dmq_length(a), 0); + assert_eq!(Dmp::dmq_length(a), 0); queue_downward_message(a, vec![1, 2, 3]).unwrap(); queue_downward_message(a, vec![4, 5, 6]).unwrap(); queue_downward_message(a, vec![7, 8, 9]).unwrap(); - assert_eq!(Router::dmq_length(a), 3); + assert_eq!(Dmp::dmq_length(a), 3); // pruning 0 elements shouldn't change anything. 
- Router::prune_dmq(a, 0); - assert_eq!(Router::dmq_length(a), 3); + Dmp::prune_dmq(a, 0); + assert_eq!(Dmp::dmq_length(a), 3); - Router::prune_dmq(a, 2); - assert_eq!(Router::dmq_length(a), 1); + Dmp::prune_dmq(a, 2); + assert_eq!(Dmp::dmq_length(a), 1); }); } diff --git a/runtime/parachains/src/hrmp.rs b/runtime/parachains/src/hrmp.rs new file mode 100644 index 0000000000000000000000000000000000000000..8021ebf2e37e68d3f937e4b41e7102eae041ffa3 --- /dev/null +++ b/runtime/parachains/src/hrmp.rs @@ -0,0 +1,1557 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::{ + ensure_parachain, + configuration::{self, HostConfiguration}, + initializer, paras, dmp, +}; +use parity_scale_codec::{Decode, Encode}; +use frame_support::{ + decl_storage, decl_module, decl_error, ensure, traits::Get, weights::Weight, StorageMap, + StorageValue, dispatch::DispatchResult, +}; +use primitives::v1::{ + Balance, Hash, HrmpChannelId, Id as ParaId, InboundHrmpMessage, OutboundHrmpMessage, + SessionIndex, +}; +use sp_runtime::traits::{BlakeTwo256, Hash as HashT}; +use sp_std::{ + mem, fmt, + collections::{btree_map::BTreeMap, btree_set::BTreeSet}, + prelude::*, +}; + +/// A description of a request to open an HRMP channel. 
+#[derive(Encode, Decode)]
+pub struct HrmpOpenChannelRequest {
+	/// Indicates if this request was confirmed by the recipient.
+	pub confirmed: bool,
+	/// How many session boundaries ago this request was seen.
+	pub age: SessionIndex,
+	/// The amount that the sender supplied at the time of creation of this request.
+	pub sender_deposit: Balance,
+	/// The maximum message size that could be put into the channel.
+	pub max_message_size: u32,
+	/// The maximum number of messages that can be pending in the channel at once.
+	pub max_capacity: u32,
+	/// The maximum total size of the messages that can be pending in the channel at once.
+	pub max_total_size: u32,
+}
+
+/// A metadata of an HRMP channel.
+#[derive(Encode, Decode)]
+#[cfg_attr(test, derive(Debug))]
+pub struct HrmpChannel {
+	/// The amount that the sender supplied as a deposit when opening this channel.
+	pub sender_deposit: Balance,
+	/// The amount that the recipient supplied as a deposit when accepting opening this channel.
+	pub recipient_deposit: Balance,
+	/// The maximum number of messages that can be pending in the channel at once.
+	pub max_capacity: u32,
+	/// The maximum total size of the messages that can be pending in the channel at once.
+	pub max_total_size: u32,
+	/// The maximum message size that could be put into the channel.
+	pub max_message_size: u32,
+	/// The current number of messages pending in the channel.
+	/// Invariant: should be less or equal to `max_capacity`.
+	pub msg_count: u32,
+	/// The total size in bytes of all message payloads in the channel.
+	/// Invariant: should be less or equal to `max_total_size`.
+	pub total_size: u32,
+	/// A head of the Message Queue Chain for this channel. Each link in this chain has a form:
+	/// `(prev_head, B, H(M))`, where
+	/// - `prev_head`: is the previous value of `mqc_head` or zero if none.
+	/// - `B`: is the [relay-chain] block number in which a message was appended
+	/// - `H(M)`: is the hash of the message being appended.
+ /// This value is initialized to a special value that consists of all zeroes which indicates + /// that no messages were previously added. + pub mqc_head: Option, +} + +/// An error returned by [`check_hrmp_watermark`] that indicates an acceptance criteria check +/// didn't pass. +pub enum HrmpWatermarkAcceptanceErr { + AdvancementRule { + new_watermark: BlockNumber, + last_watermark: BlockNumber, + }, + AheadRelayParent { + new_watermark: BlockNumber, + relay_chain_parent_number: BlockNumber, + }, + LandsOnBlockWithNoMessages { + new_watermark: BlockNumber, + }, +} + +/// An error returned by [`check_outbound_hrmp`] that indicates an acceptance criteria check +/// didn't pass. +pub enum OutboundHrmpAcceptanceErr { + MoreMessagesThanPermitted { + sent: u32, + permitted: u32, + }, + NotSorted { + idx: u32, + }, + NoSuchChannel { + idx: u32, + channel_id: HrmpChannelId, + }, + MaxMessageSizeExceeded { + idx: u32, + msg_size: u32, + max_size: u32, + }, + TotalSizeExceeded { + idx: u32, + total_size: u32, + limit: u32, + }, + CapacityExceeded { + idx: u32, + count: u32, + limit: u32, + }, +} + +impl fmt::Debug for HrmpWatermarkAcceptanceErr +where + BlockNumber: fmt::Debug, +{ + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + use HrmpWatermarkAcceptanceErr::*; + match self { + AdvancementRule { + new_watermark, + last_watermark, + } => write!( + fmt, + "the HRMP watermark is not advanced relative to the last watermark ({:?} > {:?})", + new_watermark, last_watermark, + ), + AheadRelayParent { + new_watermark, + relay_chain_parent_number, + } => write!( + fmt, + "the HRMP watermark is ahead the relay-parent ({:?} > {:?})", + new_watermark, relay_chain_parent_number + ), + LandsOnBlockWithNoMessages { new_watermark } => write!( + fmt, + "the HRMP watermark ({:?}) doesn't land on a block with messages received", + new_watermark + ), + } + } +} + +impl fmt::Debug for OutboundHrmpAcceptanceErr { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + use 
OutboundHrmpAcceptanceErr::*;
+		match self {
+			MoreMessagesThanPermitted { sent, permitted } => write!(
+				fmt,
+				"more HRMP messages than permitted by config ({} > {})",
+				sent, permitted,
+			),
+			NotSorted { idx } => write!(
+				fmt,
+				"the HRMP messages are not sorted (first unsorted is at index {})",
+				idx,
+			),
+			NoSuchChannel { idx, channel_id } => write!(
+				fmt,
+				"the HRMP message at index {} is sent to a non existent channel {:?}->{:?}",
+				idx, channel_id.sender, channel_id.recipient,
+			),
+			MaxMessageSizeExceeded {
+				idx,
+				msg_size,
+				max_size,
+			} => write!(
+				fmt,
+				"the HRMP message at index {} exceeds the negotiated channel maximum message size ({} > {})",
+				idx, msg_size, max_size,
+			),
+			TotalSizeExceeded {
+				idx,
+				total_size,
+				limit,
+			} => write!(
+				fmt,
+				"sending the HRMP message at index {} would exceed the negotiated channel total size ({} > {})",
+				idx, total_size, limit,
+			),
+			CapacityExceeded { idx, count, limit } => write!(
+				fmt,
+				"sending the HRMP message at index {} would exceed the negotiated channel capacity ({} > {})",
+				idx, count, limit,
+			),
+		}
+	}
+}
+
+pub trait Config: frame_system::Config + configuration::Config + paras::Config + dmp::Config {
+	type Origin: From<crate::Origin>
+		+ From<<Self as frame_system::Config>::Origin>
+		+ Into<Result<crate::Origin, <Self as Config>::Origin>>;
+}
+
+decl_storage! {
+	trait Store for Module<T: Config> as Hrmp {
+		/// Paras that are to be cleaned up at the end of the session.
+		/// The entries are sorted ascending by the para id.
+		OutgoingParas: Vec<ParaId>;
+
+
+		/// The set of pending HRMP open channel requests.
+		///
+		/// The set is accompanied by a list for iteration.
+		///
+		/// Invariant:
+		/// - There are no channels that exists in list but not in the set and vice versa.
+		HrmpOpenChannelRequests: map hasher(twox_64_concat) HrmpChannelId => Option<HrmpOpenChannelRequest>;
+		HrmpOpenChannelRequestsList: Vec<HrmpChannelId>;
+
+		/// This mapping tracks how many open channel requests are initiated by a given sender para.
+ /// Invariant: `HrmpOpenChannelRequests` should contain the same number of items that has `(X, _)` + /// as the number of `HrmpOpenChannelRequestCount` for `X`. + HrmpOpenChannelRequestCount: map hasher(twox_64_concat) ParaId => u32; + /// This mapping tracks how many open channel requests were accepted by a given recipient para. + /// Invariant: `HrmpOpenChannelRequests` should contain the same number of items `(_, X)` with + /// `confirmed` set to true, as the number of `HrmpAcceptedChannelRequestCount` for `X`. + HrmpAcceptedChannelRequestCount: map hasher(twox_64_concat) ParaId => u32; + + /// A set of pending HRMP close channel requests that are going to be closed during the session change. + /// Used for checking if a given channel is registered for closure. + /// + /// The set is accompanied by a list for iteration. + /// + /// Invariant: + /// - There are no channels that exists in list but not in the set and vice versa. + HrmpCloseChannelRequests: map hasher(twox_64_concat) HrmpChannelId => Option<()>; + HrmpCloseChannelRequestsList: Vec; + + /// The HRMP watermark associated with each para. + /// Invariant: + /// - each para `P` used here as a key should satisfy `Paras::is_valid_para(P)` within a session. + HrmpWatermarks: map hasher(twox_64_concat) ParaId => Option; + /// HRMP channel data associated with each para. + /// Invariant: + /// - each participant in the channel should satisfy `Paras::is_valid_para(P)` within a session. + HrmpChannels: map hasher(twox_64_concat) HrmpChannelId => Option; + /// Ingress/egress indexes allow to find all the senders and receivers given the opposite + /// side. I.e. + /// + /// (a) ingress index allows to find all the senders for a given recipient. + /// (b) egress index allows to find all the recipients for a given sender. + /// + /// Invariants: + /// - for each ingress index entry for `P` each item `I` in the index should present in `HrmpChannels` + /// as `(I, P)`. 
+	/// - for each egress index entry for `P` each item `E` in the index should present in `HrmpChannels`
+	///   as `(P, E)`.
+	/// - there should be no other dangling channels in `HrmpChannels`.
+	/// - the vectors are sorted.
+	HrmpIngressChannelsIndex: map hasher(twox_64_concat) ParaId => Vec<ParaId>;
+	HrmpEgressChannelsIndex: map hasher(twox_64_concat) ParaId => Vec<ParaId>;
+	/// Storage for the messages for each channel.
+	/// Invariant: cannot be non-empty if the corresponding channel in `HrmpChannels` is `None`.
+	HrmpChannelContents: map hasher(twox_64_concat) HrmpChannelId => Vec<InboundHrmpMessage<T::BlockNumber>>;
+	/// Maintains a mapping that can be used to answer the question:
+	/// What paras sent a message at the given block number for a given receiver.
+	/// Invariants:
+	/// - The inner `Vec` is never empty.
+	/// - The inner `Vec` cannot store two same `ParaId`.
+	/// - The outer vector is sorted ascending by block number and cannot store two items with the same
+	///   block number.
+	HrmpChannelDigests: map hasher(twox_64_concat) ParaId => Vec<(T::BlockNumber, Vec<ParaId>)>;
+	}
+}
+
+decl_error! {
+	pub enum Error for Module<T: Config> {
+		/// The sender tried to open a channel to themselves.
+		OpenHrmpChannelToSelf,
+		/// The recipient is not a valid para.
+		OpenHrmpChannelInvalidRecipient,
+		/// The requested capacity is zero.
+		OpenHrmpChannelZeroCapacity,
+		/// The requested capacity exceeds the global limit.
+		OpenHrmpChannelCapacityExceedsLimit,
+		/// The requested maximum message size is 0.
+		OpenHrmpChannelZeroMessageSize,
+		/// The open request requested the message size that exceeds the global limit.
+		OpenHrmpChannelMessageSizeExceedsLimit,
+		/// The channel already exists
+		OpenHrmpChannelAlreadyExists,
+		/// There is already a request to open the same channel.
+		OpenHrmpChannelAlreadyRequested,
+		/// The sender already has the maximum number of allowed outbound channels.
+		OpenHrmpChannelLimitExceeded,
+		/// The channel from the sender to the origin doesn't exist.
+ AcceptHrmpChannelDoesntExist, + /// The channel is already confirmed. + AcceptHrmpChannelAlreadyConfirmed, + /// The recipient already has the maximum number of allowed inbound channels. + AcceptHrmpChannelLimitExceeded, + /// The origin tries to close a channel where it is neither the sender nor the recipient. + CloseHrmpChannelUnauthorized, + /// The channel to be closed doesn't exist. + CloseHrmpChannelDoesntExist, + /// The channel close request is already requested. + CloseHrmpChannelAlreadyUnderway, + } +} + +decl_module! { + /// The HRMP module. + pub struct Module for enum Call where origin: ::Origin { + type Error = Error; + + /// Initiate opening a channel from a parachain to a given recipient with given channel + /// parameters. + /// + /// - `proposed_max_capacity` - specifies how many messages can be in the channel at once. + /// - `proposed_max_message_size` - specifies the maximum size of any of the messages. + /// + /// These numbers are a subject to the relay-chain configuration limits. + /// + /// The channel can be opened only after the recipient confirms it and only on a session + /// change. + #[weight = 0] + pub fn hrmp_init_open_channel( + origin, + recipient: ParaId, + proposed_max_capacity: u32, + proposed_max_message_size: u32, + ) -> DispatchResult { + let origin = ensure_parachain(::Origin::from(origin))?; + Self::init_open_channel( + origin, + recipient, + proposed_max_capacity, + proposed_max_message_size + )?; + Ok(()) + } + + /// Accept a pending open channel request from the given sender. + /// + /// The channel will be opened only on the next session boundary. + #[weight = 0] + pub fn hrmp_accept_open_channel(origin, sender: ParaId) -> DispatchResult { + let origin = ensure_parachain(::Origin::from(origin))?; + Self::accept_open_channel(origin, sender)?; + Ok(()) + } + + /// Initiate unilateral closing of a channel. The origin must be either the sender or the + /// recipient in the channel being closed. 
+ /// + /// The closure can only happen on a session change. + #[weight = 0] + pub fn hrmp_close_channel(origin, channel_id: HrmpChannelId) -> DispatchResult { + let origin = ensure_parachain(::Origin::from(origin))?; + Self::close_channel(origin, channel_id)?; + Ok(()) + } + } +} + +/// Routines and getters related to HRMP. +impl Module { + /// Block initialization logic, called by initializer. + pub(crate) fn initializer_initialize(_now: T::BlockNumber) -> Weight { + 0 + } + + /// Block finalization logic, called by initializer. + pub(crate) fn initializer_finalize() {} + + /// Called by the initializer to note that a new session has started. + pub(crate) fn initializer_on_new_session( + notification: &initializer::SessionChangeNotification, + ) { + Self::perform_outgoing_para_cleanup(); + Self::process_hrmp_open_channel_requests(¬ification.prev_config); + Self::process_hrmp_close_channel_requests(); + } + + /// Iterate over all paras that were registered for offboarding and remove all the data + /// associated with them. + fn perform_outgoing_para_cleanup() { + let outgoing = OutgoingParas::take(); + for outgoing_para in outgoing { + Self::clean_hrmp_after_outgoing(outgoing_para); + } + } + + /// Schedule a para to be cleaned up at the start of the next session. + pub(crate) fn schedule_para_cleanup(id: ParaId) { + OutgoingParas::mutate(|v| { + if let Err(i) = v.binary_search(&id) { + v.insert(i, id); + } + }); + } + + /// Remove all storage entries associated with the given para. + pub(super) fn clean_hrmp_after_outgoing(outgoing_para: ParaId) { + ::HrmpOpenChannelRequestCount::remove(&outgoing_para); + ::HrmpAcceptedChannelRequestCount::remove(&outgoing_para); + + // close all channels where the outgoing para acts as the recipient. 
+ for sender in ::HrmpIngressChannelsIndex::take(&outgoing_para) { + Self::close_hrmp_channel(&HrmpChannelId { + sender, + recipient: outgoing_para.clone(), + }); + } + // close all channels where the outgoing para acts as the sender. + for recipient in ::HrmpEgressChannelsIndex::take(&outgoing_para) { + Self::close_hrmp_channel(&HrmpChannelId { + sender: outgoing_para.clone(), + recipient, + }); + } + } + + /// Iterate over all open channel requests and: + /// + /// - prune the stale requests + /// - enact the confirmed requests + pub(super) fn process_hrmp_open_channel_requests(config: &HostConfiguration) { + let mut open_req_channels = ::HrmpOpenChannelRequestsList::get(); + if open_req_channels.is_empty() { + return; + } + + // iterate the vector starting from the end making our way to the beginning. This way we + // can leverage `swap_remove` to efficiently remove an item during iteration. + let mut idx = open_req_channels.len(); + loop { + // bail if we've iterated over all items. 
+ if idx == 0 { + break; + } + + idx -= 1; + let channel_id = open_req_channels[idx].clone(); + let mut request = ::HrmpOpenChannelRequests::get(&channel_id).expect( + "can't be `None` due to the invariant that the list contains the same items as the set; qed", + ); + + if request.confirmed { + if >::is_valid_para(channel_id.sender) + && >::is_valid_para(channel_id.recipient) + { + ::HrmpChannels::insert( + &channel_id, + HrmpChannel { + sender_deposit: request.sender_deposit, + recipient_deposit: config.hrmp_recipient_deposit, + max_capacity: request.max_capacity, + max_total_size: request.max_total_size, + max_message_size: request.max_message_size, + msg_count: 0, + total_size: 0, + mqc_head: None, + }, + ); + + ::HrmpIngressChannelsIndex::mutate(&channel_id.recipient, |v| { + if let Err(i) = v.binary_search(&channel_id.sender) { + v.insert(i, channel_id.sender); + } + }); + ::HrmpEgressChannelsIndex::mutate(&channel_id.sender, |v| { + if let Err(i) = v.binary_search(&channel_id.recipient) { + v.insert(i, channel_id.recipient); + } + }); + } + + let new_open_channel_req_cnt = + ::HrmpOpenChannelRequestCount::get(&channel_id.sender) + .saturating_sub(1); + if new_open_channel_req_cnt != 0 { + ::HrmpOpenChannelRequestCount::insert( + &channel_id.sender, + new_open_channel_req_cnt, + ); + } else { + ::HrmpOpenChannelRequestCount::remove(&channel_id.sender); + } + + let new_accepted_channel_req_cnt = + ::HrmpAcceptedChannelRequestCount::get(&channel_id.recipient) + .saturating_sub(1); + if new_accepted_channel_req_cnt != 0 { + ::HrmpAcceptedChannelRequestCount::insert( + &channel_id.recipient, + new_accepted_channel_req_cnt, + ); + } else { + ::HrmpAcceptedChannelRequestCount::remove(&channel_id.recipient); + } + + let _ = open_req_channels.swap_remove(idx); + ::HrmpOpenChannelRequests::remove(&channel_id); + } else { + request.age += 1; + if request.age == config.hrmp_open_request_ttl { + // got stale + + ::HrmpOpenChannelRequestCount::mutate(&channel_id.sender, 
|v| { + *v -= 1; + }); + + // TODO: return deposit https://github.com/paritytech/polkadot/issues/1907 + + let _ = open_req_channels.swap_remove(idx); + ::HrmpOpenChannelRequests::remove(&channel_id); + } + } + } + + ::HrmpOpenChannelRequestsList::put(open_req_channels); + } + + /// Iterate over all close channel requests unconditionally closing the channels. + pub(super) fn process_hrmp_close_channel_requests() { + let close_reqs = ::HrmpCloseChannelRequestsList::take(); + for condemned_ch_id in close_reqs { + ::HrmpCloseChannelRequests::remove(&condemned_ch_id); + Self::close_hrmp_channel(&condemned_ch_id); + + // clean up the indexes. + ::HrmpEgressChannelsIndex::mutate(&condemned_ch_id.sender, |v| { + if let Ok(i) = v.binary_search(&condemned_ch_id.recipient) { + v.remove(i); + } + }); + ::HrmpIngressChannelsIndex::mutate(&condemned_ch_id.recipient, |v| { + if let Ok(i) = v.binary_search(&condemned_ch_id.sender) { + v.remove(i); + } + }); + } + } + + /// Close and remove the designated HRMP channel. + /// + /// This includes returning the deposits. However, it doesn't include updating the ingress/egress + /// indicies. + pub(super) fn close_hrmp_channel(channel_id: &HrmpChannelId) { + // TODO: return deposit https://github.com/paritytech/polkadot/issues/1907 + + ::HrmpChannels::remove(channel_id); + ::HrmpChannelContents::remove(channel_id); + } + + /// Check that the candidate of the given recipient controls the HRMP watermark properly. + pub(crate) fn check_hrmp_watermark( + recipient: ParaId, + relay_chain_parent_number: T::BlockNumber, + new_hrmp_watermark: T::BlockNumber, + ) -> Result<(), HrmpWatermarkAcceptanceErr> { + // First, check where the watermark CANNOT legally land. + // + // (a) For ensuring that messages are eventually, a rule requires each parablock new + // watermark should be greater than the last one. 
+ // + // (b) However, a parachain cannot read into "the future", therefore the watermark should + // not be greater than the relay-chain context block which the parablock refers to. + if let Some(last_watermark) = ::HrmpWatermarks::get(&recipient) { + if new_hrmp_watermark <= last_watermark { + return Err(HrmpWatermarkAcceptanceErr::AdvancementRule { + new_watermark: new_hrmp_watermark, + last_watermark, + }); + } + } + if new_hrmp_watermark > relay_chain_parent_number { + return Err(HrmpWatermarkAcceptanceErr::AheadRelayParent { + new_watermark: new_hrmp_watermark, + relay_chain_parent_number, + }); + } + + // Second, check where the watermark CAN land. It's one of the following: + // + // (a) The relay parent block number. + // (b) A relay-chain block in which this para received at least one message. + if new_hrmp_watermark == relay_chain_parent_number { + Ok(()) + } else { + let digest = ::HrmpChannelDigests::get(&recipient); + if !digest + .binary_search_by_key(&new_hrmp_watermark, |(block_no, _)| *block_no) + .is_ok() + { + return Err(HrmpWatermarkAcceptanceErr::LandsOnBlockWithNoMessages { + new_watermark: new_hrmp_watermark, + }); + } + Ok(()) + } + } + + pub(crate) fn check_outbound_hrmp( + config: &HostConfiguration, + sender: ParaId, + out_hrmp_msgs: &[OutboundHrmpMessage], + ) -> Result<(), OutboundHrmpAcceptanceErr> { + if out_hrmp_msgs.len() as u32 > config.hrmp_max_message_num_per_candidate { + return Err(OutboundHrmpAcceptanceErr::MoreMessagesThanPermitted { + sent: out_hrmp_msgs.len() as u32, + permitted: config.hrmp_max_message_num_per_candidate, + }); + } + + let mut last_recipient = None::; + + for (idx, out_msg) in out_hrmp_msgs + .iter() + .enumerate() + .map(|(idx, out_msg)| (idx as u32, out_msg)) + { + match last_recipient { + // the messages must be sorted in ascending order and there must be no two messages sent + // to the same recipient. Thus we can check that every recipient is strictly greater than + // the previous one. 
+ Some(last_recipient) if out_msg.recipient <= last_recipient => { + return Err(OutboundHrmpAcceptanceErr::NotSorted { idx }); + } + _ => last_recipient = Some(out_msg.recipient), + } + + let channel_id = HrmpChannelId { + sender, + recipient: out_msg.recipient, + }; + + let channel = match ::HrmpChannels::get(&channel_id) { + Some(channel) => channel, + None => { + return Err(OutboundHrmpAcceptanceErr::NoSuchChannel { channel_id, idx }); + } + }; + + let msg_size = out_msg.data.len() as u32; + if msg_size > channel.max_message_size { + return Err(OutboundHrmpAcceptanceErr::MaxMessageSizeExceeded { + idx, + msg_size, + max_size: channel.max_message_size, + }); + } + + let new_total_size = channel.total_size + out_msg.data.len() as u32; + if new_total_size > channel.max_total_size { + return Err(OutboundHrmpAcceptanceErr::TotalSizeExceeded { + idx, + total_size: new_total_size, + limit: channel.max_total_size, + }); + } + + let new_msg_count = channel.msg_count + 1; + if new_msg_count > channel.max_capacity { + return Err(OutboundHrmpAcceptanceErr::CapacityExceeded { + idx, + count: new_msg_count, + limit: channel.max_capacity, + }); + } + } + + Ok(()) + } + + pub(crate) fn prune_hrmp(recipient: ParaId, new_hrmp_watermark: T::BlockNumber) -> Weight { + let mut weight = 0; + + // sift through the incoming messages digest to collect the paras that sent at least one + // message to this parachain between the old and new watermarks. + let senders = ::HrmpChannelDigests::mutate(&recipient, |digest| { + let mut senders = BTreeSet::new(); + let mut leftover = Vec::with_capacity(digest.len()); + for (block_no, paras_sent_msg) in mem::replace(digest, Vec::new()) { + if block_no <= new_hrmp_watermark { + senders.extend(paras_sent_msg); + } else { + leftover.push((block_no, paras_sent_msg)); + } + } + *digest = leftover; + senders + }); + weight += T::DbWeight::get().reads_writes(1, 1); + + // having all senders we can trivially find out the channels which we need to prune. 
+ let channels_to_prune = senders + .into_iter() + .map(|sender| HrmpChannelId { sender, recipient }); + for channel_id in channels_to_prune { + // prune each channel up to the new watermark keeping track how many messages we removed + // and what is the total byte size of them. + let (mut pruned_cnt, mut pruned_size) = (0, 0); + + let contents = ::HrmpChannelContents::get(&channel_id); + let mut leftover = Vec::with_capacity(contents.len()); + for msg in contents { + if msg.sent_at <= new_hrmp_watermark { + pruned_cnt += 1; + pruned_size += msg.data.len(); + } else { + leftover.push(msg); + } + } + if !leftover.is_empty() { + ::HrmpChannelContents::insert(&channel_id, leftover); + } else { + ::HrmpChannelContents::remove(&channel_id); + } + + // update the channel metadata. + ::HrmpChannels::mutate(&channel_id, |channel| { + if let Some(ref mut channel) = channel { + channel.msg_count -= pruned_cnt as u32; + channel.total_size -= pruned_size as u32; + } + }); + + weight += T::DbWeight::get().reads_writes(2, 2); + } + + ::HrmpWatermarks::insert(&recipient, new_hrmp_watermark); + weight += T::DbWeight::get().reads_writes(0, 1); + + weight + } + + /// Process the outbound HRMP messages by putting them into the appropriate recipient queues. + /// + /// Returns the amount of weight consumed. + pub(crate) fn queue_outbound_hrmp( + sender: ParaId, + out_hrmp_msgs: Vec>, + ) -> Weight { + let mut weight = 0; + let now = >::block_number(); + + for out_msg in out_hrmp_msgs { + let channel_id = HrmpChannelId { + sender, + recipient: out_msg.recipient, + }; + + let mut channel = match ::HrmpChannels::get(&channel_id) { + Some(channel) => channel, + None => { + // apparently, that since acceptance of this candidate the recipient was + // offboarded and the channel no longer exists. 
+ continue; + } + }; + + let inbound = InboundHrmpMessage { + sent_at: now, + data: out_msg.data, + }; + + // book keeping + channel.msg_count += 1; + channel.total_size += inbound.data.len() as u32; + + // compute the new MQC head of the channel + let prev_head = channel.mqc_head.clone().unwrap_or(Default::default()); + let new_head = BlakeTwo256::hash_of(&( + prev_head, + inbound.sent_at, + T::Hashing::hash_of(&inbound.data), + )); + channel.mqc_head = Some(new_head); + + ::HrmpChannels::insert(&channel_id, channel); + ::HrmpChannelContents::append(&channel_id, inbound); + + // The digests are sorted in ascending by block number order. Assuming absence of + // contextual execution, there are only two possible scenarios here: + // + // (a) It's the first time anybody sends a message to this recipient within this block. + // In this case, the digest vector would be empty or the block number of the latest + // entry is smaller than the current. + // + // (b) Somebody has already sent a message within the current block. That means that + // the block number of the latest entry is equal to the current. + // + // Note that having the latest entry greater than the current block number is a logical + // error. + let mut recipient_digest = + ::HrmpChannelDigests::get(&channel_id.recipient); + if let Some(cur_block_digest) = recipient_digest + .last_mut() + .filter(|(block_no, _)| *block_no == now) + .map(|(_, ref mut d)| d) + { + cur_block_digest.push(sender); + } else { + recipient_digest.push((now, vec![sender])); + } + ::HrmpChannelDigests::insert(&channel_id.recipient, recipient_digest); + + weight += T::DbWeight::get().reads_writes(2, 2); + } + + weight + } + + /// Initiate opening a channel from a parachain to a given recipient with given channel + /// parameters. + /// + /// Basically the same as [`hrmp_init_open_channel`](Module::hrmp_init_open_channel) but intendend for calling directly from + /// other pallets rather than dispatched. 
+ pub fn init_open_channel( + origin: ParaId, + recipient: ParaId, + proposed_max_capacity: u32, + proposed_max_message_size: u32, + ) -> Result<(), Error> { + ensure!(origin != recipient, Error::::OpenHrmpChannelToSelf); + ensure!( + >::is_valid_para(recipient), + Error::::OpenHrmpChannelInvalidRecipient, + ); + + let config = >::config(); + ensure!( + proposed_max_capacity > 0, + Error::::OpenHrmpChannelZeroCapacity, + ); + ensure!( + proposed_max_capacity <= config.hrmp_channel_max_capacity, + Error::::OpenHrmpChannelCapacityExceedsLimit, + ); + ensure!( + proposed_max_message_size > 0, + Error::::OpenHrmpChannelZeroMessageSize, + ); + ensure!( + proposed_max_message_size <= config.hrmp_channel_max_message_size, + Error::::OpenHrmpChannelMessageSizeExceedsLimit, + ); + + let channel_id = HrmpChannelId { + sender: origin, + recipient, + }; + ensure!( + ::HrmpOpenChannelRequests::get(&channel_id).is_none(), + Error::::OpenHrmpChannelAlreadyExists, + ); + ensure!( + ::HrmpChannels::get(&channel_id).is_none(), + Error::::OpenHrmpChannelAlreadyRequested, + ); + + let egress_cnt = + ::HrmpEgressChannelsIndex::decode_len(&origin).unwrap_or(0) as u32; + let open_req_cnt = ::HrmpOpenChannelRequestCount::get(&origin); + let channel_num_limit = if >::is_parathread(origin) { + config.hrmp_max_parathread_outbound_channels + } else { + config.hrmp_max_parachain_outbound_channels + }; + ensure!( + egress_cnt + open_req_cnt < channel_num_limit, + Error::::OpenHrmpChannelLimitExceeded, + ); + + // TODO: Deposit https://github.com/paritytech/polkadot/issues/1907 + + ::HrmpOpenChannelRequestCount::insert(&origin, open_req_cnt + 1); + ::HrmpOpenChannelRequests::insert( + &channel_id, + HrmpOpenChannelRequest { + confirmed: false, + age: 0, + sender_deposit: config.hrmp_sender_deposit, + max_capacity: proposed_max_capacity, + max_message_size: proposed_max_message_size, + max_total_size: config.hrmp_channel_max_total_size, + }, + ); + 
::HrmpOpenChannelRequestsList::append(channel_id); + + let notification_bytes = { + use xcm::v0::Xcm; + use parity_scale_codec::Encode as _; + + Xcm::HrmpNewChannelOpenRequest { + sender: u32::from(origin), + max_capacity: proposed_max_capacity, + max_message_size: proposed_max_message_size, + } + .encode() + }; + if let Err(dmp::QueueDownwardMessageError::ExceedsMaxMessageSize) = + >::queue_downward_message(&config, recipient, notification_bytes) + { + // this should never happen unless the max downward message size is configured to an + // jokingly small number. + debug_assert!(false); + } + + Ok(()) + } + + /// Accept a pending open channel request from the given sender. + /// + /// Basically the same as [`hrmp_accept_open_channel`](Module::hrmp_accept_open_channel) but intendend for calling directly from + /// other pallets rather than dispatched. + pub fn accept_open_channel(origin: ParaId, sender: ParaId) -> Result<(), Error> { + let channel_id = HrmpChannelId { + sender, + recipient: origin, + }; + let mut channel_req = ::HrmpOpenChannelRequests::get(&channel_id) + .ok_or(Error::::AcceptHrmpChannelDoesntExist)?; + ensure!( + !channel_req.confirmed, + Error::::AcceptHrmpChannelAlreadyConfirmed, + ); + + // check if by accepting this open channel request, this parachain would exceed the + // number of inbound channels. + let config = >::config(); + let channel_num_limit = if >::is_parathread(origin) { + config.hrmp_max_parathread_inbound_channels + } else { + config.hrmp_max_parachain_inbound_channels + }; + let ingress_cnt = + ::HrmpIngressChannelsIndex::decode_len(&origin).unwrap_or(0) as u32; + let accepted_cnt = ::HrmpAcceptedChannelRequestCount::get(&origin); + ensure!( + ingress_cnt + accepted_cnt < channel_num_limit, + Error::::AcceptHrmpChannelLimitExceeded, + ); + + // TODO: Deposit https://github.com/paritytech/polkadot/issues/1907 + + // persist the updated open channel request and then increment the number of accepted + // channels. 
+ channel_req.confirmed = true; + ::HrmpOpenChannelRequests::insert(&channel_id, channel_req); + ::HrmpAcceptedChannelRequestCount::insert(&origin, accepted_cnt + 1); + + let notification_bytes = { + use parity_scale_codec::Encode as _; + use xcm::v0::Xcm; + + Xcm::HrmpChannelAccepted { + recipient: u32::from(origin), + } + .encode() + }; + if let Err(dmp::QueueDownwardMessageError::ExceedsMaxMessageSize) = + >::queue_downward_message(&config, sender, notification_bytes) + { + // this should never happen unless the max downward message size is configured to an + // jokingly small number. + debug_assert!(false); + } + + Ok(()) + } + + fn close_channel(origin: ParaId, channel_id: HrmpChannelId) -> Result<(), Error> { + // check if the origin is allowed to close the channel. + ensure!( + origin == channel_id.sender || origin == channel_id.recipient, + Error::::CloseHrmpChannelUnauthorized, + ); + + // check if the channel requested to close does exist. + ensure!( + ::HrmpChannels::get(&channel_id).is_some(), + Error::::CloseHrmpChannelDoesntExist, + ); + + // check that there is no outstanding close request for this channel + ensure!( + ::HrmpCloseChannelRequests::get(&channel_id).is_none(), + Error::::CloseHrmpChannelAlreadyUnderway, + ); + + ::HrmpCloseChannelRequests::insert(&channel_id, ()); + ::HrmpCloseChannelRequestsList::append(channel_id.clone()); + + let config = >::config(); + let notification_bytes = { + use parity_scale_codec::Encode as _; + use xcm::v0::Xcm; + + Xcm::HrmpChannelClosing { + initiator: u32::from(origin), + sender: u32::from(channel_id.sender), + recipient: u32::from(channel_id.recipient), + } + .encode() + }; + let opposite_party = if origin == channel_id.sender { + channel_id.recipient + } else { + channel_id.sender + }; + if let Err(dmp::QueueDownwardMessageError::ExceedsMaxMessageSize) = + >::queue_downward_message(&config, opposite_party, notification_bytes) + { + // this should never happen unless the max downward message size is 
configured to an + // jokingly small number. + debug_assert!(false); + } + + Ok(()) + } + + /// Returns the list of MQC heads for the inbound channels of the given recipient para paired + /// with the sender para ids. This vector is sorted ascending by the para id and doesn't contain + /// multiple entries with the same sender. + pub(crate) fn hrmp_mqc_heads(recipient: ParaId) -> Vec<(ParaId, Hash)> { + let sender_set = ::HrmpIngressChannelsIndex::get(&recipient); + + // The ingress channels vector is sorted, thus `mqc_heads` is sorted as well. + let mut mqc_heads = Vec::with_capacity(sender_set.len()); + for sender in sender_set { + let channel_metadata = + ::HrmpChannels::get(&HrmpChannelId { sender, recipient }); + let mqc_head = channel_metadata + .and_then(|metadata| metadata.mqc_head) + .unwrap_or(Hash::default()); + mqc_heads.push((sender, mqc_head)); + } + + mqc_heads + } + + /// Returns contents of all channels addressed to the given recipient. Channels that have no + /// messages in them are also included. + pub(crate) fn inbound_hrmp_channels_contents( + recipient: ParaId, + ) -> BTreeMap>> { + let sender_set = ::HrmpIngressChannelsIndex::get(&recipient); + + let mut inbound_hrmp_channels_contents = BTreeMap::new(); + for sender in sender_set { + let channel_contents = + ::HrmpChannelContents::get(&HrmpChannelId { sender, recipient }); + inbound_hrmp_channels_contents.insert(sender, channel_contents); + } + + inbound_hrmp_channels_contents + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::mock::{ + new_test_ext, Configuration, Paras, Hrmp, System, GenesisConfig as MockGenesisConfig, + }; + use primitives::v1::BlockNumber; + use std::collections::{BTreeMap, HashSet}; + + fn run_to_block(to: BlockNumber, new_session: Option>) { + use frame_support::traits::{OnFinalize as _, OnInitialize as _}; + + while System::block_number() < to { + let b = System::block_number(); + + // NOTE: this is in reverse initialization order. 
+ Hrmp::initializer_finalize(); + Paras::initializer_finalize(); + + System::on_finalize(b); + + System::on_initialize(b + 1); + System::set_block_number(b + 1); + + if new_session.as_ref().map_or(false, |v| v.contains(&(b + 1))) { + // NOTE: this is in initialization order. + Paras::initializer_on_new_session(&Default::default()); + Hrmp::initializer_on_new_session(&Default::default()); + } + + // NOTE: this is in initialization order. + Paras::initializer_initialize(b + 1); + Hrmp::initializer_initialize(b + 1); + } + } + + struct GenesisConfigBuilder { + hrmp_channel_max_capacity: u32, + hrmp_channel_max_message_size: u32, + hrmp_max_parathread_outbound_channels: u32, + hrmp_max_parachain_outbound_channels: u32, + hrmp_max_parathread_inbound_channels: u32, + hrmp_max_parachain_inbound_channels: u32, + hrmp_max_message_num_per_candidate: u32, + hrmp_channel_max_total_size: u32, + } + + impl Default for GenesisConfigBuilder { + fn default() -> Self { + Self { + hrmp_channel_max_capacity: 2, + hrmp_channel_max_message_size: 8, + hrmp_max_parathread_outbound_channels: 1, + hrmp_max_parachain_outbound_channels: 2, + hrmp_max_parathread_inbound_channels: 1, + hrmp_max_parachain_inbound_channels: 2, + hrmp_max_message_num_per_candidate: 2, + hrmp_channel_max_total_size: 16, + } + } + } + + impl GenesisConfigBuilder { + fn build(self) -> crate::mock::GenesisConfig { + let mut genesis = default_genesis_config(); + let config = &mut genesis.configuration.config; + config.hrmp_channel_max_capacity = self.hrmp_channel_max_capacity; + config.hrmp_channel_max_message_size = self.hrmp_channel_max_message_size; + config.hrmp_max_parathread_outbound_channels = + self.hrmp_max_parathread_outbound_channels; + config.hrmp_max_parachain_outbound_channels = self.hrmp_max_parachain_outbound_channels; + config.hrmp_max_parathread_inbound_channels = self.hrmp_max_parathread_inbound_channels; + config.hrmp_max_parachain_inbound_channels = self.hrmp_max_parachain_inbound_channels; + 
config.hrmp_max_message_num_per_candidate = self.hrmp_max_message_num_per_candidate; + config.hrmp_channel_max_total_size = self.hrmp_channel_max_total_size; + genesis + } + } + + fn default_genesis_config() -> MockGenesisConfig { + MockGenesisConfig { + configuration: crate::configuration::GenesisConfig { + config: crate::configuration::HostConfiguration { + max_downward_message_size: 1024, + ..Default::default() + }, + }, + ..Default::default() + } + } + + fn register_parachain(id: ParaId) { + Paras::schedule_para_initialize( + id, + crate::paras::ParaGenesisArgs { + parachain: true, + genesis_head: vec![1].into(), + validation_code: vec![1].into(), + }, + ); + } + + fn deregister_parachain(id: ParaId) { + Paras::schedule_para_cleanup(id); + } + + fn channel_exists(sender: ParaId, recipient: ParaId) -> bool { + ::HrmpChannels::get(&HrmpChannelId { sender, recipient }).is_some() + } + + fn assert_storage_consistency_exhaustive() { + use frame_support::IterableStorageMap; + + assert_eq!( + ::HrmpOpenChannelRequests::iter() + .map(|(k, _)| k) + .collect::>(), + ::HrmpOpenChannelRequestsList::get() + .into_iter() + .collect::>(), + ); + + // verify that the set of keys in `HrmpOpenChannelRequestCount` corresponds to the set + // of _senders_ in `HrmpOpenChannelRequests`. + // + // having ensured that, we can go ahead and go over all counts and verify that they match. + assert_eq!( + ::HrmpOpenChannelRequestCount::iter() + .map(|(k, _)| k) + .collect::>(), + ::HrmpOpenChannelRequests::iter() + .map(|(k, _)| k.sender) + .collect::>(), + ); + for (open_channel_initiator, expected_num) in + ::HrmpOpenChannelRequestCount::iter() + { + let actual_num = ::HrmpOpenChannelRequests::iter() + .filter(|(ch, _)| ch.sender == open_channel_initiator) + .count() as u32; + assert_eq!(expected_num, actual_num); + } + + // The same as above, but for accepted channel request count. Note that we are interested + // only in confirmed open requests. 
+ assert_eq!( + ::HrmpAcceptedChannelRequestCount::iter() + .map(|(k, _)| k) + .collect::>(), + ::HrmpOpenChannelRequests::iter() + .filter(|(_, v)| v.confirmed) + .map(|(k, _)| k.recipient) + .collect::>(), + ); + for (channel_recipient, expected_num) in + ::HrmpAcceptedChannelRequestCount::iter() + { + let actual_num = ::HrmpOpenChannelRequests::iter() + .filter(|(ch, v)| ch.recipient == channel_recipient && v.confirmed) + .count() as u32; + assert_eq!(expected_num, actual_num); + } + + assert_eq!( + ::HrmpCloseChannelRequests::iter() + .map(|(k, _)| k) + .collect::>(), + ::HrmpCloseChannelRequestsList::get() + .into_iter() + .collect::>(), + ); + + // A HRMP watermark can be None for an onboarded parachain. However, an offboarded parachain + // cannot have an HRMP watermark: it should've been cleanup. + assert_contains_only_onboarded( + ::HrmpWatermarks::iter().map(|(k, _)| k), + "HRMP watermarks should contain only onboarded paras", + ); + + // An entry in `HrmpChannels` indicates that the channel is open. Only open channels can + // have contents. + for (non_empty_channel, contents) in ::HrmpChannelContents::iter() { + assert!(::HrmpChannels::contains_key( + &non_empty_channel + )); + + // pedantic check: there should be no empty vectors in storage, those should be modeled + // by a removed kv pair. + assert!(!contents.is_empty()); + } + + // Senders and recipients must be onboarded. Otherwise, all channels associated with them + // are removed. + assert_contains_only_onboarded( + ::HrmpChannels::iter().flat_map(|(k, _)| vec![k.sender, k.recipient]), + "senders and recipients in all channels should be onboarded", + ); + + // Check the docs for `HrmpIngressChannelsIndex` and `HrmpEgressChannelsIndex` in decl + // storage to get an index what are the channel mappings indexes. + // + // Here, from indexes. + // + // ingress egress + // + // a -> [x, y] x -> [a, b] + // b -> [x, z] y -> [a] + // z -> [b] + // + // we derive a list of channels they represent. 
+ // + // (a, x) (a, x) + // (a, y) (a, y) + // (b, x) (b, x) + // (b, z) (b, z) + // + // and then that we compare that to the channel list in the `HrmpChannels`. + let channel_set_derived_from_ingress = ::HrmpIngressChannelsIndex::iter() + .flat_map(|(p, v)| v.into_iter().map(|i| (i, p)).collect::>()) + .collect::>(); + let channel_set_derived_from_egress = ::HrmpEgressChannelsIndex::iter() + .flat_map(|(p, v)| v.into_iter().map(|e| (p, e)).collect::>()) + .collect::>(); + let channel_set_ground_truth = ::HrmpChannels::iter() + .map(|(k, _)| (k.sender, k.recipient)) + .collect::>(); + assert_eq!( + channel_set_derived_from_ingress, + channel_set_derived_from_egress + ); + assert_eq!(channel_set_derived_from_egress, channel_set_ground_truth); + + ::HrmpIngressChannelsIndex::iter() + .map(|(_, v)| v) + .for_each(|v| assert_is_sorted(&v, "HrmpIngressChannelsIndex")); + ::HrmpEgressChannelsIndex::iter() + .map(|(_, v)| v) + .for_each(|v| assert_is_sorted(&v, "HrmpIngressChannelsIndex")); + + assert_contains_only_onboarded( + ::HrmpChannelDigests::iter().map(|(k, _)| k), + "HRMP channel digests should contain only onboarded paras", + ); + for (_digest_for_para, digest) in ::HrmpChannelDigests::iter() { + // Assert that items are in **strictly** ascending order. The strictness also implies + // there are no duplicates. + assert!(digest.windows(2).all(|xs| xs[0].0 < xs[1].0)); + + for (_, mut senders) in digest { + assert!(!senders.is_empty()); + + // check for duplicates. For that we sort the vector, then perform deduplication. + // if the vector stayed the same, there are no duplicates. 
+ senders.sort(); + let orig_senders = senders.clone(); + senders.dedup(); + assert_eq!( + orig_senders, senders, + "duplicates removed implies existence of duplicates" + ); + } + } + + fn assert_contains_only_onboarded(iter: impl Iterator, cause: &str) { + for para in iter { + assert!( + Paras::is_valid_para(para), + "{}: {} para is offboarded", + cause, + para + ); + } + } + } + + fn assert_is_sorted(slice: &[T], id: &str) { + assert!( + slice.windows(2).all(|xs| xs[0] <= xs[1]), + "{} supposed to be sorted", + id + ); + } + + #[test] + fn empty_state_consistent_state() { + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + assert_storage_consistency_exhaustive(); + }); + } + + #[test] + fn open_channel_works() { + let para_a = 1.into(); + let para_b = 3.into(); + + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + // We need both A & B to be registered and alive parachains. + register_parachain(para_a); + register_parachain(para_b); + + run_to_block(5, Some(vec![5])); + Hrmp::init_open_channel(para_a, para_b, 2, 8).unwrap(); + assert_storage_consistency_exhaustive(); + + Hrmp::accept_open_channel(para_b, para_a).unwrap(); + assert_storage_consistency_exhaustive(); + + // Advance to a block 6, but without session change. That means that the channel has + // not been created yet. + run_to_block(6, None); + assert!(!channel_exists(para_a, para_b)); + assert_storage_consistency_exhaustive(); + + // Now let the session change happen and thus open the channel. 
+ run_to_block(8, Some(vec![8])); + assert!(channel_exists(para_a, para_b)); + }); + } + + #[test] + fn close_channel_works() { + let para_a = 5.into(); + let para_b = 2.into(); + + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + register_parachain(para_a); + register_parachain(para_b); + + run_to_block(5, Some(vec![5])); + Hrmp::init_open_channel(para_a, para_b, 2, 8).unwrap(); + Hrmp::accept_open_channel(para_b, para_a).unwrap(); + + run_to_block(6, Some(vec![6])); + assert!(channel_exists(para_a, para_b)); + + // Close the channel. The effect is not immediate, but rather deferred to the next + // session change. + Hrmp::close_channel( + para_b, + HrmpChannelId { + sender: para_a, + recipient: para_b, + }, + ) + .unwrap(); + assert!(channel_exists(para_a, para_b)); + assert_storage_consistency_exhaustive(); + + // After the session change the channel should be closed. + run_to_block(8, Some(vec![8])); + assert!(!channel_exists(para_a, para_b)); + assert_storage_consistency_exhaustive(); + }); + } + + #[test] + fn send_recv_messages() { + let para_a = 32.into(); + let para_b = 64.into(); + + let mut genesis = GenesisConfigBuilder::default(); + genesis.hrmp_channel_max_message_size = 20; + genesis.hrmp_channel_max_total_size = 20; + new_test_ext(genesis.build()).execute_with(|| { + register_parachain(para_a); + register_parachain(para_b); + + run_to_block(5, Some(vec![5])); + Hrmp::init_open_channel(para_a, para_b, 2, 20).unwrap(); + Hrmp::accept_open_channel(para_b, para_a).unwrap(); + + // On Block 6: + // A sends a message to B + run_to_block(6, Some(vec![6])); + assert!(channel_exists(para_a, para_b)); + let msgs = vec![OutboundHrmpMessage { + recipient: para_b, + data: b"this is an emergency".to_vec(), + }]; + let config = Configuration::config(); + assert!(Hrmp::check_outbound_hrmp(&config, para_a, &msgs).is_ok()); + let _ = Hrmp::queue_outbound_hrmp(para_a, msgs); + assert_storage_consistency_exhaustive(); + + // On Block 7: + // B 
receives the message sent by A. B sets the watermark to 6. + run_to_block(7, None); + assert!(Hrmp::check_hrmp_watermark(para_b, 7, 6).is_ok()); + let _ = Hrmp::prune_hrmp(para_b, 6); + assert_storage_consistency_exhaustive(); + }); + } + + #[test] + fn accept_incoming_request_and_offboard() { + let para_a = 32.into(); + let para_b = 64.into(); + + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + register_parachain(para_a); + register_parachain(para_b); + + run_to_block(5, Some(vec![5])); + Hrmp::init_open_channel(para_a, para_b, 2, 8).unwrap(); + Hrmp::accept_open_channel(para_b, para_a).unwrap(); + deregister_parachain(para_a); + + // On Block 6: session change. The channel should not be created. + run_to_block(6, Some(vec![6])); + assert!(!Paras::is_valid_para(para_a)); + assert!(!channel_exists(para_a, para_b)); + assert_storage_consistency_exhaustive(); + }); + } + + #[test] + fn check_sent_messages() { + let para_a = 32.into(); + let para_b = 64.into(); + let para_c = 97.into(); + + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + register_parachain(para_a); + register_parachain(para_b); + register_parachain(para_c); + + run_to_block(5, Some(vec![5])); + + // Open two channels to the same receiver, b: + // a -> b, c -> b + Hrmp::init_open_channel(para_a, para_b, 2, 8).unwrap(); + Hrmp::accept_open_channel(para_b, para_a).unwrap(); + Hrmp::init_open_channel(para_c, para_b, 2, 8).unwrap(); + Hrmp::accept_open_channel(para_b, para_c).unwrap(); + + // On Block 6: session change. + run_to_block(6, Some(vec![6])); + assert!(Paras::is_valid_para(para_a)); + + let msgs = vec![OutboundHrmpMessage { + recipient: para_b, + data: b"knock".to_vec(), + }]; + let config = Configuration::config(); + assert!(Hrmp::check_outbound_hrmp(&config, para_a, &msgs).is_ok()); + let _ = Hrmp::queue_outbound_hrmp(para_a, msgs.clone()); + + // Verify that the sent messages are there and that also the empty channels are present. 
+ let mqc_heads = Hrmp::hrmp_mqc_heads(para_b); + let contents = Hrmp::inbound_hrmp_channels_contents(para_b); + assert_eq!( + contents, + vec![ + ( + para_a, + vec![InboundHrmpMessage { + sent_at: 6, + data: b"knock".to_vec(), + }] + ), + (para_c, vec![]) + ] + .into_iter() + .collect::>(), + ); + assert_eq!( + mqc_heads, + vec![ + ( + para_a, + hex_literal::hex!( + "3bba6404e59c91f51deb2ae78f1273ebe75896850713e13f8c0eba4b0996c483" + ) + .into() + ), + (para_c, Default::default()) + ], + ); + + assert_storage_consistency_exhaustive(); + }); + } +} diff --git a/runtime/parachains/src/inclusion.rs b/runtime/parachains/src/inclusion.rs index e5e831bcc38b5bcd1099f63d71e47b0034ba031f..0b05f78064415ec3c82aa679eebcb5edec5717b2 100644 --- a/runtime/parachains/src/inclusion.rs +++ b/runtime/parachains/src/inclusion.rs @@ -31,12 +31,12 @@ use frame_support::{ decl_storage, decl_module, decl_error, decl_event, ensure, debug, dispatch::DispatchResult, IterableStorageMap, weights::Weight, traits::Get, }; -use codec::{Encode, Decode}; +use parity_scale_codec::{Encode, Decode}; use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec}; use sp_staking::SessionIndex; use sp_runtime::{DispatchError, traits::{One, Saturating}}; -use crate::{configuration, paras, router, scheduler::CoreAssignment}; +use crate::{configuration, paras, dmp, ump, hrmp, scheduler::CoreAssignment}; /// A bitfield signed by a validator indicating that it is keeping its piece of the erasure-coding /// for any backed candidates referred to by a `1` bit available. @@ -85,14 +85,19 @@ impl CandidatePendingAvailability { } } -pub trait Trait: - frame_system::Trait + paras::Trait + router::Trait + configuration::Trait +pub trait Config: + frame_system::Config + + paras::Config + + dmp::Config + + ump::Config + + hrmp::Config + + configuration::Config { - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; } decl_storage! 
{ - trait Store for Module as ParaInclusion { + trait Store for Module as ParaInclusion { /// The latest bitfield for each validator, referred to by their index in the validator set. AvailabilityBitfields: map hasher(twox_64_concat) ValidatorIndex => Option>; @@ -114,7 +119,7 @@ decl_storage! { } decl_error! { - pub enum Error for Module { + pub enum Error for Module { /// Availability bitfield has unexpected size. WrongBitfieldSize, /// Multiple bitfields submitted by same validator or validators out of order by index. @@ -155,11 +160,17 @@ decl_error! { InternalError, /// The downward message queue is not processed correctly. IncorrectDownwardMessageHandling, + /// At least one upward message sent does not pass the acceptance criteria. + InvalidUpwardMessages, + /// The candidate didn't follow the rules of HRMP watermark advancement. + HrmpWatermarkMishandling, + /// The HRMP messages sent by the candidate is not valid. + InvalidOutboundHrmp, } } decl_event! { - pub enum Event where ::Hash { + pub enum Event where ::Hash { /// A candidate was backed. [candidate, head_data] CandidateBacked(CandidateReceipt, HeadData), /// A candidate was included. [candidate, head_data] @@ -171,8 +182,8 @@ decl_event! { decl_module! { /// The parachain-candidate inclusion module. - pub struct Module - for enum Call where origin: ::Origin + pub struct Module + for enum Call where origin: ::Origin { type Error = Error; @@ -180,8 +191,9 @@ decl_module! { } } -impl Module { +const LOG_TARGET: &str = "parachains_runtime_inclusion"; +impl Module { /// Block initialization logic, called by initializer. 
pub(crate) fn initializer_initialize(_now: T::BlockNumber) -> Weight { 0 } @@ -310,7 +322,7 @@ impl Module { { if pending_availability.availability_votes.count_ones() >= threshold { >::remove(¶_id); - let commitments = match ::take(¶_id) { + let commitments = match PendingAvailabilityCommitments::take(¶_id) { Some(commitments) => commitments, None => { debug::warn!(r#" @@ -394,7 +406,7 @@ impl Module { // In the meantime, we do certain sanity checks on the candidates and on the scheduled // list. 'a: - for candidate in &candidates { + for (candidate_idx, candidate) in candidates.iter().enumerate() { let para_id = candidate.descriptor().para_id; // we require that the candidate is in the context of the parent block. @@ -407,12 +419,27 @@ impl Module { Error::::NotCollatorSigned, ); - check_cx.check_validation_outputs( - para_id, - &candidate.candidate.commitments.head_data, - &candidate.candidate.commitments.new_validation_code, - candidate.candidate.commitments.processed_downward_messages, - )?; + if let Err(err) = check_cx + .check_validation_outputs( + para_id, + &candidate.candidate.commitments.head_data, + &candidate.candidate.commitments.new_validation_code, + candidate.candidate.commitments.processed_downward_messages, + &candidate.candidate.commitments.upward_messages, + T::BlockNumber::from(candidate.candidate.commitments.hrmp_watermark), + &candidate.candidate.commitments.horizontal_messages, + ) + { + frame_support::debug::RuntimeLogger::init(); + log::debug!( + target: LOG_TARGET, + "Validation outputs checking during inclusion of a candidate {} for parachain `{}` failed: {:?}", + candidate_idx, + u32::from(para_id), + err, + ); + Err(err.strip_into_dispatch_err::())?; + }; for (i, assignment) in scheduled[skip..].iter().enumerate() { check_assignment_in_order(assignment)?; @@ -533,18 +560,30 @@ impl Module { } /// Run the acceptance criteria checks on the given candidate commitments. - /// - /// Returns an 'Err` if any of the checks doesn't pass. 
pub(crate) fn check_validation_outputs( para_id: ParaId, - validation_outputs: primitives::v1::ValidationOutputs, - ) -> Result<(), DispatchError> { - CandidateCheckContext::::new().check_validation_outputs( + validation_outputs: primitives::v1::CandidateCommitments, + ) -> bool { + if let Err(err) = CandidateCheckContext::::new().check_validation_outputs( para_id, &validation_outputs.head_data, &validation_outputs.new_validation_code, validation_outputs.processed_downward_messages, - ) + &validation_outputs.upward_messages, + T::BlockNumber::from(validation_outputs.hrmp_watermark), + &validation_outputs.horizontal_messages, + ) { + frame_support::debug::RuntimeLogger::init(); + log::debug!( + target: LOG_TARGET, + "Validation outputs checking for parachain `{}` failed: {:?}", + u32::from(para_id), + err, + ); + false + } else { + true + } } fn enact_candidate( @@ -566,10 +605,22 @@ impl Module { } // enact the messaging facet of the candidate. - weight += >::prune_dmq( + weight += >::prune_dmq( receipt.descriptor.para_id, commitments.processed_downward_messages, ); + weight += >::enact_upward_messages( + receipt.descriptor.para_id, + commitments.upward_messages, + ); + weight += >::prune_hrmp( + receipt.descriptor.para_id, + T::BlockNumber::from(commitments.hrmp_watermark), + ); + weight += >::queue_outbound_hrmp( + receipt.descriptor.para_id, + commitments.horizontal_messages, + ); Self::deposit_event( Event::::CandidateIncluded(plain, commitments.head_data.clone()) @@ -668,14 +719,42 @@ const fn availability_threshold(n_validators: usize) -> usize { threshold } +#[derive(derive_more::From, Debug)] +enum AcceptanceCheckErr { + HeadDataTooLarge, + PrematureCodeUpgrade, + NewCodeTooLarge, + ProcessedDownwardMessages(dmp::ProcessedDownwardMessagesAcceptanceErr), + UpwardMessages(ump::AcceptanceCheckErr), + HrmpWatermark(hrmp::HrmpWatermarkAcceptanceErr), + OutboundHrmp(hrmp::OutboundHrmpAcceptanceErr), +} + +impl AcceptanceCheckErr { + /// Returns the same error so 
that it can be threaded through a needle of `DispatchError` and + /// ultimately returned from a `Dispatchable`. + fn strip_into_dispatch_err(self) -> Error { + use AcceptanceCheckErr::*; + match self { + HeadDataTooLarge => Error::::HeadDataTooLarge, + PrematureCodeUpgrade => Error::::PrematureCodeUpgrade, + NewCodeTooLarge => Error::::NewCodeTooLarge, + ProcessedDownwardMessages(_) => Error::::IncorrectDownwardMessageHandling, + UpwardMessages(_) => Error::::InvalidUpwardMessages, + HrmpWatermark(_) => Error::::HrmpWatermarkMishandling, + OutboundHrmp(_) => Error::::InvalidOutboundHrmp, + } + } +} + /// A collection of data required for checking a candidate. -struct CandidateCheckContext { +struct CandidateCheckContext { config: configuration::HostConfiguration, now: T::BlockNumber, relay_parent_number: T::BlockNumber, } -impl CandidateCheckContext { +impl CandidateCheckContext { fn new() -> Self { let now = >::block_number(); Self { @@ -693,10 +772,13 @@ impl CandidateCheckContext { head_data: &HeadData, new_validation_code: &Option, processed_downward_messages: u32, - ) -> Result<(), DispatchError> { + upward_messages: &[primitives::v1::UpwardMessage], + hrmp_watermark: T::BlockNumber, + horizontal_messages: &[primitives::v1::OutboundHrmpMessage], + ) -> Result<(), AcceptanceCheckErr> { ensure!( head_data.0.len() <= self.config.max_head_data_size as _, - Error::::HeadDataTooLarge + AcceptanceCheckErr::HeadDataTooLarge, ); // if any, the code upgrade attempt is allowed. 
@@ -707,21 +789,28 @@ impl CandidateCheckContext { && self.relay_parent_number.saturating_sub(last) >= self.config.validation_upgrade_frequency }); - ensure!(valid_upgrade_attempt, Error::::PrematureCodeUpgrade); + ensure!( + valid_upgrade_attempt, + AcceptanceCheckErr::PrematureCodeUpgrade, + ); ensure!( new_validation_code.0.len() <= self.config.max_code_size as _, - Error::::NewCodeTooLarge + AcceptanceCheckErr::NewCodeTooLarge, ); } // check if the candidate passes the messaging acceptance criteria - ensure!( - >::check_processed_downward_messages( - para_id, - processed_downward_messages, - ), - Error::::IncorrectDownwardMessageHandling, - ); + >::check_processed_downward_messages( + para_id, + processed_downward_messages, + )?; + >::check_upward_messages(&self.config, para_id, upward_messages)?; + >::check_hrmp_watermark( + para_id, + self.relay_parent_number, + hrmp_watermark, + )?; + >::check_outbound_hrmp(&self.config, para_id, horizontal_messages)?; Ok(()) } @@ -929,6 +1018,7 @@ mod tests { relay_parent: Hash, persisted_validation_data_hash: Hash, new_validation_code: Option, + hrmp_watermark: BlockNumber, } impl TestCandidateBuilder { @@ -944,6 +1034,7 @@ mod tests { commitments: CandidateCommitments { head_data: self.head_data, new_validation_code: self.new_validation_code, + hrmp_watermark: self.hrmp_watermark, ..Default::default() }, } @@ -1342,6 +1433,9 @@ mod tests { let chain_b = ParaId::from(2); let thread_a = ParaId::from(3); + // The block number of the relay-parent for testing. 
+ const RELAY_PARENT_NUM: BlockNumber = 4; + let paras = vec![(chain_a, true), (chain_b, true), (thread_a, false)]; let validators = vec![ Sr25519Keyring::Alice, @@ -1404,6 +1498,7 @@ mod tests { relay_parent: System::parent_hash(), pov_hash: Hash::from([1; 32]), persisted_validation_data_hash: make_vdata_hash(chain_a).unwrap(), + hrmp_watermark: RELAY_PARENT_NUM, ..Default::default() }.build(); collator_sign_candidate( @@ -1437,6 +1532,7 @@ mod tests { relay_parent: System::parent_hash(), pov_hash: Hash::from([1; 32]), persisted_validation_data_hash: make_vdata_hash(chain_a).unwrap(), + hrmp_watermark: RELAY_PARENT_NUM, ..Default::default() }.build(); let mut candidate_b = TestCandidateBuilder { @@ -1444,6 +1540,7 @@ mod tests { relay_parent: System::parent_hash(), pov_hash: Hash::from([2; 32]), persisted_validation_data_hash: make_vdata_hash(chain_b).unwrap(), + hrmp_watermark: RELAY_PARENT_NUM, ..Default::default() }.build(); @@ -1493,6 +1590,7 @@ mod tests { relay_parent: System::parent_hash(), pov_hash: Hash::from([1; 32]), persisted_validation_data_hash: make_vdata_hash(chain_a).unwrap(), + hrmp_watermark: RELAY_PARENT_NUM, ..Default::default() }.build(); collator_sign_candidate( @@ -1562,6 +1660,7 @@ mod tests { relay_parent: System::parent_hash(), pov_hash: Hash::from([1; 32]), persisted_validation_data_hash: make_vdata_hash(thread_a).unwrap(), + hrmp_watermark: RELAY_PARENT_NUM, ..Default::default() }.build(); @@ -1601,6 +1700,7 @@ mod tests { relay_parent: System::parent_hash(), pov_hash: Hash::from([1; 32]), persisted_validation_data_hash: make_vdata_hash(thread_a).unwrap(), + hrmp_watermark: RELAY_PARENT_NUM, ..Default::default() }.build(); @@ -1639,6 +1739,7 @@ mod tests { relay_parent: System::parent_hash(), pov_hash: Hash::from([1; 32]), persisted_validation_data_hash: make_vdata_hash(chain_a).unwrap(), + hrmp_watermark: RELAY_PARENT_NUM, ..Default::default() }.build(); @@ -1686,6 +1787,7 @@ mod tests { relay_parent: System::parent_hash(), pov_hash: 
Hash::from([1; 32]), persisted_validation_data_hash: make_vdata_hash(chain_a).unwrap(), + hrmp_watermark: RELAY_PARENT_NUM, ..Default::default() }.build(); @@ -1726,6 +1828,7 @@ mod tests { pov_hash: Hash::from([1; 32]), new_validation_code: Some(vec![5, 6, 7, 8].into()), persisted_validation_data_hash: make_vdata_hash(chain_a).unwrap(), + hrmp_watermark: RELAY_PARENT_NUM, ..Default::default() }.build(); @@ -1768,6 +1871,7 @@ mod tests { relay_parent: System::parent_hash(), pov_hash: Hash::from([1; 32]), persisted_validation_data_hash: [42u8; 32].into(), + hrmp_watermark: RELAY_PARENT_NUM, ..Default::default() }.build(); @@ -1803,6 +1907,9 @@ mod tests { let chain_b = ParaId::from(2); let thread_a = ParaId::from(3); + // The block number of the relay-parent for testing. + const RELAY_PARENT_NUM: BlockNumber = 4; + let paras = vec![(chain_a, true), (chain_b, true), (thread_a, false)]; let validators = vec![ Sr25519Keyring::Alice, @@ -1863,6 +1970,7 @@ mod tests { relay_parent: System::parent_hash(), pov_hash: Hash::from([1; 32]), persisted_validation_data_hash: make_vdata_hash(chain_a).unwrap(), + hrmp_watermark: RELAY_PARENT_NUM, ..Default::default() }.build(); collator_sign_candidate( @@ -1875,6 +1983,7 @@ mod tests { relay_parent: System::parent_hash(), pov_hash: Hash::from([2; 32]), persisted_validation_data_hash: make_vdata_hash(chain_b).unwrap(), + hrmp_watermark: RELAY_PARENT_NUM, ..Default::default() }.build(); collator_sign_candidate( @@ -1887,6 +1996,7 @@ mod tests { relay_parent: System::parent_hash(), pov_hash: Hash::from([3; 32]), persisted_validation_data_hash: make_vdata_hash(thread_a).unwrap(), + hrmp_watermark: RELAY_PARENT_NUM, ..Default::default() }.build(); collator_sign_candidate( @@ -1984,6 +2094,9 @@ mod tests { fn can_include_candidate_with_ok_code_upgrade() { let chain_a = ParaId::from(1); + // The block number of the relay-parent for testing. 
+ const RELAY_PARENT_NUM: BlockNumber = 4; + let paras = vec![(chain_a, true)]; let validators = vec![ Sr25519Keyring::Alice, @@ -2027,6 +2140,7 @@ mod tests { pov_hash: Hash::from([1; 32]), persisted_validation_data_hash: make_vdata_hash(chain_a).unwrap(), new_validation_code: Some(vec![1, 2, 3].into()), + hrmp_watermark: RELAY_PARENT_NUM, ..Default::default() }.build(); collator_sign_candidate( diff --git a/runtime/parachains/src/inclusion_inherent.rs b/runtime/parachains/src/inclusion_inherent.rs index f9a7465d91282d60274c53d121af203df8aaaa2e..bb25f5c80f69e146283378e4938d7e0d94e296f9 100644 --- a/runtime/parachains/src/inclusion_inherent.rs +++ b/runtime/parachains/src/inclusion_inherent.rs @@ -35,13 +35,14 @@ use frame_system::ensure_none; use crate::{ inclusion, scheduler::{self, FreedReason}, + ump, }; use inherents::{InherentIdentifier, InherentData, MakeFatalError, ProvideInherent}; -pub trait Trait: inclusion::Trait + scheduler::Trait {} +pub trait Config: inclusion::Config + scheduler::Config {} decl_storage! { - trait Store for Module as ParaInclusionInherent { + trait Store for Module as ParaInclusionInherent { /// Whether the inclusion inherent was included within this block. /// /// The `Option<()>` is effectively a bool, but it never hits storage in the `None` variant @@ -53,7 +54,7 @@ decl_storage! { } decl_error! { - pub enum Error for Module { + pub enum Error for Module { /// Inclusion inherent called more than once per block. TooManyInclusionInherents, } @@ -61,7 +62,7 @@ decl_error! { decl_module! { /// The inclusion inherent module. - pub struct Module for enum Call where origin: ::Origin { + pub struct Module for enum Call where origin: ::Origin { type Error = Error; fn on_initialize() -> Weight { @@ -103,7 +104,7 @@ decl_module! 
{ let freed = freed_concluded.into_iter().map(|c| (c, FreedReason::Concluded)) .chain(freed_timeout.into_iter().map(|c| (c, FreedReason::TimedOut))); - >::schedule(freed.collect()); + >::schedule(freed); // Process backed candidates according to scheduled cores. let occupied = >::process_candidates( @@ -115,6 +116,9 @@ decl_module! { // Note which of the scheduled cores were actually occupied by a backed candidate. >::occupied(&occupied); + // Give some time slice to dispatch pending upward messages. + >::process_pending_upward_messages(); + // And track that we've finished processing the inherent for this block. Included::set(Some(())); @@ -123,7 +127,7 @@ decl_module! { } } -impl ProvideInherent for Module { +impl ProvideInherent for Module { type Call = Call; type Error = MakeFatalError<()>; const INHERENT_IDENTIFIER: InherentIdentifier = INCLUSION_INHERENT_IDENTIFIER; diff --git a/runtime/parachains/src/initializer.rs b/runtime/parachains/src/initializer.rs index 8e2e88ff59ebb89e3ec1f3f092e5004b1f1f8f9f..409b52a260794c9b01c9ffd208f141ec38d29eb3 100644 --- a/runtime/parachains/src/initializer.rs +++ b/runtime/parachains/src/initializer.rs @@ -26,14 +26,14 @@ use frame_support::{ decl_storage, decl_module, decl_error, traits::Randomness, }; use sp_runtime::traits::One; -use codec::{Encode, Decode}; +use parity_scale_codec::{Encode, Decode}; use crate::{ configuration::{self, HostConfiguration}, - paras, router, scheduler, inclusion, + paras, scheduler, inclusion, session_info, dmp, ump, hrmp, }; /// Information about a session change that has just occurred. -#[derive(Default, Clone)] +#[derive(Clone)] pub struct SessionChangeNotification { /// The new validators in the session. 
pub validators: Vec, @@ -49,6 +49,19 @@ pub struct SessionChangeNotification { pub session_index: sp_staking::SessionIndex, } +impl> Default for SessionChangeNotification { + fn default() -> Self { + Self { + validators: Vec::new(), + queued: Vec::new(), + prev_config: HostConfiguration::default(), + new_config: HostConfiguration::default(), + random_seed: Default::default(), + session_index: Default::default(), + } + } +} + #[derive(Encode, Decode)] struct BufferedSessionChange { apply_at: N, @@ -57,20 +70,23 @@ struct BufferedSessionChange { session_index: sp_staking::SessionIndex, } -pub trait Trait: - frame_system::Trait - + configuration::Trait - + paras::Trait - + scheduler::Trait - + inclusion::Trait - + router::Trait +pub trait Config: + frame_system::Config + + configuration::Config + + paras::Config + + scheduler::Config + + inclusion::Config + + session_info::Config + + dmp::Config + + ump::Config + + hrmp::Config { /// A randomness beacon. type Randomness: Randomness; } decl_storage! { - trait Store for Module as Initializer { + trait Store for Module as Initializer { /// Whether the parachains modules have been initialized within this block. /// /// Semantically a bool, but this guarantees it should never hit the trie, @@ -92,12 +108,12 @@ decl_storage! { } decl_error! { - pub enum Error for Module { } + pub enum Error for Module { } } decl_module! { /// The initializer module. - pub struct Module for enum Call where origin: ::Origin { + pub struct Module for enum Call where origin: ::Origin { type Error = Error; fn on_initialize(now: T::BlockNumber) -> Weight { @@ -121,13 +137,19 @@ decl_module! 
{ // - Paras // - Scheduler // - Inclusion + // - SessionInfo // - Validity - // - Router + // - DMP + // - UMP + // - HRMP let total_weight = configuration::Module::::initializer_initialize(now) + paras::Module::::initializer_initialize(now) + scheduler::Module::::initializer_initialize(now) + inclusion::Module::::initializer_initialize(now) + - router::Module::::initializer_initialize(now); + session_info::Module::::initializer_initialize(now) + + dmp::Module::::initializer_initialize(now) + + ump::Module::::initializer_initialize(now) + + hrmp::Module::::initializer_initialize(now); HasInitialized::set(Some(())); @@ -137,7 +159,10 @@ decl_module! { fn on_finalize() { // reverse initialization order. - router::Module::::initializer_finalize(); + hrmp::Module::::initializer_finalize(); + ump::Module::::initializer_finalize(); + dmp::Module::::initializer_finalize(); + session_info::Module::::initializer_finalize(); inclusion::Module::::initializer_finalize(); scheduler::Module::::initializer_finalize(); paras::Module::::initializer_finalize(); @@ -147,7 +172,7 @@ decl_module! { } } -impl Module { +impl Module { fn apply_new_session( session_index: sp_staking::SessionIndex, validators: Vec, @@ -181,7 +206,10 @@ impl Module { paras::Module::::initializer_on_new_session(¬ification); scheduler::Module::::initializer_on_new_session(¬ification); inclusion::Module::::initializer_on_new_session(¬ification); - router::Module::::initializer_on_new_session(¬ification); + session_info::Module::::initializer_on_new_session(¬ification); + dmp::Module::::initializer_on_new_session(¬ification); + ump::Module::::initializer_on_new_session(¬ification); + hrmp::Module::::initializer_on_new_session(¬ification); } /// Should be called when a new session occurs. 
Buffers the session notification to be applied @@ -210,11 +238,11 @@ impl Module { } } -impl sp_runtime::BoundToRuntimeAppPublic for Module { +impl sp_runtime::BoundToRuntimeAppPublic for Module { type Public = ValidatorId; } -impl pallet_session::OneSessionHandler for Module { +impl pallet_session::OneSessionHandler for Module { type Key = ValidatorId; fn on_genesis_session<'a, I: 'a>(_validators: I) diff --git a/runtime/parachains/src/lib.rs b/runtime/parachains/src/lib.rs index 833ff6ae4793d0c951d071acc68cdfefbe06c340..dfdf0be2850eb112e0ac6e0bb4e692e9fe7693ec 100644 --- a/runtime/parachains/src/lib.rs +++ b/runtime/parachains/src/lib.rs @@ -27,10 +27,12 @@ pub mod inclusion; pub mod inclusion_inherent; pub mod initializer; pub mod paras; -pub mod router; pub mod scheduler; -pub mod validity; +pub mod session_info; pub mod origin; +pub mod dmp; +pub mod ump; +pub mod hrmp; pub mod runtime_api_impl; @@ -40,3 +42,25 @@ mod util; mod mock; pub use origin::{Origin, ensure_parachain}; + +/// Schedule a para to be initialized at the start of the next session with the given genesis data. +pub fn schedule_para_initialize( + id: primitives::v1::Id, + genesis: paras::ParaGenesisArgs, +) { + >::schedule_para_initialize(id, genesis); +} + +/// Schedule a para to be cleaned up at the start of the next session. +pub fn schedule_para_cleanup(id: primitives::v1::Id) +where + T: paras::Config + + dmp::Config + + ump::Config + + hrmp::Config, +{ + >::schedule_para_cleanup(id); + >::schedule_para_cleanup(id); + >::schedule_para_cleanup(id); + >::schedule_para_cleanup(id); +} diff --git a/runtime/parachains/src/mock.rs b/runtime/parachains/src/mock.rs index 490c8083ad8f7efc05a6c1d7470c3defc4d77762..0481c1941fadadfd0a30103fb481d84a54260a2c 100644 --- a/runtime/parachains/src/mock.rs +++ b/runtime/parachains/src/mock.rs @@ -17,17 +17,14 @@ //! Mocks for all the traits. 
use sp_io::TestExternalities; -use sp_core::{H256}; -use sp_runtime::{ - Perbill, - traits::{ - BlakeTwo256, IdentityLookup, - }, +use sp_core::H256; +use sp_runtime::traits::{ + BlakeTwo256, IdentityLookup, }; -use primitives::v1::{BlockNumber, Header}; +use primitives::v1::{AuthorityDiscoveryId, BlockNumber, Header}; use frame_support::{ impl_outer_origin, impl_outer_dispatch, impl_outer_event, parameter_types, - weights::Weight, traits::Randomness as RandomnessT, + traits::Randomness as RandomnessT, }; use crate::inclusion; use crate as parachains; @@ -65,13 +62,15 @@ impl RandomnessT for TestRandomness { parameter_types! { pub const BlockHashCount: u32 = 250; - pub const MaximumBlockWeight: Weight = 4 * 1024 * 1024; - pub const MaximumBlockLength: u32 = 4 * 1024 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(4 * 1024 * 1024); } -impl frame_system::Trait for Test { +impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = BlockWeights; + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Call = Call; type Index = u64; @@ -83,13 +82,6 @@ impl frame_system::Trait for Test { type Header = Header; type Event = TestEvent; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = (); - type ExtrinsicBaseWeight = (); - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = (); type PalletInfo = (); type AccountData = pallet_balances::AccountData; @@ -98,24 +90,40 @@ impl frame_system::Trait for Test { type SystemWeightInfo = (); } -impl crate::initializer::Trait for Test { +impl crate::initializer::Config for Test { type Randomness = TestRandomness; } -impl crate::configuration::Trait for Test { } 
+impl crate::configuration::Config for Test { } -impl crate::paras::Trait for Test { +impl crate::paras::Config for Test { type Origin = Origin; } -impl crate::router::Trait for Test { } +impl crate::dmp::Config for Test { } + +impl crate::ump::Config for Test { + type UmpSink = crate::ump::mock_sink::MockUmpSink; +} + +impl crate::hrmp::Config for Test { + type Origin = Origin; +} -impl crate::scheduler::Trait for Test { } +impl crate::scheduler::Config for Test { } -impl crate::inclusion::Trait for Test { +impl crate::inclusion::Config for Test { type Event = TestEvent; } +impl crate::session_info::Config for Test { } + +impl crate::session_info::AuthorityDiscoveryConfig for Test { + fn authorities() -> Vec { + Vec::new() + } +} + pub type System = frame_system::Module; /// Mocked initializer. @@ -127,8 +135,14 @@ pub type Configuration = crate::configuration::Module; /// Mocked paras. pub type Paras = crate::paras::Module; -/// Mocked router. -pub type Router = crate::router::Module; +/// Mocked DMP +pub type Dmp = crate::dmp::Module; + +/// Mocked UMP +pub type Ump = crate::ump::Module; + +/// Mocked HRMP +pub type Hrmp = crate::hrmp::Module; /// Mocked scheduler. pub type Scheduler = crate::scheduler::Module; @@ -136,6 +150,9 @@ pub type Scheduler = crate::scheduler::Module; /// Mocked inclusion module. pub type Inclusion = crate::inclusion::Module; +/// Mocked session info module. +pub type SessionInfo = crate::session_info::Module; + /// Create a new set of test externalities. 
pub fn new_test_ext(state: GenesisConfig) -> TestExternalities { let mut t = state.system.build_storage::().unwrap(); diff --git a/runtime/parachains/src/origin.rs b/runtime/parachains/src/origin.rs index 3537b26a130aa47d0453e1c4d63bbc0499a411be..f238b91fba9d1a9552618489ee2332ec80457f66 100644 --- a/runtime/parachains/src/origin.rs +++ b/runtime/parachains/src/origin.rs @@ -19,7 +19,7 @@ use sp_std::result; use sp_runtime::traits::BadOrigin; use primitives::v1::Id as ParaId; -use codec::{Decode, Encode}; +use parity_scale_codec::{Decode, Encode}; /// Origin for the parachains. #[derive(PartialEq, Eq, Clone, Encode, Decode, sp_core::RuntimeDebug)] @@ -40,7 +40,7 @@ pub fn ensure_parachain(o: OuterOrigin) -> result::Result for enum Call where origin: ::Origin {} + pub struct Module for enum Call where origin: ::Origin {} +} + +impl From for Origin { + fn from(id: u32) -> Origin { + Origin::Parachain(id.into()) + } } diff --git a/runtime/parachains/src/paras.rs b/runtime/parachains/src/paras.rs index 4d7f1e2a07c54d6a73d9403e163c0363a2826d21..7a9375b029b74939f5e6022f0f84b097e4be744b 100644 --- a/runtime/parachains/src/paras.rs +++ b/runtime/parachains/src/paras.rs @@ -36,7 +36,7 @@ use frame_support::{ traits::Get, weights::Weight, }; -use codec::{Encode, Decode}; +use parity_scale_codec::{Encode, Decode}; use crate::{configuration, initializer::SessionChangeNotification}; use sp_core::RuntimeDebug; @@ -45,11 +45,11 @@ use serde::{Serialize, Deserialize}; pub use crate::Origin; -pub trait Trait: frame_system::Trait + configuration::Trait { +pub trait Config: frame_system::Config + configuration::Config { /// The outer origin type. type Origin: From - + From<::Origin> - + Into::Origin>>; + + From<::Origin> + + Into::Origin>>; } // the two key times necessary to track for every code replacement. @@ -177,7 +177,7 @@ pub struct ParaGenesisArgs { } decl_storage! { - trait Store for Module as Paras { + trait Store for Module as Paras { /// All parachains. 
Ordered ascending by ParaId. Parathreads are not included. Parachains get(fn parachains): Vec; /// All parathreads. @@ -224,7 +224,7 @@ decl_storage! { } #[cfg(feature = "std")] -fn build(config: &GenesisConfig) { +fn build(config: &GenesisConfig) { let mut parachains: Vec<_> = config.paras .iter() .filter(|(_, args)| args.parachain) @@ -244,17 +244,17 @@ fn build(config: &GenesisConfig) { } decl_error! { - pub enum Error for Module { } + pub enum Error for Module { } } decl_module! { /// The parachains configuration module. - pub struct Module for enum Call where origin: ::Origin { + pub struct Module for enum Call where origin: ::Origin { type Error = Error; } } -impl Module { +impl Module { /// Called by the initializer to initialize the configuration module. pub(crate) fn initializer_initialize(now: T::BlockNumber) -> Weight { Self::prune_old_code(now) @@ -396,7 +396,7 @@ impl Module { } /// Schedule a para to be initialized at the start of the next session. - pub fn schedule_para_initialize(id: ParaId, genesis: ParaGenesisArgs) -> Weight { + pub(crate) fn schedule_para_initialize(id: ParaId, genesis: ParaGenesisArgs) -> Weight { let dup = UpcomingParas::mutate(|v| { match v.binary_search(&id) { Ok(_) => true, @@ -418,7 +418,7 @@ impl Module { } /// Schedule a para to be cleaned up at the start of the next session. - pub fn schedule_para_cleanup(id: ParaId) -> Weight { + pub(crate) fn schedule_para_cleanup(id: ParaId) -> Weight { let upcoming_weight = UpcomingParas::mutate(|v| { match v.binary_search(&id) { Ok(i) => { @@ -541,6 +541,12 @@ impl Module { } } + /// Returns whether the given ID refers to a valid para. + pub fn is_valid_para(id: ParaId) -> bool { + Self::parachains().binary_search(&id).is_ok() + || Self::is_parathread(id) + } + /// Whether a para ID corresponds to any live parathread. 
pub(crate) fn is_parathread(id: ParaId) -> bool { Parathreads::get(&id).is_some() diff --git a/runtime/parachains/src/router.rs b/runtime/parachains/src/router.rs deleted file mode 100644 index ad12a33bc8c7b354f552e6992637c7eaadb80c1e..0000000000000000000000000000000000000000 --- a/runtime/parachains/src/router.rs +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! The router module is responsible for handling messaging. -//! -//! The core of the messaging is checking and processing messages sent out by the candidates, -//! routing the messages at their destinations and informing the parachains about the incoming -//! messages. - -use crate::{ - configuration, - initializer, -}; -use sp_std::prelude::*; -use frame_support::{decl_error, decl_module, decl_storage, weights::Weight}; -use primitives::v1::{Id as ParaId, InboundDownwardMessage, Hash}; - -mod dmp; - -pub use dmp::QueueDownwardMessageError; - -pub trait Trait: frame_system::Trait + configuration::Trait {} - -decl_storage! { - trait Store for Module as Router { - /// Paras that are to be cleaned up at the end of the session. - /// The entries are sorted ascending by the para id. - OutgoingParas: Vec; - - /* - * Downward Message Passing (DMP) - * - * Storage layout required for implementation of DMP. 
- */ - - /// The downward messages addressed for a certain para. - DownwardMessageQueues: map hasher(twox_64_concat) ParaId => Vec>; - /// A mapping that stores the downward message queue MQC head for each para. - /// - /// Each link in this chain has a form: - /// `(prev_head, B, H(M))`, where - /// - `prev_head`: is the previous head hash or zero if none. - /// - `B`: is the relay-chain block number in which a message was appended. - /// - `H(M)`: is the hash of the message being appended. - DownwardMessageQueueHeads: map hasher(twox_64_concat) ParaId => Hash; - } -} - -decl_error! { - pub enum Error for Module { } -} - -decl_module! { - /// The router module. - pub struct Module for enum Call where origin: ::Origin { - type Error = Error; - } -} - -impl Module { - /// Block initialization logic, called by initializer. - pub(crate) fn initializer_initialize(_now: T::BlockNumber) -> Weight { - 0 - } - - /// Block finalization logic, called by initializer. - pub(crate) fn initializer_finalize() {} - - /// Called by the initializer to note that a new session has started. - pub(crate) fn initializer_on_new_session( - _notification: &initializer::SessionChangeNotification, - ) { - let outgoing = OutgoingParas::take(); - for outgoing_para in outgoing { - Self::clean_dmp_after_outgoing(outgoing_para); - } - } - - /// Schedule a para to be cleaned up at the start of the next session. 
- pub fn schedule_para_cleanup(id: ParaId) { - OutgoingParas::mutate(|v| { - if let Err(i) = v.binary_search(&id) { - v.insert(i, id); - } - }); - } -} - -#[cfg(test)] -mod tests { - use super::*; - use primitives::v1::BlockNumber; - use frame_support::traits::{OnFinalize, OnInitialize}; - - use crate::mock::{System, Router, GenesisConfig as MockGenesisConfig}; - - pub(crate) fn run_to_block(to: BlockNumber, new_session: Option>) { - while System::block_number() < to { - let b = System::block_number(); - Router::initializer_finalize(); - System::on_finalize(b); - - System::on_initialize(b + 1); - System::set_block_number(b + 1); - - if new_session.as_ref().map_or(false, |v| v.contains(&(b + 1))) { - Router::initializer_on_new_session(&Default::default()); - } - Router::initializer_initialize(b + 1); - } - } - - pub(crate) fn default_genesis_config() -> MockGenesisConfig { - MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { - config: crate::configuration::HostConfiguration { - max_downward_message_size: 1024, - ..Default::default() - }, - }, - ..Default::default() - } - } -} diff --git a/runtime/parachains/src/runtime_api_impl/v1.rs b/runtime/parachains/src/runtime_api_impl/v1.rs index 5eb6ac60261ea00b7d484eb86408d5065f1d9498..f099c0db150be0af96fca5711a704dda118b7f5c 100644 --- a/runtime/parachains/src/runtime_api_impl/v1.rs +++ b/runtime/parachains/src/runtime_api_impl/v1.rs @@ -18,24 +18,24 @@ //! functions. 
use sp_std::prelude::*; +use sp_std::collections::btree_map::BTreeMap; use primitives::v1::{ ValidatorId, ValidatorIndex, GroupRotationInfo, CoreState, ValidationData, Id as ParaId, OccupiedCoreAssumption, SessionIndex, ValidationCode, CommittedCandidateReceipt, ScheduledCore, OccupiedCore, CoreOccupied, CoreIndex, - GroupIndex, CandidateEvent, PersistedValidationData, AuthorityDiscoveryId, - InboundDownwardMessage, + GroupIndex, CandidateEvent, PersistedValidationData, SessionInfo, + InboundDownwardMessage, InboundHrmpMessage, }; -use sp_runtime::traits::Zero; use frame_support::debug; -use crate::{initializer, inclusion, scheduler, configuration, paras, router}; +use crate::{initializer, inclusion, scheduler, configuration, paras, session_info, dmp, hrmp}; /// Implementation for the `validators` function of the runtime API. -pub fn validators() -> Vec { +pub fn validators() -> Vec { >::validators() } /// Implementation for the `validator_groups` function of the runtime API. -pub fn validator_groups() -> ( +pub fn validator_groups() -> ( Vec>, GroupRotationInfo, ) { @@ -46,7 +46,7 @@ pub fn validator_groups() -> ( } /// Implementation for the `availability_cores` function of the runtime API. 
-pub fn availability_cores() -> Vec> { +pub fn availability_cores() -> Vec> { let cores = >::availability_cores(); let parachains = >::parachains(); let config = >::config(); @@ -56,10 +56,6 @@ pub fn availability_cores() -> Vec() -> Vec( +fn with_assumption( para_id: ParaId, assumption: OccupiedCoreAssumption, build: F, ) -> Option where - Trait: inclusion::Trait, + Config: inclusion::Config, F: FnOnce() -> Option, { match assumption { OccupiedCoreAssumption::Included => { - >::force_enact(para_id); + >::force_enact(para_id); build() } OccupiedCoreAssumption::TimedOut => { build() } OccupiedCoreAssumption::Free => { - if >::pending_availability(para_id).is_some() { + if >::pending_availability(para_id).is_some() { None } else { build() @@ -189,7 +185,7 @@ fn with_assumption( } /// Implementation for the `full_validation_data` function of the runtime API. -pub fn full_validation_data( +pub fn full_validation_data( para_id: ParaId, assumption: OccupiedCoreAssumption, ) @@ -206,7 +202,7 @@ pub fn full_validation_data( } /// Implementation for the `persisted_validation_data` function of the runtime API. -pub fn persisted_validation_data( +pub fn persisted_validation_data( para_id: ParaId, assumption: OccupiedCoreAssumption, ) -> Option> { @@ -218,16 +214,15 @@ pub fn persisted_validation_data( } /// Implementation for the `check_validation_outputs` function of the runtime API. -pub fn check_validation_outputs( +pub fn check_validation_outputs( para_id: ParaId, - outputs: primitives::v1::ValidationOutputs, + outputs: primitives::v1::CandidateCommitments, ) -> bool { - // we strip detailed information from error here for the sake of simplicity of runtime API. - >::check_validation_outputs(para_id, outputs).is_ok() + >::check_validation_outputs(para_id, outputs) } /// Implementation for the `session_index_for_child` function of the runtime API. 
-pub fn session_index_for_child() -> SessionIndex { +pub fn session_index_for_child() -> SessionIndex { // Just returns the session index from `inclusion`. Runtime APIs follow // initialization so the initializer will have applied any pending session change // which is expected at the child of the block whose context the runtime API was invoked @@ -239,7 +234,7 @@ pub fn session_index_for_child() -> SessionIndex { } /// Implementation for the `validation_code` function of the runtime API. -pub fn validation_code( +pub fn validation_code( para_id: ParaId, assumption: OccupiedCoreAssumption, ) -> Option { @@ -250,8 +245,16 @@ pub fn validation_code( ) } +/// Implementation for the `historical_validation_code` function of the runtime API. +pub fn historical_validation_code( + para_id: ParaId, + context_height: T::BlockNumber, +) -> Option { + >::validation_code_at(para_id, context_height, None) +} + /// Implementation for the `candidate_pending_availability` function of the runtime API. -pub fn candidate_pending_availability(para_id: ParaId) +pub fn candidate_pending_availability(para_id: ParaId) -> Option> { >::candidate_pending_availability(para_id) @@ -262,8 +265,8 @@ pub fn candidate_pending_availability(para_id: ParaId) // this means it can run in a different session than other runtime APIs at the same block. pub fn candidate_events(extract_event: F) -> Vec> where - T: initializer::Trait, - F: Fn(::Event) -> Option>, + T: initializer::Config, + F: Fn(::Event) -> Option>, { use inclusion::Event as RawEvent; @@ -277,33 +280,21 @@ where .collect() } -/// Get the `AuthorityDiscoveryId`s corresponding to the given `ValidatorId`s. -/// Currently this request is limited to validators in the current session. -/// -/// We assume that every validator runs authority discovery, -/// which would allow us to establish point-to-point connection to given validators. 
-// FIXME: handle previous sessions: -// https://github.com/paritytech/polkadot/issues/1461 -pub fn validator_discovery(validators: Vec) -> Vec> -where - T: initializer::Trait + pallet_authority_discovery::Trait, -{ - // FIXME: the mapping might be invalid if a session change happens in between the calls - // use SessionInfo from https://github.com/paritytech/polkadot/pull/1691 - let current_validators = >::validators(); - let authorities = >::authorities(); - // We assume the same ordering in authorities as in validators so we can do an index search - validators.iter().map(|id| { - // FIXME: linear search is slow O(n^2) - // use SessionInfo from https://github.com/paritytech/polkadot/pull/1691 - let validator_index = current_validators.iter().position(|v| v == id); - validator_index.and_then(|i| authorities.get(i).cloned()) - }).collect() +/// Get the session info for the given session, if stored. +pub fn session_info(index: SessionIndex) -> Option { + >::session_info(index) } /// Implementation for the `dmq_contents` function of the runtime API. -pub fn dmq_contents( +pub fn dmq_contents( recipient: ParaId, ) -> Vec> { - >::dmq_contents(recipient) + >::dmq_contents(recipient) +} + +/// Implementation for the `inbound_hrmp_channels_contents` function of the runtime API. 
+pub fn inbound_hrmp_channels_contents( + recipient: ParaId, +) -> BTreeMap>> { + >::inbound_hrmp_channels_contents(recipient) } diff --git a/runtime/parachains/src/scheduler.rs b/runtime/parachains/src/scheduler.rs index 455a07f94e7493b890e83042f3a77e9610472a08..7eaf8e6c1367dcd9582371a906c37b4ecf6afe8e 100644 --- a/runtime/parachains/src/scheduler.rs +++ b/runtime/parachains/src/scheduler.rs @@ -45,8 +45,8 @@ use frame_support::{ decl_storage, decl_module, decl_error, weights::Weight, }; -use codec::{Encode, Decode}; -use sp_runtime::traits::{Saturating, Zero}; +use parity_scale_codec::{Encode, Decode}; +use sp_runtime::traits::Saturating; use rand::{SeedableRng, seq::SliceRandom}; use rand_chacha::ChaCha20Rng; @@ -153,10 +153,10 @@ impl CoreAssignment { } } -pub trait Trait: frame_system::Trait + configuration::Trait + paras::Trait { } +pub trait Config: frame_system::Config + configuration::Config + paras::Config { } decl_storage! { - trait Store for Module as ParaScheduler { + trait Store for Module as ParaScheduler { /// All the validator groups. One for each core. /// /// Bound: The number of cores is the sum of the numbers of parachains and parathread multiplexers. @@ -190,17 +190,17 @@ decl_storage! { } decl_error! { - pub enum Error for Module { } + pub enum Error for Module { } } decl_module! { /// The scheduler module. - pub struct Module for enum Call where origin: ::Origin { + pub struct Module for enum Call where origin: ::Origin { type Error = Error; } } -impl Module { +impl Module { /// Called by the initializer to initialize the scheduler module. pub(crate) fn initializer_initialize(_now: T::BlockNumber) -> Weight { Self::schedule(Vec::new()); @@ -273,8 +273,17 @@ impl Module { shuffled_indices.shuffle(&mut rng); - let group_base_size = validators.len() / n_cores as usize; - let n_larger_groups = validators.len() % n_cores as usize; + // trim to max per cores. do this after shuffling. 
+ { + if let Some(max_per_core) = config.max_validators_per_core { + let max_total = max_per_core * n_cores; + shuffled_indices.truncate(max_total as usize); + } + } + + let group_base_size = shuffled_indices.len() / n_cores as usize; + let n_larger_groups = shuffled_indices.len() % n_cores as usize; + let groups: Vec> = (0..n_cores).map(|core_id| { let n_members = if (core_id as usize) < n_larger_groups { group_base_size + 1 @@ -367,7 +376,7 @@ impl Module { /// Schedule all unassigned cores, where possible. Provide a list of cores that should be considered /// newly-freed along with the reason for them being freed. The list is assumed to be sorted in /// ascending order by core index. - pub(crate) fn schedule(just_freed_cores: Vec<(CoreIndex, FreedReason)>) { + pub(crate) fn schedule(just_freed_cores: impl IntoIterator) { let mut cores = AvailabilityCores::get(); let config = >::config(); @@ -495,6 +504,7 @@ impl Module { Scheduled::set(scheduled); ParathreadQueue::set(parathread_queue); + AvailabilityCores::set(cores); } /// Note that the given cores have become occupied. Behavior undefined if any of the given cores were not scheduled @@ -558,11 +568,6 @@ impl Module { if at < session_start_block { return None } - if config.group_rotation_frequency.is_zero() { - // interpret this as "no rotations" - return Some(GroupIndex(core.0)); - } - let validator_groups = ValidatorGroups::get(); if core.0 as usize >= validator_groups.len() { return None } @@ -589,9 +594,6 @@ impl Module { /// timeouts, i.e. only within `max(config.chain_availability_period, config.thread_availability_period)` /// of the last rotation would this return `Some`, unless there are no rotations. /// - /// If there are no rotations (config.group_rotation_frequency == 0), - /// availability timeouts can occur at any block. 
- /// /// This really should not be a box, but is working around a compiler limitation filed here: /// https://github.com/rust-lang/rust/issues/73226 /// which prevents us from testing the code if using `impl Trait`. @@ -601,12 +603,7 @@ impl Module { let session_start = >::get(); let blocks_since_session_start = now.saturating_sub(session_start); - let no_rotation = config.group_rotation_frequency.is_zero(); - let blocks_since_last_rotation = if no_rotation { - ::zero() - } else { - blocks_since_session_start % config.group_rotation_frequency - }; + let blocks_since_last_rotation = blocks_since_session_start % config.group_rotation_frequency; let absolute_cutoff = sp_std::cmp::max( config.chain_availability_period, @@ -1054,6 +1051,68 @@ mod tests { }); } + #[test] + fn session_change_takes_only_max_per_core() { + let config = { + let mut config = default_config(); + config.parathread_cores = 0; + config.max_validators_per_core = Some(1); + config + }; + + let genesis_config = MockGenesisConfig { + configuration: crate::configuration::GenesisConfig { + config: config.clone(), + ..Default::default() + }, + ..Default::default() + }; + + new_test_ext(genesis_config).execute_with(|| { + let chain_a = ParaId::from(1); + let chain_b = ParaId::from(2); + + // ensure that we have 5 groups by registering 2 parachains. 
+ Paras::schedule_para_initialize(chain_a, ParaGenesisArgs { + genesis_head: Vec::new().into(), + validation_code: Vec::new().into(), + parachain: true, + }); + Paras::schedule_para_initialize(chain_b, ParaGenesisArgs { + genesis_head: Vec::new().into(), + validation_code: Vec::new().into(), + parachain: true, + }); + + run_to_block(1, |number| match number { + 1 => Some(SessionChangeNotification { + new_config: config.clone(), + validators: vec![ + ValidatorId::from(Sr25519Keyring::Alice.public()), + ValidatorId::from(Sr25519Keyring::Bob.public()), + ValidatorId::from(Sr25519Keyring::Charlie.public()), + ValidatorId::from(Sr25519Keyring::Dave.public()), + ValidatorId::from(Sr25519Keyring::Eve.public()), + ValidatorId::from(Sr25519Keyring::Ferdie.public()), + ValidatorId::from(Sr25519Keyring::One.public()), + ], + random_seed: [99; 32], + ..Default::default() + }), + _ => None, + }); + + let groups = ValidatorGroups::get(); + assert_eq!(groups.len(), 2); + + // Even though there are 7 validators, only 1 validator per group + // due to the max. 
+ for i in 0..2 { + assert_eq!(groups[i].len(), 1); + } + }); + } + #[test] fn schedule_schedules() { let genesis_config = MockGenesisConfig { @@ -1328,6 +1387,100 @@ mod tests { }); } + #[test] + fn schedule_clears_availability_cores() { + let genesis_config = MockGenesisConfig { + configuration: crate::configuration::GenesisConfig { + config: default_config(), + ..Default::default() + }, + ..Default::default() + }; + + let chain_a = ParaId::from(1); + let chain_b = ParaId::from(2); + let chain_c = ParaId::from(3); + + let schedule_blank_para = |id, is_chain| Paras::schedule_para_initialize(id, ParaGenesisArgs { + genesis_head: Vec::new().into(), + validation_code: Vec::new().into(), + parachain: is_chain, + }); + + new_test_ext(genesis_config).execute_with(|| { + assert_eq!(default_config().parathread_cores, 3); + + // register 3 parachains + schedule_blank_para(chain_a, true); + schedule_blank_para(chain_b, true); + schedule_blank_para(chain_c, true); + + // start a new session to activate, 5 validators for 5 cores. + run_to_block(1, |number| match number { + 1 => Some(SessionChangeNotification { + new_config: default_config(), + validators: vec![ + ValidatorId::from(Sr25519Keyring::Alice.public()), + ValidatorId::from(Sr25519Keyring::Bob.public()), + ValidatorId::from(Sr25519Keyring::Charlie.public()), + ValidatorId::from(Sr25519Keyring::Dave.public()), + ValidatorId::from(Sr25519Keyring::Eve.public()), + ], + ..Default::default() + }), + _ => None, + }); + + run_to_block(2, |_| None); + + assert_eq!(Scheduler::scheduled().len(), 3); + + // cores 0, 1, and 2 should be occupied. mark them as such. + Scheduler::occupied(&[CoreIndex(0), CoreIndex(1), CoreIndex(2)]); + + { + let cores = AvailabilityCores::get(); + + assert!(cores[0].is_some()); + assert!(cores[1].is_some()); + assert!(cores[2].is_some()); + + assert!(Scheduler::scheduled().is_empty()); + } + + run_to_block(3, |_| None); + + // now note that cores 0 and 2 were freed. 
+ Scheduler::schedule(vec![ + (CoreIndex(0), FreedReason::Concluded), + (CoreIndex(2), FreedReason::Concluded), + ]); + + { + let scheduled = Scheduler::scheduled(); + + assert_eq!(scheduled.len(), 2); + assert_eq!(scheduled[0], CoreAssignment { + core: CoreIndex(0), + para_id: chain_a, + kind: AssignmentKind::Parachain, + group_idx: GroupIndex(0), + }); + assert_eq!(scheduled[1], CoreAssignment { + core: CoreIndex(2), + para_id: chain_c, + kind: AssignmentKind::Parachain, + group_idx: GroupIndex(2), + }); + + // The freed cores should be `None` in `AvailabilityCores`. + let cores = AvailabilityCores::get(); + assert!(cores[0].is_none()); + assert!(cores[2].is_none()); + } + }); + } + #[test] fn schedule_rotates_groups() { let config = { @@ -1492,8 +1645,10 @@ mod tests { } = default_config(); let collator = CollatorId::from(Sr25519Keyring::Alice.public()); - assert!(chain_availability_period < thread_availability_period && - thread_availability_period < group_rotation_frequency); + assert!( + chain_availability_period < thread_availability_period + && thread_availability_period < group_rotation_frequency + ); let chain_a = ParaId::from(1); let thread_a = ParaId::from(2); @@ -1583,92 +1738,6 @@ mod tests { }); } - #[test] - fn availability_predicate_no_rotation() { - let genesis_config = MockGenesisConfig { - configuration: crate::configuration::GenesisConfig { - config: HostConfiguration { - group_rotation_frequency: 0, // no rotation - ..default_config() - }, - ..Default::default() - }, - ..Default::default() - }; - let HostConfiguration { - chain_availability_period, - thread_availability_period, - .. 
- } = default_config(); - let collator = CollatorId::from(Sr25519Keyring::Alice.public()); - - let chain_a = ParaId::from(1); - let thread_a = ParaId::from(2); - - let schedule_blank_para = |id, is_chain| Paras::schedule_para_initialize(id, ParaGenesisArgs { - genesis_head: Vec::new().into(), - validation_code: Vec::new().into(), - parachain: is_chain, - }); - - new_test_ext(genesis_config).execute_with(|| { - schedule_blank_para(chain_a, true); - schedule_blank_para(thread_a, false); - - // start a new session with our chain & thread registered. - run_to_block(1, |number| match number { - 1 => Some(SessionChangeNotification { - new_config: HostConfiguration{ - // Note: the `group_rotation_frequency` config change - // is not accounted for on session change - // group_rotation_frequency: 0, - ..default_config() - }, - validators: vec![ - ValidatorId::from(Sr25519Keyring::Alice.public()), - ValidatorId::from(Sr25519Keyring::Bob.public()), - ValidatorId::from(Sr25519Keyring::Charlie.public()), - ValidatorId::from(Sr25519Keyring::Dave.public()), - ValidatorId::from(Sr25519Keyring::Eve.public()), - ], - ..Default::default() - }), - _ => None, - }); - - // assign some availability cores. - { - AvailabilityCores::mutate(|cores| { - cores[0] = Some(CoreOccupied::Parachain); - cores[1] = Some(CoreOccupied::Parathread(ParathreadEntry { - claim: ParathreadClaim(thread_a, collator), - retries: 0, - })) - }); - } - run_to_block(1 + 1, |_| None); - run_to_block(1 + 1 + 100500, |_| None); - { - let pred = Scheduler::availability_timeout_predicate() - .expect("predicate exists with no rotation"); - - let now = System::block_number(); - - assert!(!pred(CoreIndex(0), now)); // assigned: chain - assert!(!pred(CoreIndex(1), now)); // assigned: thread - assert!(pred(CoreIndex(2), now)); - - // check the tighter bound on chains vs threads. 
- assert!(pred(CoreIndex(0), now - chain_availability_period)); - assert!(pred(CoreIndex(1), now - thread_availability_period)); - - // check the threshold is exact. - assert!(!pred(CoreIndex(0), now - chain_availability_period + 1)); - assert!(!pred(CoreIndex(1), now - thread_availability_period + 1)); - } - }); - } - #[test] fn next_up_on_available_uses_next_scheduled_or_none_for_thread() { let mut config = default_config(); diff --git a/runtime/parachains/src/session_info.rs b/runtime/parachains/src/session_info.rs new file mode 100644 index 0000000000000000000000000000000000000000..decc25a20d01fdcf34329710820b0f12a7fc39ba --- /dev/null +++ b/runtime/parachains/src/session_info.rs @@ -0,0 +1,281 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! The session info module provides information about validator sets +//! from prior sessions needed for approvals and disputes. +//! +//! See https://w3f.github.io/parachain-implementers-guide/runtime/session_info.html. 
+ +use primitives::v1::{AuthorityDiscoveryId, SessionIndex, SessionInfo}; +use frame_support::{ + decl_storage, decl_module, decl_error, + weights::Weight, +}; +use crate::{configuration, paras, scheduler}; +use sp_std::{cmp, vec::Vec}; + +pub trait Config: + frame_system::Config + + configuration::Config + + paras::Config + + scheduler::Config + + AuthorityDiscoveryConfig +{ +} + +decl_storage! { + trait Store for Module as ParaSessionInfo { + /// The earliest session for which previous session info is stored. + EarliestStoredSession get(fn earliest_stored_session): SessionIndex; + /// Session information in a rolling window. + /// Should have an entry in range `EarliestStoredSession..=CurrentSessionIndex`. + /// Does not have any entries before the session index in the first session change notification. + Sessions get(fn session_info): map hasher(identity) SessionIndex => Option; + } +} + +decl_error! { + pub enum Error for Module { } +} + +decl_module! { + /// The session info module. + pub struct Module for enum Call where origin: ::Origin { + type Error = Error; + } +} + +/// An abstraction for the authority discovery pallet +/// to help with mock testing. +pub trait AuthorityDiscoveryConfig { + /// Retrieve authority identifiers of the current and next authority set. + fn authorities() -> Vec; +} + +impl AuthorityDiscoveryConfig for T { + fn authorities() -> Vec { + >::authorities() + } +} + +impl Module { + /// Handle an incoming session change. 
+ pub(crate) fn initializer_on_new_session( + notification: &crate::initializer::SessionChangeNotification + ) { + let config = >::config(); + + let dispute_period = config.dispute_period; + let n_parachains = >::parachains().len() as u32; + + let validators = notification.validators.clone(); + let discovery_keys = ::authorities(); + // FIXME: once we store these keys: https://github.com/paritytech/polkadot/issues/1975 + let approval_keys = Default::default(); + let validator_groups = >::validator_groups(); + let n_cores = n_parachains + config.parathread_cores; + let zeroth_delay_tranche_width = config.zeroth_delay_tranche_width; + let relay_vrf_modulo_samples = config.relay_vrf_modulo_samples; + let n_delay_tranches = config.n_delay_tranches; + let no_show_slots = config.no_show_slots; + let needed_approvals = config.needed_approvals; + + let new_session_index = notification.session_index; + let old_earliest_stored_session = EarliestStoredSession::get(); + let dispute_period = cmp::max(1, dispute_period); + let new_earliest_stored_session = new_session_index.checked_sub(dispute_period - 1).unwrap_or(0); + let new_earliest_stored_session = cmp::max(new_earliest_stored_session, old_earliest_stored_session); + // update `EarliestStoredSession` based on `config.dispute_period` + EarliestStoredSession::set(new_earliest_stored_session); + // remove all entries from `Sessions` from the previous value up to the new value + // avoid a potentially heavy loop when introduced on a live chain + if old_earliest_stored_session != 0 || Sessions::get(0).is_some() { + for idx in old_earliest_stored_session..new_earliest_stored_session { + Sessions::remove(&idx); + } + } + // create a new entry in `Sessions` with information about the current session + let new_session_info = SessionInfo { + validators, + discovery_keys, + approval_keys, + validator_groups, + n_cores, + zeroth_delay_tranche_width, + relay_vrf_modulo_samples, + n_delay_tranches, + no_show_slots, + needed_approvals, + 
}; + Sessions::insert(&new_session_index, &new_session_info); + } + + /// Called by the initializer to initialize the session info module. + pub(crate) fn initializer_initialize(_now: T::BlockNumber) -> Weight { + 0 + } + + /// Called by the initializer to finalize the session info module. + pub(crate) fn initializer_finalize() {} +} + + +#[cfg(test)] +mod tests { + use super::*; + use crate::mock::{ + new_test_ext, Configuration, SessionInfo, System, GenesisConfig as MockGenesisConfig, + Origin, + }; + use crate::initializer::SessionChangeNotification; + use crate::configuration::HostConfiguration; + use frame_support::traits::{OnFinalize, OnInitialize}; + use primitives::v1::BlockNumber; + + fn run_to_block( + to: BlockNumber, + new_session: impl Fn(BlockNumber) -> Option>, + ) { + while System::block_number() < to { + let b = System::block_number(); + + SessionInfo::initializer_finalize(); + Configuration::initializer_finalize(); + + System::on_finalize(b); + + System::on_initialize(b + 1); + System::set_block_number(b + 1); + + if let Some(notification) = new_session(b + 1) { + Configuration::initializer_on_new_session(¬ification.validators, ¬ification.queued); + SessionInfo::initializer_on_new_session(¬ification); + } + + Configuration::initializer_initialize(b + 1); + SessionInfo::initializer_initialize(b + 1); + } + } + + fn default_config() -> HostConfiguration { + HostConfiguration { + parathread_cores: 1, + dispute_period: 2, + needed_approvals: 3, + ..Default::default() + } + } + + fn genesis_config() -> MockGenesisConfig { + MockGenesisConfig { + configuration: configuration::GenesisConfig { + config: default_config(), + ..Default::default() + }, + ..Default::default() + } + } + + fn session_changes(n: BlockNumber) -> Option> { + match n { + 100 => Some(SessionChangeNotification { + session_index: 10, + ..Default::default() + }), + 200 => Some(SessionChangeNotification { + session_index: 20, + ..Default::default() + }), + 300 => 
Some(SessionChangeNotification { + session_index: 30, + ..Default::default() + }), + 400 => Some(SessionChangeNotification { + session_index: 40, + ..Default::default() + }), + _ => None, + } + } + + fn new_session_every_block(n: BlockNumber) -> Option> { + Some(SessionChangeNotification{ + session_index: n, + ..Default::default() + }) + } + + #[test] + fn session_pruning_is_based_on_dispute_period() { + new_test_ext(genesis_config()).execute_with(|| { + run_to_block(100, session_changes); + assert_eq!(EarliestStoredSession::get(), 9); + + // changing dispute_period works + let dispute_period = 5; + Configuration::set_dispute_period(Origin::root(), dispute_period).unwrap(); + run_to_block(200, session_changes); + assert_eq!(EarliestStoredSession::get(), 20 - dispute_period + 1); + + // we don't have that many sessions stored + let new_dispute_period = 16; + Configuration::set_dispute_period(Origin::root(), new_dispute_period).unwrap(); + run_to_block(300, session_changes); + assert_eq!(EarliestStoredSession::get(), 20 - dispute_period + 1); + + // now we do + run_to_block(400, session_changes); + assert_eq!(EarliestStoredSession::get(), 40 - new_dispute_period + 1); + }) + } + + #[test] + fn session_info_is_based_on_config() { + new_test_ext(genesis_config()).execute_with(|| { + run_to_block(1, new_session_every_block); + let session = Sessions::get(&1).unwrap(); + assert_eq!(session.needed_approvals, 3); + + // change some param + Configuration::set_needed_approvals(Origin::root(), 42).unwrap(); + run_to_block(2, new_session_every_block); + let session = Sessions::get(&2).unwrap(); + assert_eq!(session.needed_approvals, 42); + }) + } + + #[test] + fn session_pruning_avoids_heavy_loop() { + new_test_ext(genesis_config()).execute_with(|| { + let start = 1_000_000_000; + System::on_initialize(start); + System::set_block_number(start); + + if let Some(notification) = new_session_every_block(start) { + Configuration::initializer_on_new_session(¬ification.validators, 
¬ification.queued); + SessionInfo::initializer_on_new_session(¬ification); + } + + Configuration::initializer_initialize(start); + SessionInfo::initializer_initialize(start); + + assert_eq!(EarliestStoredSession::get(), start - 1); + + run_to_block(start + 1, new_session_every_block); + assert_eq!(EarliestStoredSession::get(), start); + }) + } +} diff --git a/runtime/parachains/src/ump.rs b/runtime/parachains/src/ump.rs new file mode 100644 index 0000000000000000000000000000000000000000..f18ad250b92daa530f39c94a38ad1fe1354f772a --- /dev/null +++ b/runtime/parachains/src/ump.rs @@ -0,0 +1,874 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use crate::{ + configuration::{self, HostConfiguration}, + initializer, +}; +use sp_std::{fmt, prelude::*}; +use sp_std::collections::{btree_map::BTreeMap, vec_deque::VecDeque}; +use frame_support::{decl_module, decl_storage, StorageMap, StorageValue, weights::Weight, traits::Get}; +use primitives::v1::{Id as ParaId, UpwardMessage}; + +/// All upward messages coming from parachains will be funneled into an implementation of this trait. +/// +/// The message is opaque from the perspective of UMP. The message size can range from 0 to +/// `config.max_upward_message_size`. 
+/// +/// It's up to the implementation of this trait to decide what to do with a message as long as it +/// returns the amount of weight consumed in the process of handling. Ignoring a message is a valid +/// strategy. +/// +/// There are no guarantees on how much time it takes for the message sent by a candidate to end up +/// in the sink after the candidate was enacted. That typically depends on the UMP traffic, the sizes +/// of upward messages and the configuration of UMP. +/// +/// It is possible that by the time the message is sank the origin parachain was offboarded. It is +/// up to the implementer to check that if it cares. +pub trait UmpSink { + /// Process an incoming upward message and return the amount of weight it consumed. + /// + /// See the trait docs for more details. + fn process_upward_message(origin: ParaId, msg: Vec) -> Weight; +} + +/// An implementation of a sink that just swallows the message without consuming any weight. +impl UmpSink for () { + fn process_upward_message(_: ParaId, _: Vec) -> Weight { + 0 + } +} + +/// An error returned by [`check_upward_messages`] that indicates a violation of one of acceptance +/// criteria rules. 
+pub enum AcceptanceCheckErr { + MoreMessagesThanPermitted { + sent: u32, + permitted: u32, + }, + MessageSize { + idx: u32, + msg_size: u32, + max_size: u32, + }, + CapacityExceeded { + count: u32, + limit: u32, + }, + TotalSizeExceeded { + total_size: u32, + limit: u32, + }, +} + +impl fmt::Debug for AcceptanceCheckErr { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + match *self { + AcceptanceCheckErr::MoreMessagesThanPermitted { sent, permitted } => write!( + fmt, + "more upward messages than permitted by config ({} > {})", + sent, permitted, + ), + AcceptanceCheckErr::MessageSize { + idx, + msg_size, + max_size, + } => write!( + fmt, + "upward message idx {} larger than permitted by config ({} > {})", + idx, msg_size, max_size, + ), + AcceptanceCheckErr::CapacityExceeded { count, limit } => write!( + fmt, + "the ump queue would have more items than permitted by config ({} > {})", + count, limit, + ), + AcceptanceCheckErr::TotalSizeExceeded { total_size, limit } => write!( + fmt, + "the ump queue would have grown past the max size permitted by config ({} > {})", + total_size, limit, + ), + } + } +} + +pub trait Config: frame_system::Config + configuration::Config { + /// A place where all received upward messages are funneled. + type UmpSink: UmpSink; +} + +decl_storage! { + trait Store for Module as Ump { + /// Paras that are to be cleaned up at the end of the session. + /// The entries are sorted ascending by the para id. + OutgoingParas: Vec; + + /// The messages waiting to be handled by the relay-chain originating from a certain parachain. + /// + /// Note that some upward messages might have been already processed by the inclusion logic. E.g. + /// channel management messages. + /// + /// The messages are processed in FIFO order. + RelayDispatchQueues: map hasher(twox_64_concat) ParaId => VecDeque; + /// Size of the dispatch queues. Caches sizes of the queues in `RelayDispatchQueue`. 
+ /// + /// First item in the tuple is the count of messages and second + /// is the total length (in bytes) of the message payloads. + /// + /// Note that this is an auxilary mapping: it's possible to tell the byte size and the number of + /// messages only looking at `RelayDispatchQueues`. This mapping is separate to avoid the cost of + /// loading the whole message queue if only the total size and count are required. + /// + /// Invariant: + /// - The set of keys should exactly match the set of keys of `RelayDispatchQueues`. + RelayDispatchQueueSize: map hasher(twox_64_concat) ParaId => (u32, u32); + /// The ordered list of `ParaId`s that have a `RelayDispatchQueue` entry. + /// + /// Invariant: + /// - The set of items from this vector should be exactly the set of the keys in + /// `RelayDispatchQueues` and `RelayDispatchQueueSize`. + NeedsDispatch: Vec; + /// This is the para that gets will get dispatched first during the next upward dispatchable queue + /// execution round. + /// + /// Invariant: + /// - If `Some(para)`, then `para` must be present in `NeedsDispatch`. + NextDispatchRoundStartWith: Option; + } +} + +decl_module! { + /// The UMP module. + pub struct Module for enum Call where origin: ::Origin { + } +} + +/// Routines related to the upward message passing. +impl Module { + /// Block initialization logic, called by initializer. + pub(crate) fn initializer_initialize(_now: T::BlockNumber) -> Weight { + 0 + } + + /// Block finalization logic, called by initializer. + pub(crate) fn initializer_finalize() {} + + /// Called by the initializer to note that a new session has started. + pub(crate) fn initializer_on_new_session( + _notification: &initializer::SessionChangeNotification, + ) { + Self::perform_outgoing_para_cleanup(); + } + + /// Iterate over all paras that were registered for offboarding and remove all the data + /// associated with them. 
+ fn perform_outgoing_para_cleanup() { + let outgoing = OutgoingParas::take(); + for outgoing_para in outgoing { + Self::clean_ump_after_outgoing(outgoing_para); + } + } + + /// Schedule a para to be cleaned up at the start of the next session. + pub(crate) fn schedule_para_cleanup(id: ParaId) { + OutgoingParas::mutate(|v| { + if let Err(i) = v.binary_search(&id) { + v.insert(i, id); + } + }); + } + + fn clean_ump_after_outgoing(outgoing_para: ParaId) { + ::RelayDispatchQueueSize::remove(&outgoing_para); + ::RelayDispatchQueues::remove(&outgoing_para); + + // Remove the outgoing para from the `NeedsDispatch` list and from + // `NextDispatchRoundStartWith`. + // + // That's needed for maintaining invariant that `NextDispatchRoundStartWith` points to an + // existing item in `NeedsDispatch`. + ::NeedsDispatch::mutate(|v| { + if let Ok(i) = v.binary_search(&outgoing_para) { + v.remove(i); + } + }); + ::NextDispatchRoundStartWith::mutate(|v| { + *v = v.filter(|p| *p == outgoing_para) + }); + } + + /// Check that all the upward messages sent by a candidate pass the acceptance criteria. Returns + /// false, if any of the messages doesn't pass. 
+ pub(crate) fn check_upward_messages( + config: &HostConfiguration, + para: ParaId, + upward_messages: &[UpwardMessage], + ) -> Result<(), AcceptanceCheckErr> { + if upward_messages.len() as u32 > config.max_upward_message_num_per_candidate { + return Err(AcceptanceCheckErr::MoreMessagesThanPermitted { + sent: upward_messages.len() as u32, + permitted: config.max_upward_message_num_per_candidate, + }); + } + + let (mut para_queue_count, mut para_queue_size) = + ::RelayDispatchQueueSize::get(¶); + + for (idx, msg) in upward_messages.into_iter().enumerate() { + let msg_size = msg.len() as u32; + if msg_size > config.max_upward_message_size { + return Err(AcceptanceCheckErr::MessageSize { + idx: idx as u32, + msg_size, + max_size: config.max_upward_message_size, + }); + } + para_queue_count += 1; + para_queue_size += msg_size; + } + + // make sure that the queue is not overfilled. + // we do it here only once since returning false invalidates the whole relay-chain block. + if para_queue_count > config.max_upward_queue_count { + return Err(AcceptanceCheckErr::CapacityExceeded { + count: para_queue_count, + limit: config.max_upward_queue_count, + }); + } + if para_queue_size > config.max_upward_queue_size { + return Err(AcceptanceCheckErr::TotalSizeExceeded { + total_size: para_queue_size, + limit: config.max_upward_queue_size, + }); + } + + Ok(()) + } + + /// Enacts all the upward messages sent by a candidate. 
+ pub(crate) fn enact_upward_messages( + para: ParaId, + upward_messages: Vec, + ) -> Weight { + let mut weight = 0; + + if !upward_messages.is_empty() { + let (extra_cnt, extra_size) = upward_messages + .iter() + .fold((0, 0), |(cnt, size), d| (cnt + 1, size + d.len() as u32)); + + ::RelayDispatchQueues::mutate(¶, |v| { + v.extend(upward_messages.into_iter()) + }); + + ::RelayDispatchQueueSize::mutate(¶, |(ref mut cnt, ref mut size)| { + *cnt += extra_cnt; + *size += extra_size; + }); + + ::NeedsDispatch::mutate(|v| { + if let Err(i) = v.binary_search(¶) { + v.insert(i, para); + } + }); + + weight += T::DbWeight::get().reads_writes(3, 3); + } + + weight + } + + /// Devote some time into dispatching pending upward messages. + pub(crate) fn process_pending_upward_messages() { + let mut used_weight_so_far = 0; + + let config = >::config(); + let mut cursor = NeedsDispatchCursor::new::(); + let mut queue_cache = QueueCache::new(); + + while let Some(dispatchee) = cursor.peek() { + if used_weight_so_far >= config.preferred_dispatchable_upward_messages_step_weight { + // Then check whether we've reached or overshoot the + // preferred weight for the dispatching stage. + // + // if so - bail. + break; + } + + // dequeue the next message from the queue of the dispatchee + let (upward_message, became_empty) = queue_cache.dequeue::(dispatchee); + if let Some(upward_message) = upward_message { + used_weight_so_far += + T::UmpSink::process_upward_message(dispatchee, upward_message); + } + + if became_empty { + // the queue is empty now - this para doesn't need attention anymore. + cursor.remove(); + } else { + cursor.advance(); + } + } + + cursor.flush::(); + queue_cache.flush::(); + } +} + +/// To avoid constant fetching, deserializing and serialization the queues are cached. +/// +/// After an item dequeued from a queue for the first time, the queue is stored in this struct rather +/// than being serialized and persisted. 
+/// +/// This implementation works best when: +/// +/// 1. when the queues are shallow +/// 2. the dispatcher makes more than one cycle +/// +/// if the queues are deep and there are many we would load and keep the queues for a long time, +/// thus increasing the peak memory consumption of the wasm runtime. Under such conditions persisting +/// queues might play better since it's unlikely that they are going to be requested once more. +/// +/// On the other hand, the situation when deep queues exist and it takes more than one dipsatcher +/// cycle to traverse the queues is already sub-optimal and better be avoided. +/// +/// This struct is not supposed to be dropped but rather to be consumed by [`flush`]. +struct QueueCache(BTreeMap); + +struct QueueCacheEntry { + queue: VecDeque, + count: u32, + total_size: u32, +} + +impl QueueCache { + fn new() -> Self { + Self(BTreeMap::new()) + } + + /// Dequeues one item from the upward message queue of the given para. + /// + /// Returns `(upward_message, became_empty)`, where + /// + /// - `upward_message` a dequeued message or `None` if the queue _was_ empty. + /// - `became_empty` is true if the queue _became_ empty. + fn dequeue(&mut self, para: ParaId) -> (Option, bool) { + let cache_entry = self.0.entry(para).or_insert_with(|| { + let queue = as Store>::RelayDispatchQueues::get(¶); + let (count, total_size) = as Store>::RelayDispatchQueueSize::get(¶); + QueueCacheEntry { + queue, + count, + total_size, + } + }); + let upward_message = cache_entry.queue.pop_front(); + if let Some(ref msg) = upward_message { + cache_entry.count -= 1; + cache_entry.total_size -= msg.len() as u32; + } + + let became_empty = cache_entry.queue.is_empty(); + (upward_message, became_empty) + } + + /// Flushes the updated queues into the storage. + fn flush(self) { + // NOTE we use an explicit method here instead of Drop impl because it has unwanted semantics + // within runtime. 
It is dangerous to use because of double-panics and flushing on a panic + // is not necessary as well. + for ( + para, + QueueCacheEntry { + queue, + count, + total_size, + }, + ) in self.0 + { + if queue.is_empty() { + // remove the entries altogether. + as Store>::RelayDispatchQueues::remove(¶); + as Store>::RelayDispatchQueueSize::remove(¶); + } else { + as Store>::RelayDispatchQueues::insert(¶, queue); + as Store>::RelayDispatchQueueSize::insert(¶, (count, total_size)); + } + } + } +} + +/// A cursor that iterates over all entries in `NeedsDispatch`. +/// +/// This cursor will start with the para indicated by `NextDispatchRoundStartWith` storage entry. +/// This cursor is cyclic meaning that after reaching the end it will jump to the beginning. Unlike +/// an iterator, this cursor allows removing items during the iteration. +/// +/// Each iteration cycle *must be* concluded with a call to either `advance` or `remove`. +/// +/// This struct is not supposed to be dropped but rather to be consumed by [`flush`]. +#[derive(Debug)] +struct NeedsDispatchCursor { + needs_dispatch: Vec, + cur_idx: usize, +} + +impl NeedsDispatchCursor { + fn new() -> Self { + let needs_dispatch: Vec = as Store>::NeedsDispatch::get(); + let start_with = as Store>::NextDispatchRoundStartWith::get(); + + let start_with_idx = match start_with { + Some(para) => match needs_dispatch.binary_search(¶) { + Ok(found_idx) => found_idx, + Err(_supposed_idx) => { + // well that's weird because we maintain an invariant that + // `NextDispatchRoundStartWith` must point into one of the items in + // `NeedsDispatch`. + // + // let's select 0 as the starting index as a safe bet. + debug_assert!(false); + 0 + } + }, + None => 0, + }; + + Self { + needs_dispatch, + cur_idx: start_with_idx, + } + } + + /// Returns the item the cursor points to. + fn peek(&self) -> Option { + self.needs_dispatch.get(self.cur_idx).cloned() + } + + /// Moves the cursor to the next item. 
+ fn advance(&mut self) { + if self.needs_dispatch.is_empty() { + return; + } + self.cur_idx = (self.cur_idx + 1) % self.needs_dispatch.len(); + } + + /// Removes the item under the cursor. + fn remove(&mut self) { + if self.needs_dispatch.is_empty() { + return; + } + let _ = self.needs_dispatch.remove(self.cur_idx); + + // we might've removed the last element and that doesn't necessarily mean that `needs_dispatch` + // became empty. Reposition the cursor in this case to the beginning. + if self.needs_dispatch.get(self.cur_idx).is_none() { + self.cur_idx = 0; + } + } + + /// Flushes the dispatcher state into the persistent storage. + fn flush(self) { + let next_one = self.peek(); + as Store>::NextDispatchRoundStartWith::set(next_one); + as Store>::NeedsDispatch::put(self.needs_dispatch); + } +} + +#[cfg(test)] +pub(crate) mod mock_sink { + //! An implementation of a mock UMP sink that allows attaching a probe for mocking the weights + //! and checking the sent messages. + //! + //! A default behavior of the UMP sink is to ignore an incoming message and return 0 weight. + //! + //! A probe can be attached to the mock UMP sink. When attached, the mock sink would consult the + //! probe to check whether the received message was expected and what weight it should return. + //! + //! There are two rules on how to use a probe: + //! + //! 1. There can be only one active probe at a time. Creation of another probe while there is + //! already an active one leads to a panic. The probe is scoped to a thread where it was created. + //! + //! 2. All messages expected by the probe must be received by the time of dropping it. Unreceived + //! messages will lead to a panic while dropping a probe. 
+ + use super::{UmpSink, UpwardMessage, ParaId}; + use std::cell::RefCell; + use std::collections::vec_deque::VecDeque; + use frame_support::weights::Weight; + + #[derive(Debug)] + struct UmpExpectation { + expected_origin: ParaId, + expected_msg: UpwardMessage, + mock_weight: Weight, + } + + std::thread_local! { + // `Some` here indicates that there is an active probe. + static HOOK: RefCell>> = RefCell::new(None); + } + + pub struct MockUmpSink; + impl UmpSink for MockUmpSink { + fn process_upward_message(actual_origin: ParaId, actual_msg: Vec) -> Weight { + HOOK.with(|opt_hook| match &mut *opt_hook.borrow_mut() { + Some(hook) => { + let UmpExpectation { + expected_origin, + expected_msg, + mock_weight, + } = match hook.pop_front() { + Some(expectation) => expectation, + None => { + panic!( + "The probe is active but didn't expect the message:\n\n\t{:?}.", + actual_msg, + ); + } + }; + assert_eq!(expected_origin, actual_origin); + assert_eq!(expected_msg, actual_msg); + mock_weight + } + None => 0, + }) + } + } + + pub struct Probe { + _private: (), + } + + impl Probe { + pub fn new() -> Self { + HOOK.with(|opt_hook| { + let prev = opt_hook.borrow_mut().replace(VecDeque::default()); + + // that can trigger if there were two probes were created during one session which + // is may be a bit strict, but may save time figuring out what's wrong. + // if you land here and you do need the two probes in one session consider + // dropping the the existing probe explicitly. + assert!(prev.is_none()); + }); + Self { _private: () } + } + + /// Add an expected message. + /// + /// The enqueued messages are processed in FIFO order. 
+ pub fn assert_msg( + &mut self, + expected_origin: ParaId, + expected_msg: UpwardMessage, + mock_weight: Weight, + ) { + HOOK.with(|opt_hook| { + opt_hook + .borrow_mut() + .as_mut() + .unwrap() + .push_back(UmpExpectation { + expected_origin, + expected_msg, + mock_weight, + }) + }); + } + } + + impl Drop for Probe { + fn drop(&mut self) { + let _ = HOOK.try_with(|opt_hook| { + let prev = opt_hook.borrow_mut().take().expect( + "this probe was created and hasn't been yet destroyed; + the probe cannot be replaced; + there is only one probe at a time allowed; + thus it cannot be `None`; + qed", + ); + + if !prev.is_empty() { + // some messages are left unchecked. We should notify the developer about this. + // however, we do so only if the thread doesn't panic already. Otherwise, the + // developer would get a SIGILL or SIGABRT without a meaningful error message. + if !std::thread::panicking() { + panic!( + "the probe is dropped and not all expected messages arrived: {:?}", + prev + ); + } + } + }); + // an `Err` here signals here that the thread local was already destroyed. 
+ } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use super::mock_sink::Probe; + use crate::mock::{Configuration, Ump, new_test_ext, GenesisConfig as MockGenesisConfig}; + use frame_support::IterableStorageMap; + use std::collections::HashSet; + + struct GenesisConfigBuilder { + max_upward_message_size: u32, + max_upward_message_num_per_candidate: u32, + max_upward_queue_count: u32, + max_upward_queue_size: u32, + preferred_dispatchable_upward_messages_step_weight: Weight, + } + + impl Default for GenesisConfigBuilder { + fn default() -> Self { + Self { + max_upward_message_size: 16, + max_upward_message_num_per_candidate: 2, + max_upward_queue_count: 4, + max_upward_queue_size: 64, + preferred_dispatchable_upward_messages_step_weight: 1000, + } + } + } + + impl GenesisConfigBuilder { + fn build(self) -> crate::mock::GenesisConfig { + let mut genesis = default_genesis_config(); + let config = &mut genesis.configuration.config; + + config.max_upward_message_size = self.max_upward_message_size; + config.max_upward_message_num_per_candidate = self.max_upward_message_num_per_candidate; + config.max_upward_queue_count = self.max_upward_queue_count; + config.max_upward_queue_size = self.max_upward_queue_size; + config.preferred_dispatchable_upward_messages_step_weight = + self.preferred_dispatchable_upward_messages_step_weight; + genesis + } + } + + fn default_genesis_config() -> MockGenesisConfig { + MockGenesisConfig { + configuration: crate::configuration::GenesisConfig { + config: crate::configuration::HostConfiguration { + max_downward_message_size: 1024, + ..Default::default() + }, + }, + ..Default::default() + } + } + + fn queue_upward_msg(para: ParaId, msg: UpwardMessage) { + let msgs = vec![msg]; + assert!(Ump::check_upward_messages(&Configuration::config(), para, &msgs).is_ok()); + let _ = Ump::enact_upward_messages(para, msgs); + } + + fn assert_storage_consistency_exhaustive() { + // check that empty queues don't clutter the storage. 
+ for (_para, queue) in ::RelayDispatchQueues::iter() { + assert!(!queue.is_empty()); + } + + // actually count the counts and sizes in queues and compare them to the bookkeeped version. + for (para, queue) in ::RelayDispatchQueues::iter() { + let (expected_count, expected_size) = ::RelayDispatchQueueSize::get(para); + let (actual_count, actual_size) = + queue.into_iter().fold((0, 0), |(acc_count, acc_size), x| { + (acc_count + 1, acc_size + x.len() as u32) + }); + + assert_eq!(expected_count, actual_count); + assert_eq!(expected_size, actual_size); + } + + // since we wipe the empty queues the sets of paras in queue contents, queue sizes and + // need dispatch set should all be equal. + let queue_contents_set = ::RelayDispatchQueues::iter() + .map(|(k, _)| k) + .collect::>(); + let queue_sizes_set = ::RelayDispatchQueueSize::iter() + .map(|(k, _)| k) + .collect::>(); + let needs_dispatch_set = ::NeedsDispatch::get() + .into_iter() + .collect::>(); + assert_eq!(queue_contents_set, queue_sizes_set); + assert_eq!(queue_contents_set, needs_dispatch_set); + + // `NextDispatchRoundStartWith` should point into a para that is tracked. + if let Some(para) = ::NextDispatchRoundStartWith::get() { + assert!(queue_contents_set.contains(¶)); + } + + // `NeedsDispatch` is always sorted. 
+ assert!( + ::NeedsDispatch::get() + .windows(2) + .all(|xs| xs[0] <= xs[1]) + ); + } + + #[test] + fn dispatch_empty() { + new_test_ext(default_genesis_config()).execute_with(|| { + assert_storage_consistency_exhaustive(); + + // make sure that the case with empty queues is handled properly + Ump::process_pending_upward_messages(); + + assert_storage_consistency_exhaustive(); + }); + } + + #[test] + fn dispatch_single_message() { + let a = ParaId::from(228); + let msg = vec![1, 2, 3]; + + new_test_ext(GenesisConfigBuilder::default().build()).execute_with(|| { + let mut probe = Probe::new(); + + probe.assert_msg(a, msg.clone(), 0); + queue_upward_msg(a, msg); + + Ump::process_pending_upward_messages(); + + assert_storage_consistency_exhaustive(); + }); + } + + #[test] + fn dispatch_resume_after_exceeding_dispatch_stage_weight() { + let a = ParaId::from(128); + let c = ParaId::from(228); + let q = ParaId::from(911); + + let a_msg_1 = vec![1, 2, 3]; + let a_msg_2 = vec![3, 2, 1]; + let c_msg_1 = vec![4, 5, 6]; + let c_msg_2 = vec![9, 8, 7]; + let q_msg = b"we are Q".to_vec(); + + new_test_ext( + GenesisConfigBuilder { + preferred_dispatchable_upward_messages_step_weight: 500, + ..Default::default() + } + .build(), + ) + .execute_with(|| { + queue_upward_msg(q, q_msg.clone()); + queue_upward_msg(c, c_msg_1.clone()); + queue_upward_msg(a, a_msg_1.clone()); + queue_upward_msg(a, a_msg_2.clone()); + + assert_storage_consistency_exhaustive(); + + // we expect only two first messages to fit in the first iteration. + { + let mut probe = Probe::new(); + + probe.assert_msg(a, a_msg_1.clone(), 300); + probe.assert_msg(c, c_msg_1.clone(), 300); + Ump::process_pending_upward_messages(); + assert_storage_consistency_exhaustive(); + + drop(probe); + } + + queue_upward_msg(c, c_msg_2.clone()); + assert_storage_consistency_exhaustive(); + + // second iteration should process the second message. 
+ { + let mut probe = Probe::new(); + + probe.assert_msg(q, q_msg.clone(), 500); + Ump::process_pending_upward_messages(); + assert_storage_consistency_exhaustive(); + + drop(probe); + } + + // 3rd iteration. + { + let mut probe = Probe::new(); + + probe.assert_msg(a, a_msg_2.clone(), 100); + probe.assert_msg(c, c_msg_2.clone(), 100); + Ump::process_pending_upward_messages(); + assert_storage_consistency_exhaustive(); + + drop(probe); + } + + // finally, make sure that the queue is empty. + { + let probe = Probe::new(); + + Ump::process_pending_upward_messages(); + assert_storage_consistency_exhaustive(); + + drop(probe); + } + }); + } + + #[test] + fn dispatch_correctly_handle_remove_of_latest() { + let a = ParaId::from(1991); + let b = ParaId::from(1999); + + let a_msg_1 = vec![1, 2, 3]; + let a_msg_2 = vec![3, 2, 1]; + let b_msg_1 = vec![4, 5, 6]; + + new_test_ext( + GenesisConfigBuilder { + preferred_dispatchable_upward_messages_step_weight: 900, + ..Default::default() + } + .build(), + ) + .execute_with(|| { + // We want to test here an edge case, where we remove the queue with the highest + // para id (i.e. last in the needs_dispatch order). + // + // If the last entry was removed we should proceed execution, assuming we still have + // weight available. 
+ + queue_upward_msg(a, a_msg_1.clone()); + queue_upward_msg(a, a_msg_2.clone()); + queue_upward_msg(b, b_msg_1.clone()); + + { + let mut probe = Probe::new(); + + probe.assert_msg(a, a_msg_1.clone(), 300); + probe.assert_msg(b, b_msg_1.clone(), 300); + probe.assert_msg(a, a_msg_2.clone(), 300); + + Ump::process_pending_upward_messages(); + + drop(probe); + } + }); + } +} diff --git a/runtime/parachains/src/util.rs b/runtime/parachains/src/util.rs index 0f6becfd6005b372dd138aecf55adde4f190b3c7..151222cbec16c1fc57ac1b514920ab0bf2d41b44 100644 --- a/runtime/parachains/src/util.rs +++ b/runtime/parachains/src/util.rs @@ -19,30 +19,31 @@ use sp_runtime::traits::{One, Saturating}; use primitives::v1::{Id as ParaId, PersistedValidationData, TransientValidationData}; -use sp_std::prelude::*; -use crate::{configuration, paras, router}; +use crate::{configuration, paras, dmp, hrmp}; /// Make the persisted validation data for a particular parachain. /// /// This ties together the storage of several modules. -pub fn make_persisted_validation_data( +pub fn make_persisted_validation_data( para_id: ParaId, ) -> Option> { + let config = >::config(); let relay_parent_number = >::block_number() - One::one(); Some(PersistedValidationData { parent_head: >::para_head(¶_id)?, block_number: relay_parent_number, - hrmp_mqc_heads: Vec::new(), - dmq_mqc_head: >::dmq_mqc_head(para_id), + hrmp_mqc_heads: >::hrmp_mqc_heads(para_id), + dmq_mqc_head: >::dmq_mqc_head(para_id), + max_pov_size: config.max_pov_size, }) } /// Make the transient validation data for a particular parachain. /// /// This ties together the storage of several modules. 
-pub fn make_transient_validation_data( +pub fn make_transient_validation_data( para_id: ParaId, ) -> Option> { let config = >::config(); @@ -68,6 +69,6 @@ pub fn make_transient_validation_data( max_head_data_size: config.max_head_data_size, balance: 0, code_upgrade_allowed, - dmq_length: >::dmq_length(para_id), + dmq_length: >::dmq_length(para_id), }) } diff --git a/runtime/polkadot/Cargo.toml b/runtime/polkadot/Cargo.toml index f99ddca6f0e110ac70e3577d0806985a2a8233b3..d825188a68c875fe5763f33e92525c6aab853aa0 100644 --- a/runtime/polkadot/Cargo.toml +++ b/runtime/polkadot/Cargo.toml @@ -1,87 +1,87 @@ [package] name = "polkadot-runtime" -version = "0.8.26" +version = "0.8.27" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" [dependencies] bitvec = { version = "0.17.4", default-features = false, features = ["alloc"] } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -log = { version = "0.3.9", optional = true } -rustc-hex = { version = "2.0.1", default-features = false } -serde = { version = "1.0.102", default-features = false } -serde_derive = { version = "1.0.102", optional = true } +parity-scale-codec = { version = "1.3.5", default-features = false, features = ["derive"] } +log = { version = "0.4.11", optional = true } +rustc-hex = { version = "2.1.0", default-features = false } +serde = { version = "1.0.118", default-features = false } +serde_derive = { version = "1.0.117", optional = true } static_assertions = "1.1.0" -smallvec = "1.4.1" +smallvec = "1.5.1" -authority-discovery-primitives = { package = "sp-authority-discovery", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -babe-primitives = { package = "sp-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } 
-inherents = { package = "sp-inherents", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -offchain-primitives = { package = "sp-offchain", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -tx-pool-api = { package = "sp-transaction-pool", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -block-builder-api = { package = "sp-block-builder", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +authority-discovery-primitives = { package = "sp-authority-discovery", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +babe-primitives = { package = "sp-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +inherents = { package = "sp-inherents", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +offchain-primitives = { 
package = "sp-offchain", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-staking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-session = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-version = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +tx-pool-api = { package = "sp-transaction-pool", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +block-builder-api = { package = "sp-block-builder", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } -pallet-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-authorship = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } 
-pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-collective = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-democracy = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-elections-phragmen = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-executive = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-identity = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-im-online = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-indices = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-membership = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-multisig = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-nicks = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-offences = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-proxy = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-scheduler = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master", 
default-features = false } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-staking-reward-curve = { git = "https://github.com/paritytech/substrate", branch = "master" } -frame-system = {git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-treasury = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-vesting = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-utility = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-authorship = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-collective = { git = "https://github.com/paritytech/substrate", branch = 
"polkadot-release-v0.8.27", default-features = false } +pallet-democracy = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-elections-phragmen = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +frame-executive = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-grandpa = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-identity = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-im-online = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-indices = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-membership = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-multisig = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-nicks = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-offences = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-proxy = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-scheduler = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-session = { git = 
"https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-staking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-staking-reward-curve = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +frame-system = {git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-treasury = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-vesting = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-utility = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } -frame-system-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } -pallet-offences-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } -pallet-session-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } -hex-literal = { version = "0.2.1", optional = true } +frame-benchmarking = { git = 
"https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false, optional = true } +frame-system-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false, optional = true } +pallet-offences-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false, optional = true } +pallet-session-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false, optional = true } +hex-literal = { version = "0.3.1", optional = true } runtime-common = { package = "polkadot-runtime-common", path = "../common", default-features = false } primitives = { package = "polkadot-primitives", path = "../../primitives", default-features = false } [dev-dependencies] -hex-literal = "0.2.1" -libsecp256k1 = "0.3.2" -tiny-keccak = "1.5.0" -keyring = { package = "sp-keyring", git = "https://github.com/paritytech/substrate", branch = "master" } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" } -trie-db = "0.22.0" -serde_json = "1.0.41" +hex-literal = "0.3.1" +libsecp256k1 = "0.3.5" +tiny-keccak = "2.0.2" +keyring = { package = "sp-keyring", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +trie-db = "0.22.1" +serde_json = "1.0.60" [build-dependencies] -wasm-builder-runner = { package = "substrate-wasm-builder-runner", version = "2.0.0" } +substrate-wasm-builder = "3.0.0" [features] default = ["std"] @@ -93,7 +93,7 @@ std = [ "bitvec/std", "primitives/std", "rustc-hex/std", - "codec/std", + "parity-scale-codec/std", "inherents/std", "sp-core/std", "sp-api/std", diff --git a/runtime/polkadot/build.rs b/runtime/polkadot/build.rs index 
8c7a1e35dda5e131a2a71e69571ba9fdee06743d..e4a139a06ae1a85fa05f0f90f568a1a7c2839160 100644 --- a/runtime/polkadot/build.rs +++ b/runtime/polkadot/build.rs @@ -14,12 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use wasm_builder_runner::WasmBuilder; +use substrate_wasm_builder::WasmBuilder; fn main() { WasmBuilder::new() .with_current_project() - .with_wasm_builder_from_crates("2.0.1") .import_memory() .export_heap_base() .build() diff --git a/runtime/polkadot/src/constants.rs b/runtime/polkadot/src/constants.rs index f784e9fca1ac43e3fb3c0864df2dd4876bf6a61a..d6704f3eb069991580fea8a2dc7902bcd1ca3900 100644 --- a/runtime/polkadot/src/constants.rs +++ b/runtime/polkadot/src/constants.rs @@ -61,7 +61,7 @@ pub mod fee { /// node's balance type. /// /// This should typically create a mapping between the following ranges: - /// - [0, frame_system::MaximumBlockWeight] + /// - [0, MAXIMUM_BLOCK_WEIGHT] /// - [Balance::min, Balance::max] /// /// Yet, it can be used for any other sort of change to weight-fee. 
Some examples being: @@ -87,16 +87,16 @@ pub mod fee { #[cfg(test)] mod tests { use frame_support::weights::WeightToFeePolynomial; - use runtime_common::{MaximumBlockWeight, ExtrinsicBaseWeight}; + use runtime_common::{MAXIMUM_BLOCK_WEIGHT, ExtrinsicBaseWeight}; use super::fee::WeightToFee; use super::currency::{CENTS, DOLLARS, MILLICENTS}; #[test] - // This function tests that the fee for `MaximumBlockWeight` of weight is correct + // This function tests that the fee for `MAXIMUM_BLOCK_WEIGHT` of weight is correct fn full_block_fee_is_correct() { // A full block should cost 16 DOLLARS println!("Base: {}", ExtrinsicBaseWeight::get()); - let x = WeightToFee::calc(&MaximumBlockWeight::get()); + let x = WeightToFee::calc(&MAXIMUM_BLOCK_WEIGHT); let y = 16 * DOLLARS; assert!(x.max(y) - x.min(y) < MILLICENTS); } diff --git a/runtime/polkadot/src/lib.rs b/runtime/polkadot/src/lib.rs index e2e0b1780ecc1bec72823a24afc65be6139c54b4..01073651f71c9b3b40cc715995ad75d21a09b1c5 100644 --- a/runtime/polkadot/src/lib.rs +++ b/runtime/polkadot/src/lib.rs @@ -20,21 +20,23 @@ // `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. 
#![recursion_limit = "256"] +use pallet_transaction_payment::CurrencyAdapter; use runtime_common::{ claims, SlowAdjustingFeeUpdate, CurrencyToVote, - impls::ToAuthor, - NegativeImbalance, BlockHashCount, MaximumBlockWeight, AvailableBlockRatio, - MaximumBlockLength, BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, - MaximumExtrinsicWeight, ParachainSessionKeyPlaceholder, + impls::DealWithFees, + BlockHashCount, RocksDbWeight, BlockWeights, BlockLength, OffchainSolutionWeightLimit, + ParachainSessionKeyPlaceholder, }; use sp_std::prelude::*; +use sp_std::collections::btree_map::BTreeMap; use sp_core::u32_trait::{_1, _2, _3, _4, _5}; -use codec::{Encode, Decode}; +use parity_scale_codec::{Encode, Decode}; use primitives::v1::{ AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CommittedCandidateReceipt, CoreState, GroupRotationInfo, Hash, Id, Moment, Nonce, OccupiedCoreAssumption, PersistedValidationData, Signature, ValidationCode, ValidationData, ValidatorId, ValidatorIndex, + InboundDownwardMessage, InboundHrmpMessage, SessionInfo, }; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, ModuleId, ApplyExtrinsicResult, @@ -55,7 +57,7 @@ use sp_core::OpaqueMetadata; use sp_staking::SessionIndex; use frame_support::{ parameter_types, construct_runtime, debug, RuntimeDebug, - traits::{KeyOwnerProofSystem, SplitTwoWays, Randomness, LockIdentifier, Filter}, + traits::{KeyOwnerProofSystem, Randomness, LockIdentifier, Filter}, weights::Weight, }; use frame_system::{EnsureRoot, EnsureOneOf}; @@ -90,7 +92,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("polkadot"), impl_name: create_runtime_str!("parity-polkadot"), authoring_version: 0, - spec_version: 26, + spec_version: 27, impl_version: 0, #[cfg(not(feature = "disable-runtime-api"))] apis: RUNTIME_API_VERSIONS, @@ -137,8 +139,10 @@ parameter_types! 
{ pub const Version: RuntimeVersion = VERSION; } -impl frame_system::Trait for Runtime { +impl frame_system::Config for Runtime { type BaseCallFilter = BaseFilter; + type BlockWeights = BlockWeights; + type BlockLength = BlockLength; type Origin = Origin; type Call = Call; type Index = Nonce; @@ -150,13 +154,7 @@ impl frame_system::Trait for Runtime { type Header = generic::Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; type DbWeight = RocksDbWeight; - type BlockExecutionWeight = BlockExecutionWeight; - type ExtrinsicBaseWeight = ExtrinsicBaseWeight; - type MaximumExtrinsicWeight = MaximumExtrinsicWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = Version; type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; @@ -166,15 +164,17 @@ impl frame_system::Trait for Runtime { } parameter_types! { + pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * + BlockWeights::get().max_block; pub const MaxScheduledPerBlock: u32 = 50; } -impl pallet_scheduler::Trait for Runtime { +impl pallet_scheduler::Config for Runtime { type Event = Event; type Origin = Origin; type PalletsOrigin = OriginCaller; type Call = Call; - type MaximumWeight = MaximumBlockWeight; + type MaximumWeight = MaximumSchedulerWeight; type ScheduleOrigin = EnsureRoot; type MaxScheduledPerBlock = MaxScheduledPerBlock; type WeightInfo = weights::pallet_scheduler::WeightInfo; @@ -185,7 +185,7 @@ parameter_types! { pub const ExpectedBlockTime: Moment = MILLISECS_PER_BLOCK; } -impl pallet_babe::Trait for Runtime { +impl pallet_babe::Config for Runtime { type EpochDuration = EpochDuration; type ExpectedBlockTime = ExpectedBlockTime; @@ -214,7 +214,7 @@ parameter_types! 
{ pub const IndexDeposit: Balance = 10 * DOLLARS; } -impl pallet_indices::Trait for Runtime { +impl pallet_indices::Config for Runtime { type AccountIndex = AccountIndex; type Currency = Balances; type Deposit = IndexDeposit; @@ -227,15 +227,7 @@ parameter_types! { pub const MaxLocks: u32 = 50; } -/// Splits fees 80/20 between treasury and block author. -pub type DealWithFees = SplitTwoWays< - Balance, - NegativeImbalance, - _4, Treasury, // 4 parts (80%) goes to the treasury. - _1, ToAuthor, // 1 part (20%) goes to the block author. ->; - -impl pallet_balances::Trait for Runtime { +impl pallet_balances::Config for Runtime { type Balance = Balance; type DustRemoval = (); type Event = Event; @@ -249,9 +241,8 @@ parameter_types! { pub const TransactionByteFee: Balance = 10 * MILLICENTS; } -impl pallet_transaction_payment::Trait for Runtime { - type Currency = Balances; - type OnTransactionPayment = DealWithFees; +impl pallet_transaction_payment::Config for Runtime { + type OnChargeTransaction = CurrencyAdapter>; type TransactionByteFee = TransactionByteFee; type WeightToFee = WeightToFee; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; @@ -260,7 +251,7 @@ impl pallet_transaction_payment::Trait for Runtime { parameter_types! { pub const MinimumPeriod: u64 = SLOT_DURATION / 2; } -impl pallet_timestamp::Trait for Runtime { +impl pallet_timestamp::Config for Runtime { type Moment = u64; type OnTimestampSet = Babe; type MinimumPeriod = MinimumPeriod; @@ -272,7 +263,7 @@ parameter_types! { } // TODO: substrate#2986 implement this properly -impl pallet_authorship::Trait for Runtime { +impl pallet_authorship::Config for Runtime { type FindAuthor = pallet_session::FindAccountFromAuthorIndex; type UncleGenerations = UncleGenerations; type FilterUncle = (); @@ -293,7 +284,7 @@ parameter_types! 
{ pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(17); } -impl pallet_session::Trait for Runtime { +impl pallet_session::Config for Runtime { type Event = Event; type ValidatorId = AccountId; type ValidatorIdOf = pallet_staking::StashOf; @@ -306,7 +297,7 @@ impl pallet_session::Trait for Runtime { type WeightInfo = weights::pallet_session::WeightInfo; } -impl pallet_session::historical::Trait for Runtime { +impl pallet_session::historical::Config for Runtime { type FullIdentification = pallet_staking::Exposure; type FullIdentificationOf = pallet_staking::ExposureOf; } @@ -339,9 +330,6 @@ parameter_types! { pub const ElectionLookahead: BlockNumber = EPOCH_DURATION_IN_BLOCKS / 16; pub const MaxIterations: u32 = 10; pub MinSolutionScoreBump: Perbill = Perbill::from_rational_approximation(5u32, 10_000); - pub OffchainSolutionWeightLimit: Weight = MaximumExtrinsicWeight::get() - .saturating_sub(BlockExecutionWeight::get()) - .saturating_sub(ExtrinsicBaseWeight::get()); } type SlashCancelOrigin = EnsureOneOf< @@ -350,7 +338,7 @@ type SlashCancelOrigin = EnsureOneOf< pallet_collective::EnsureProportionAtLeast<_3, _4, AccountId, CouncilCollective> >; -impl pallet_staking::Trait for Runtime { +impl pallet_staking::Config for Runtime { type Currency = Balances; type UnixTime = Timestamp; type CurrencyToVote = CurrencyToVote; @@ -388,7 +376,7 @@ parameter_types! { pub const MaxRegistrars: u32 = 20; } -impl pallet_identity::Trait for Runtime { +impl pallet_identity::Config for Runtime { type Event = Event; type Currency = Balances; type BasicDeposit = BasicDeposit; @@ -417,7 +405,7 @@ parameter_types! { pub const MaxProposals: u32 = 100; } -impl pallet_democracy::Trait for Runtime { +impl pallet_democracy::Config for Runtime { type Proposal = Call; type Event = Event; type Currency = Balances; @@ -486,7 +474,7 @@ parameter_types! 
{ } type CouncilCollective = pallet_collective::Instance1; -impl pallet_collective::Trait for Runtime { +impl pallet_collective::Config for Runtime { type Origin = Origin; type Proposal = Call; type Event = Event; @@ -510,7 +498,7 @@ parameter_types! { // Make sure that there are no more than `MaxMembers` members elected via phragmen. const_assert!(DesiredMembers::get() <= CouncilMaxMembers::get()); -impl pallet_elections_phragmen::Trait for Runtime { +impl pallet_elections_phragmen::Config for Runtime { type Event = Event; type ModuleId = ElectionsPhragmenModuleId; type Currency = Balances; @@ -535,7 +523,7 @@ parameter_types! { } type TechnicalCollective = pallet_collective::Instance2; -impl pallet_collective::Trait for Runtime { +impl pallet_collective::Config for Runtime { type Origin = Origin; type Proposal = Call; type Event = Event; @@ -546,7 +534,7 @@ impl pallet_collective::Trait for Runtime { type WeightInfo = weights::pallet_collective::WeightInfo; } -impl pallet_membership::Trait for Runtime { +impl pallet_membership::Config for Runtime { type Event = Event; type AddOrigin = MoreThanHalfCouncil; type RemoveOrigin = MoreThanHalfCouncil; @@ -582,7 +570,7 @@ type ApproveOrigin = EnsureOneOf< pallet_collective::EnsureProportionAtLeast<_3, _5, AccountId, CouncilCollective> >; -impl pallet_treasury::Trait for Runtime { +impl pallet_treasury::Config for Runtime { type ModuleId = TreasuryModuleId; type Currency = Balances; type ApproveOrigin = ApproveOrigin; @@ -609,17 +597,17 @@ impl pallet_treasury::Trait for Runtime { } parameter_types! 
{ - pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * MaximumBlockWeight::get(); + pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * BlockWeights::get().max_block; } -impl pallet_offences::Trait for Runtime { +impl pallet_offences::Config for Runtime { type Event = Event; type IdentificationTuple = pallet_session::historical::IdentificationTuple; type OnOffenceHandler = Staking; type WeightSoftLimit = OffencesWeightSoftLimit; } -impl pallet_authority_discovery::Trait for Runtime {} +impl pallet_authority_discovery::Config for Runtime {} parameter_types! { pub const SessionDuration: BlockNumber = EPOCH_DURATION_IN_BLOCKS as _; @@ -631,7 +619,7 @@ parameter_types! { pub const ImOnlineUnsignedPriority: TransactionPriority = TransactionPriority::max_value(); } -impl pallet_im_online::Trait for Runtime { +impl pallet_im_online::Config for Runtime { type AuthorityId = ImOnlineId; type Event = Event; type SessionDuration = SessionDuration; @@ -640,7 +628,7 @@ impl pallet_im_online::Trait for Runtime { type WeightInfo = weights::pallet_im_online::WeightInfo; } -impl pallet_grandpa::Trait for Runtime { +impl pallet_grandpa::Config for Runtime { type Event = Event; type Call = Call; @@ -668,7 +656,7 @@ impl frame_system::offchain::CreateSignedTransaction for R call: Call, public: ::Signer, account: AccountId, - nonce: ::Index, + nonce: ::Index, ) -> Option<(Call, ::SignaturePayload)> { // take the biggest period possible. let period = BlockHashCount::get() @@ -723,7 +711,7 @@ parameter_types! { pub Prefix: &'static [u8] = b"Pay DOTs to the Polkadot account:"; } -impl claims::Trait for Runtime { +impl claims::Config for Runtime { type Event = Event; type VestingSchedule = Vesting; type Prefix = Prefix; @@ -735,7 +723,7 @@ parameter_types! 
{ pub const MinVestedTransfer: Balance = 100 * DOLLARS; } -impl pallet_vesting::Trait for Runtime { +impl pallet_vesting::Config for Runtime { type Event = Event; type Currency = Balances; type BlockNumberToBalance = ConvertInto; @@ -743,7 +731,7 @@ impl pallet_vesting::Trait for Runtime { type WeightInfo = weights::pallet_vesting::WeightInfo; } -impl pallet_utility::Trait for Runtime { +impl pallet_utility::Config for Runtime { type Event = Event; type Call = Call; type WeightInfo = weights::pallet_utility::WeightInfo; @@ -757,7 +745,7 @@ parameter_types! { pub const MaxSignatories: u16 = 100; } -impl pallet_multisig::Trait for Runtime { +impl pallet_multisig::Config for Runtime { type Event = Event; type Call = Call; type Currency = Balances; @@ -856,15 +844,21 @@ impl InstanceFilter for ProxyType { Call::Multisig(..) ), ProxyType::Governance => matches!(c, - Call::Democracy(..) | Call::Council(..) | Call::TechnicalCommittee(..) - | Call::ElectionsPhragmen(..) | Call::Treasury(..) | Call::Utility(..) + Call::Democracy(..) | + Call::Council(..) | + Call::TechnicalCommittee(..) | + Call::ElectionsPhragmen(..) | + Call::Treasury(..) | + Call::Utility(..) ), ProxyType::Staking => matches!(c, - Call::Staking(..) | Call::Utility(pallet_utility::Call::batch(..)) | Call::Utility(..) + Call::Staking(..) | + Call::Session(..) | + Call::Utility(..) ), ProxyType::IdentityJudgement => matches!(c, - Call::Identity(pallet_identity::Call::provide_judgement(..)) - | Call::Utility(pallet_utility::Call::batch(..)) + Call::Identity(pallet_identity::Call::provide_judgement(..)) | + Call::Utility(..) 
) } } @@ -879,7 +873,7 @@ impl InstanceFilter for ProxyType { } } -impl pallet_proxy::Trait for Runtime { +impl pallet_proxy::Config for Runtime { type Event = Event; type Call = Call; type Currency = Balances; @@ -897,34 +891,7 @@ impl pallet_proxy::Trait for Runtime { pub struct CustomOnRuntimeUpgrade; impl frame_support::traits::OnRuntimeUpgrade for CustomOnRuntimeUpgrade { fn on_runtime_upgrade() -> frame_support::weights::Weight { - // Update scheduler origin usage - #[derive(Encode, Decode)] - #[allow(non_camel_case_types)] - pub enum OldOriginCaller { - system(frame_system::Origin), - pallet_collective_Instance1( - pallet_collective::Origin - ), - pallet_collective_Instance2( - pallet_collective::Origin - ), - } - - impl Into for OldOriginCaller { - fn into(self) -> OriginCaller { - match self { - OldOriginCaller::system(o) => OriginCaller::system(o), - OldOriginCaller::pallet_collective_Instance1(o) => - OriginCaller::pallet_collective_Instance1(o), - OldOriginCaller::pallet_collective_Instance2(o) => - OriginCaller::pallet_collective_Instance2(o), - } - } - } - - pallet_scheduler::Module::::migrate_origin::(); - - ::MaximumBlockWeight::get() + 0 } } @@ -1105,7 +1072,7 @@ sp_api::impl_runtime_apis! { None } - fn check_validation_outputs(_: Id, _: primitives::v1::ValidationOutputs) -> bool { + fn check_validation_outputs(_: Id, _: primitives::v1::CandidateCommitments) -> bool { false } @@ -1113,27 +1080,38 @@ sp_api::impl_runtime_apis! 
{ 0 } + fn session_info(_: SessionIndex) -> Option { + None + } + fn validation_code(_: Id, _: OccupiedCoreAssumption) -> Option { None } - fn candidate_pending_availability(_: Id) -> Option> { + fn historical_validation_code(_: Id, _: BlockNumber) -> Option { None } - fn candidate_events() -> Vec> { - Vec::new() + fn candidate_pending_availability(_: Id) -> Option> { + None } - fn validator_discovery(_: Vec) -> Vec> { + fn candidate_events() -> Vec> { Vec::new() } fn dmq_contents( _recipient: Id, - ) -> Vec> { + ) -> Vec> { Vec::new() } + + fn inbound_hrmp_channels_contents( + _recipient: Id + ) -> BTreeMap>> { + BTreeMap::new() + } + } impl fg_primitives::GrandpaApi for Runtime { @@ -1160,7 +1138,7 @@ sp_api::impl_runtime_apis! { _set_id: fg_primitives::SetId, authority_id: fg_primitives::AuthorityId, ) -> Option { - use codec::Encode; + use parity_scale_codec::Encode; Historical::prove((fg_primitives::KEY_TYPE, authority_id)) .map(|p| p.encode()) @@ -1193,7 +1171,7 @@ sp_api::impl_runtime_apis! { _slot_number: babe_primitives::SlotNumber, authority_id: babe_primitives::AuthorityId, ) -> Option { - use codec::Encode; + use parity_scale_codec::Encode; Historical::prove((babe_primitives::KEY_TYPE, authority_id)) .map(|p| p.encode()) @@ -1259,9 +1237,9 @@ sp_api::impl_runtime_apis! 
{ use pallet_offences_benchmarking::Module as OffencesBench; use frame_system_benchmarking::Module as SystemBench; - impl pallet_session_benchmarking::Trait for Runtime {} - impl pallet_offences_benchmarking::Trait for Runtime {} - impl frame_system_benchmarking::Trait for Runtime {} + impl pallet_session_benchmarking::Config for Runtime {} + impl pallet_offences_benchmarking::Config for Runtime {} + impl frame_system_benchmarking::Config for Runtime {} let whitelist: Vec = vec![ // Block Number diff --git a/runtime/polkadot/src/weights/frame_system.rs b/runtime/polkadot/src/weights/frame_system.rs index 7c1162fd52ba47922ee8185ccc0dc57939c05db6..87537327d8210eb0b85e34b792f6fd180c2c6bca 100644 --- a/runtime/polkadot/src/weights/frame_system.rs +++ b/runtime/polkadot/src/weights/frame_system.rs @@ -15,7 +15,24 @@ // along with Polkadot. If not, see . //! Weights for frame_system //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-10-29, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("polkadot-dev"), DB CACHE: 128 + +// Executed Command: +// ./target/release/polkadot +// benchmark +// --chain=polkadot-dev +// --steps=50 +// --repeat=20 +// --pallet=frame_system +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header +// ./file_header.txt +// --output=./runtime/polkadot/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,36 +40,37 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for frame_system. 
pub struct WeightInfo(PhantomData); -impl frame_system::WeightInfo for WeightInfo { - fn remark(_b: u32) -> Weight { - (1_816_000 as Weight) +impl frame_system::WeightInfo for WeightInfo { + fn remark(_b: u32, ) -> Weight { + (1_851_000 as Weight) } fn set_heap_pages() -> Weight { - (2_526_000 as Weight) + (2_436_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_changes_trie_config() -> Weight { - (9_717_000 as Weight) + (11_436_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn set_storage(i: u32, ) -> Weight { (0 as Weight) - .saturating_add((790_000 as Weight).saturating_mul(i as Weight)) + .saturating_add((813_000 as Weight).saturating_mul(i as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) } fn kill_storage(i: u32, ) -> Weight { (0 as Weight) - .saturating_add((561_000 as Weight).saturating_mul(i as Weight)) + .saturating_add((545_000 as Weight).saturating_mul(i as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) } fn kill_prefix(p: u32, ) -> Weight { (0 as Weight) - .saturating_add((858_000 as Weight).saturating_mul(p as Weight)) + .saturating_add((869_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) } fn suicide() -> Weight { - (35_710_000 as Weight) + (35_460_000 as Weight) } } diff --git a/runtime/polkadot/src/weights/pallet_balances.rs b/runtime/polkadot/src/weights/pallet_balances.rs index e21bc78fb10e44a785c3f1be32afb737f892a7ed..c1998eafcbc78d216dba664fb614a733f4338cfc 100644 --- a/runtime/polkadot/src/weights/pallet_balances.rs +++ b/runtime/polkadot/src/weights/pallet_balances.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_balances +//! 
Autogenerated weights for pallet_balances +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-08, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("polkadot-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=polkadot-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_balances +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/polkadot/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,30 +40,31 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_balances. pub struct WeightInfo(PhantomData); -impl pallet_balances::WeightInfo for WeightInfo { +impl pallet_balances::WeightInfo for WeightInfo { fn transfer() -> Weight { - (92_238_000 as Weight) + (93_434_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn transfer_keep_alive() -> Weight { - (63_272_000 as Weight) + (64_060_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_balance_creating() -> Weight { - (34_822_000 as Weight) + (35_345_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_balance_killing() -> Weight { - (44_051_000 as Weight) + (44_679_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_transfer() -> Weight { - (90_885_000 as Weight) + (92_521_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) 
.saturating_add(T::DbWeight::get().writes(2 as Weight)) } diff --git a/runtime/polkadot/src/weights/pallet_collective.rs b/runtime/polkadot/src/weights/pallet_collective.rs index 3a9ab3b95641ce2641ec906f73514cd8ced0f7f6..52e58a91e092de79eb4ab03058b9b7788b3649eb 100644 --- a/runtime/polkadot/src/weights/pallet_collective.rs +++ b/runtime/polkadot/src/weights/pallet_collective.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_collective +//! Autogenerated weights for pallet_collective +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-08, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("polkadot-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=polkadot-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_collective +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/polkadot/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,77 +40,100 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_collective. 
pub struct WeightInfo(PhantomData); -impl pallet_collective::WeightInfo for WeightInfo { +impl pallet_collective::WeightInfo for WeightInfo { fn set_members(m: u32, n: u32, p: u32, ) -> Weight { (0 as Weight) - .saturating_add((20_383_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 9_000 + .saturating_add((20_774_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 9_000 .saturating_add((140_000 as Weight).saturating_mul(n as Weight)) - .saturating_add((27_374_000 as Weight).saturating_mul(p as Weight)) + // Standard Error: 9_000 + .saturating_add((28_269_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(p as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) } fn execute(b: u32, m: u32, ) -> Weight { - (30_149_000 as Weight) - .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) + (31_199_000 as Weight) + // Standard Error: 0 + .saturating_add((4_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 0 .saturating_add((112_000 as Weight).saturating_mul(m as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) } fn propose_execute(b: u32, m: u32, ) -> Weight { - (37_476_000 as Weight) + (38_299_000 as Weight) + // Standard Error: 0 .saturating_add((4_000 as Weight).saturating_mul(b as Weight)) - .saturating_add((217_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 0 + .saturating_add((226_000 as Weight).saturating_mul(m as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) } fn propose_proposed(b: u32, m: u32, p: u32, ) -> Weight { - (61_144_000 as Weight) + (62_096_000 as Weight) + // Standard Error: 0 .saturating_add((5_000 as Weight).saturating_mul(b as Weight)) - .saturating_add((116_000 as Weight).saturating_mul(m as Weight)) - .saturating_add((566_000 as 
Weight).saturating_mul(p as Weight)) + // Standard Error: 0 + .saturating_add((120_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 0 + .saturating_add((595_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn vote(m: u32, ) -> Weight { - (43_905_000 as Weight) - .saturating_add((277_000 as Weight).saturating_mul(m as Weight)) + (44_252_000 as Weight) + // Standard Error: 0 + .saturating_add((286_000 as Weight).saturating_mul(m as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn close_early_disapproved(m: u32, p: u32, ) -> Weight { - (57_935_000 as Weight) - .saturating_add((215_000 as Weight).saturating_mul(m as Weight)) - .saturating_add((528_000 as Weight).saturating_mul(p as Weight)) + (59_479_000 as Weight) + // Standard Error: 0 + .saturating_add((221_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 0 + .saturating_add((549_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn close_early_approved(b: u32, m: u32, p: u32, ) -> Weight { - (83_159_000 as Weight) - .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) - .saturating_add((219_000 as Weight).saturating_mul(m as Weight)) - .saturating_add((531_000 as Weight).saturating_mul(p as Weight)) + (85_690_000 as Weight) + // Standard Error: 0 + .saturating_add((4_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 0 + .saturating_add((223_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 0 + .saturating_add((555_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn close_disapproved(m: u32, p: u32, ) -> Weight { - (64_524_000 as Weight) - 
.saturating_add((217_000 as Weight).saturating_mul(m as Weight)) - .saturating_add((528_000 as Weight).saturating_mul(p as Weight)) + (65_935_000 as Weight) + // Standard Error: 0 + .saturating_add((225_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 0 + .saturating_add((554_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn close_approved(b: u32, m: u32, p: u32, ) -> Weight { - (88_726_000 as Weight) - .saturating_add((4_000 as Weight).saturating_mul(b as Weight)) - .saturating_add((220_000 as Weight).saturating_mul(m as Weight)) - .saturating_add((534_000 as Weight).saturating_mul(p as Weight)) + (92_386_000 as Weight) + // Standard Error: 0 + .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) + // Standard Error: 0 + .saturating_add((224_000 as Weight).saturating_mul(m as Weight)) + // Standard Error: 0 + .saturating_add((562_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn disapprove_proposal(p: u32, ) -> Weight { - (34_328_000 as Weight) - .saturating_add((529_000 as Weight).saturating_mul(p as Weight)) + (34_916_000 as Weight) + // Standard Error: 0 + .saturating_add((557_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } diff --git a/runtime/polkadot/src/weights/pallet_democracy.rs b/runtime/polkadot/src/weights/pallet_democracy.rs index 973d3fc718a81551f446b1ed5be38f3dca710a88..156e30c16caed8cfc08d076e8906c56a2c1dca04 100644 --- a/runtime/polkadot/src/weights/pallet_democracy.rs +++ b/runtime/polkadot/src/weights/pallet_democracy.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_democracy +//! 
Autogenerated weights for pallet_democracy +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-08, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("polkadot-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=polkadot-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_democracy +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/polkadot/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,148 +40,167 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_democracy. pub struct WeightInfo(PhantomData); -impl pallet_democracy::WeightInfo for WeightInfo { +impl pallet_democracy::WeightInfo for WeightInfo { fn propose() -> Weight { - (74_045_000 as Weight) + (78_090_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn second(s: u32, ) -> Weight { - (48_612_000 as Weight) - .saturating_add((191_000 as Weight).saturating_mul(s as Weight)) + (51_177_000 as Weight) + // Standard Error: 0 + .saturating_add((192_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn vote_new(r: u32, ) -> Weight { - (59_513_000 as Weight) - .saturating_add((214_000 as Weight).saturating_mul(r as Weight)) + (60_433_000 as Weight) + // Standard Error: 0 + .saturating_add((232_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn vote_existing(r: u32, ) -> Weight { - (58_152_000 as 
Weight) - .saturating_add((229_000 as Weight).saturating_mul(r as Weight)) + (60_023_000 as Weight) + // Standard Error: 0 + .saturating_add((238_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn emergency_cancel() -> Weight { - (36_295_000 as Weight) + (38_461_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn blacklist(p: u32, ) -> Weight { - (116_291_000 as Weight) - .saturating_add((788_000 as Weight).saturating_mul(p as Weight)) + (121_984_000 as Weight) + // Standard Error: 7_000 + .saturating_add((836_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(6 as Weight)) } fn external_propose(v: u32, ) -> Weight { - (18_334_000 as Weight) - .saturating_add((105_000 as Weight).saturating_mul(v as Weight)) + (18_730_000 as Weight) + // Standard Error: 0 + .saturating_add((108_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn external_propose_majority() -> Weight { - (4_338_000 as Weight) + (4_251_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn external_propose_default() -> Weight { - (4_410_000 as Weight) + (4_239_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn fast_track() -> Weight { - (36_946_000 as Weight) + (38_645_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn veto_external(v: u32, ) -> Weight { - (37_948_000 as Weight) - .saturating_add((178_000 as Weight).saturating_mul(v as Weight)) + (39_490_000 as Weight) + // Standard Error: 0 + .saturating_add((184_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(2 
as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn cancel_proposal(p: u32, ) -> Weight { - (81_039_000 as Weight) - .saturating_add((867_000 as Weight).saturating_mul(p as Weight)) + (84_238_000 as Weight) + // Standard Error: 0 + .saturating_add((913_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn cancel_referendum() -> Weight { - (21_665_000 as Weight) + (22_688_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn cancel_queued(r: u32, ) -> Weight { - (40_820_000 as Weight) - .saturating_add((3_372_000 as Weight).saturating_mul(r as Weight)) + (42_080_000 as Weight) + // Standard Error: 1_000 + .saturating_add((3_577_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn on_initialize_base(r: u32, ) -> Weight { - (13_580_000 as Weight) - .saturating_add((6_446_000 as Weight).saturating_mul(r as Weight)) + (16_213_000 as Weight) + // Standard Error: 2_000 + .saturating_add((7_057_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) } fn delegate(r: u32, ) -> Weight { - (77_967_000 as Weight) - .saturating_add((9_426_000 as Weight).saturating_mul(r as Weight)) + (79_864_000 as Weight) + // Standard Error: 2_000 + .saturating_add((10_135_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(4 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(r as Weight))) } fn undelegate(r: u32, ) -> Weight { - (38_680_000 as Weight) - .saturating_add((9_450_000 as Weight).saturating_mul(r as 
Weight)) + (40_868_000 as Weight) + // Standard Error: 2_000 + .saturating_add((10_138_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(r as Weight))) } fn clear_public_proposals() -> Weight { - (3_625_000 as Weight) + (3_574_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn note_preimage(b: u32, ) -> Weight { - (55_951_000 as Weight) + (57_683_000 as Weight) + // Standard Error: 0 .saturating_add((4_000 as Weight).saturating_mul(b as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn note_imminent_preimage(b: u32, ) -> Weight { - (38_423_000 as Weight) - .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) + (39_299_000 as Weight) + // Standard Error: 0 + .saturating_add((4_000 as Weight).saturating_mul(b as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn reap_preimage(b: u32, ) -> Weight { - (51_810_000 as Weight) + (53_857_000 as Weight) + // Standard Error: 0 .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn unlock_remove(r: u32, ) -> Weight { - (49_326_000 as Weight) - .saturating_add((39_000 as Weight).saturating_mul(r as Weight)) + (51_271_000 as Weight) + // Standard Error: 0 + .saturating_add((38_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn unlock_set(r: u32, ) -> Weight { - (45_357_000 as Weight) - .saturating_add((219_000 as Weight).saturating_mul(r as Weight)) + (46_362_000 
as Weight) + // Standard Error: 0 + .saturating_add((228_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn remove_vote(r: u32, ) -> Weight { - (26_485_000 as Weight) - .saturating_add((219_000 as Weight).saturating_mul(r as Weight)) + (27_825_000 as Weight) + // Standard Error: 0 + .saturating_add((223_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn remove_other_vote(r: u32, ) -> Weight { - (26_758_000 as Weight) - .saturating_add((220_000 as Weight).saturating_mul(r as Weight)) + (27_987_000 as Weight) + // Standard Error: 0 + .saturating_add((223_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } diff --git a/runtime/polkadot/src/weights/pallet_elections_phragmen.rs b/runtime/polkadot/src/weights/pallet_elections_phragmen.rs index 0646778421a377c907faba0db32824d677c5ed56..0407fe84dd34ad90cdaeace2358d15d4f3f33428 100644 --- a/runtime/polkadot/src/weights/pallet_elections_phragmen.rs +++ b/runtime/polkadot/src/weights/pallet_elections_phragmen.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_elections_phragmen +//! Autogenerated weights for pallet_elections_phragmen +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-08, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("polkadot-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=polkadot-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_elections_phragmen +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/polkadot/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,68 +40,77 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_elections_phragmen. pub struct WeightInfo(PhantomData); -impl pallet_elections_phragmen::WeightInfo for WeightInfo { +impl pallet_elections_phragmen::WeightInfo for WeightInfo { fn vote(v: u32, ) -> Weight { - (83_041_000 as Weight) - .saturating_add((121_000 as Weight).saturating_mul(v as Weight)) + (88_644_000 as Weight) + // Standard Error: 7_000 + .saturating_add((130_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn vote_update(v: u32, ) -> Weight { - (51_338_000 as Weight) - .saturating_add((120_000 as Weight).saturating_mul(v as Weight)) + (54_456_000 as Weight) + // Standard Error: 3_000 + .saturating_add((133_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn remove_voter() -> Weight { - (67_950_000 as Weight) + (71_138_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn report_defunct_voter_correct(c: u32, v: u32, ) -> Weight { (0 as Weight) - .saturating_add((1_692_000 as Weight).saturating_mul(c as Weight)) - .saturating_add((33_039_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 0 + .saturating_add((1_749_000 as Weight).saturating_mul(c as 
Weight)) + // Standard Error: 12_000 + .saturating_add((34_327_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(7 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn report_defunct_voter_incorrect(c: u32, v: u32, ) -> Weight { (0 as Weight) - .saturating_add((1_699_000 as Weight).saturating_mul(c as Weight)) - .saturating_add((32_950_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 0 + .saturating_add((1_755_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 9_000 + .saturating_add((34_280_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn submit_candidacy(c: u32, ) -> Weight { - (66_181_000 as Weight) - .saturating_add((277_000 as Weight).saturating_mul(c as Weight)) + (70_892_000 as Weight) + // Standard Error: 0 + .saturating_add((292_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn renounce_candidacy_candidate(c: u32, ) -> Weight { - (41_049_000 as Weight) - .saturating_add((140_000 as Weight).saturating_mul(c as Weight)) + (43_358_000 as Weight) + // Standard Error: 0 + .saturating_add((143_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn renounce_candidacy_members() -> Weight { - (71_936_000 as Weight) + (75_956_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn renounce_candidacy_runners_up() -> Weight { - (44_870_000 as Weight) + (46_888_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn remove_member_with_replacement() -> Weight { - (109_300_000 as Weight) + (116_053_000 as Weight) 
.saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(5 as Weight)) } fn remove_member_wrong_refund() -> Weight { - (8_517_000 as Weight) + (9_093_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) } } diff --git a/runtime/polkadot/src/weights/pallet_identity.rs b/runtime/polkadot/src/weights/pallet_identity.rs index 21f5f71e30f5426eba9bba1c918134438d583aae..d4e21417b92821355dd25f25afb6bcf5a7414b78 100644 --- a/runtime/polkadot/src/weights/pallet_identity.rs +++ b/runtime/polkadot/src/weights/pallet_identity.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_identity +//! Autogenerated weights for pallet_identity +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-08, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("polkadot-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=polkadot-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_identity +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/polkadot/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,114 +40,139 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_identity. 
pub struct WeightInfo(PhantomData); -impl pallet_identity::WeightInfo for WeightInfo { +impl pallet_identity::WeightInfo for WeightInfo { fn add_registrar(r: u32, ) -> Weight { - (26_576_000 as Weight) - .saturating_add((303_000 as Weight).saturating_mul(r as Weight)) + (28_261_000 as Weight) + // Standard Error: 3_000 + .saturating_add((318_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_identity(r: u32, x: u32, ) -> Weight { - (70_937_000 as Weight) - .saturating_add((202_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((1_743_000 as Weight).saturating_mul(x as Weight)) + (73_360_000 as Weight) + // Standard Error: 19_000 + .saturating_add((234_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 2_000 + .saturating_add((1_863_000 as Weight).saturating_mul(x as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_subs_new(s: u32, ) -> Weight { - (50_627_000 as Weight) - .saturating_add((9_326_000 as Weight).saturating_mul(s as Weight)) + (52_544_000 as Weight) + // Standard Error: 1_000 + .saturating_add((9_959_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn set_subs_old(p: u32, ) -> Weight { - (46_994_000 as Weight) - .saturating_add((3_274_000 as Weight).saturating_mul(p as Weight)) + (48_351_000 as Weight) + // Standard Error: 0 + .saturating_add((3_391_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as 
Weight).saturating_mul(p as Weight))) } fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight { - (58_587_000 as Weight) - .saturating_add((205_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((3_271_000 as Weight).saturating_mul(s as Weight)) - .saturating_add((1_017_000 as Weight).saturating_mul(x as Weight)) + (62_001_000 as Weight) + // Standard Error: 8_000 + .saturating_add((171_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 0 + .saturating_add((3_390_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 + .saturating_add((1_089_000 as Weight).saturating_mul(x as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn request_judgement(r: u32, x: u32, ) -> Weight { - (71_095_000 as Weight) - .saturating_add((317_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((1_989_000 as Weight).saturating_mul(x as Weight)) + (74_257_000 as Weight) + // Standard Error: 8_000 + .saturating_add((334_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 1_000 + .saturating_add((2_141_000 as Weight).saturating_mul(x as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn cancel_request(r: u32, x: u32, ) -> Weight { - (61_521_000 as Weight) - .saturating_add((171_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((1_975_000 as Weight).saturating_mul(x as Weight)) + (62_893_000 as Weight) + // Standard Error: 11_000 + .saturating_add((231_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 1_000 + .saturating_add((2_117_000 as Weight).saturating_mul(x as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_fee(r: u32, ) -> Weight { - (10_490_000 as Weight) - .saturating_add((256_000 as 
Weight).saturating_mul(r as Weight)) + (10_890_000 as Weight) + // Standard Error: 1_000 + .saturating_add((268_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_account_id(r: u32, ) -> Weight { - (11_703_000 as Weight) - .saturating_add((259_000 as Weight).saturating_mul(r as Weight)) + (12_410_000 as Weight) + // Standard Error: 1_000 + .saturating_add((268_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_fields(r: u32, ) -> Weight { - (10_525_000 as Weight) - .saturating_add((252_000 as Weight).saturating_mul(r as Weight)) + (10_855_000 as Weight) + // Standard Error: 1_000 + .saturating_add((269_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn provide_judgement(r: u32, x: u32, ) -> Weight { - (47_790_000 as Weight) - .saturating_add((284_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((1_982_000 as Weight).saturating_mul(x as Weight)) + (49_519_000 as Weight) + // Standard Error: 9_000 + .saturating_add((299_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 1_000 + .saturating_add((2_127_000 as Weight).saturating_mul(x as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } - fn kill_identity(r: u32, s: u32, x: u32 ) -> Weight { - (96_980_000 as Weight) - .saturating_add((210_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((3_285_000 as Weight).saturating_mul(s as Weight)) - .saturating_add((2_000 as Weight).saturating_mul(x as Weight)) + fn kill_identity(r: u32, s: u32, x: u32, ) -> Weight { + (103_419_000 as Weight) + // Standard Error: 5_000 + .saturating_add((120_000 as Weight).saturating_mul(r as Weight)) + // 
Standard Error: 0 + .saturating_add((3_400_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 + .saturating_add((3_000 as Weight).saturating_mul(x as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn add_sub(s: u32, ) -> Weight { - (71_394_000 as Weight) - .saturating_add((187_000 as Weight).saturating_mul(s as Weight)) + (72_490_000 as Weight) + // Standard Error: 0 + .saturating_add((191_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn rename_sub(s: u32, ) -> Weight { - (23_806_000 as Weight) - .saturating_add((26_000 as Weight).saturating_mul(s as Weight)) + (23_454_000 as Weight) + // Standard Error: 0 + .saturating_add((25_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn remove_sub(s: u32, ) -> Weight { - (67_756_000 as Weight) - .saturating_add((161_000 as Weight).saturating_mul(s as Weight)) + (69_012_000 as Weight) + // Standard Error: 0 + .saturating_add((164_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn quit_sub(s: u32, ) -> Weight { - (44_687_000 as Weight) - .saturating_add((157_000 as Weight).saturating_mul(s as Weight)) + (45_725_000 as Weight) + // Standard Error: 0 + .saturating_add((158_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } diff --git a/runtime/polkadot/src/weights/pallet_im_online.rs b/runtime/polkadot/src/weights/pallet_im_online.rs index 07fca4c93543ddc31d8befdbd31353755aed77db..1e9a2b2f20ad61c43a3ee625cb812d919627164c 100644 
--- a/runtime/polkadot/src/weights/pallet_im_online.rs +++ b/runtime/polkadot/src/weights/pallet_im_online.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_im_online +//! Autogenerated weights for pallet_im_online +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("polkadot-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=polkadot-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_im_online +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/polkadot/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,12 +40,15 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_im_online. 
pub struct WeightInfo(PhantomData); -impl pallet_im_online::WeightInfo for WeightInfo { +impl pallet_im_online::WeightInfo for WeightInfo { fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight { - (109_072_000 as Weight) - .saturating_add((216_000 as Weight).saturating_mul(k as Weight)) - .saturating_add((481_000 as Weight).saturating_mul(e as Weight)) + (111_740_000 as Weight) + // Standard Error: 0 + .saturating_add((217_000 as Weight).saturating_mul(k as Weight)) + // Standard Error: 1_000 + .saturating_add((510_000 as Weight).saturating_mul(e as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } diff --git a/runtime/polkadot/src/weights/pallet_indices.rs b/runtime/polkadot/src/weights/pallet_indices.rs index 7c45997f2f07c1d3ff55389fb729778387a95783..28bd0c88a71a5842231840d411db5a84ac81dab5 100644 --- a/runtime/polkadot/src/weights/pallet_indices.rs +++ b/runtime/polkadot/src/weights/pallet_indices.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_indices +//! Autogenerated weights for pallet_indices +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("polkadot-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=polkadot-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_indices +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/polkadot/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,30 +40,31 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_indices. pub struct WeightInfo(PhantomData); -impl pallet_indices::WeightInfo for WeightInfo { +impl pallet_indices::WeightInfo for WeightInfo { fn claim() -> Weight { - (50_234_000 as Weight) + (52_777_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn transfer() -> Weight { - (55_731_000 as Weight) + (59_482_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn free() -> Weight { - (44_823_000 as Weight) + (48_062_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_transfer() -> Weight { - (46_183_000 as Weight) + (49_541_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn freeze() -> Weight { - (41_829_000 as Weight) + (45_151_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } diff --git a/runtime/polkadot/src/weights/pallet_multisig.rs b/runtime/polkadot/src/weights/pallet_multisig.rs index ab0c619bf26cbfab56ead44694e7ca319d9aca25..c7582995d07fd641a5d79b7e694dc1c892b88b25 100644 --- a/runtime/polkadot/src/weights/pallet_multisig.rs +++ 
b/runtime/polkadot/src/weights/pallet_multisig.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_multisig +//! Autogenerated weights for pallet_multisig +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("polkadot-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=polkadot-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_multisig +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/polkadot/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,68 +40,84 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_multisig. 
pub struct WeightInfo(PhantomData); -impl pallet_multisig::WeightInfo for WeightInfo { +impl pallet_multisig::WeightInfo for WeightInfo { fn as_multi_threshold_1(z: u32, ) -> Weight { - (12_481_000 as Weight) + (12_532_000 as Weight) + // Standard Error: 0 .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) } fn as_multi_create(s: u32, z: u32, ) -> Weight { - (65_650_000 as Weight) - .saturating_add((92_000 as Weight).saturating_mul(s as Weight)) + (70_460_000 as Weight) + // Standard Error: 0 + .saturating_add((86_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn as_multi_create_store(s: u32, z: u32, ) -> Weight { - (74_115_000 as Weight) - .saturating_add((94_000 as Weight).saturating_mul(s as Weight)) + (79_056_000 as Weight) + // Standard Error: 0 + .saturating_add((90_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 .saturating_add((3_000 as Weight).saturating_mul(z as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn as_multi_approve(s: u32, z: u32, ) -> Weight { - (40_671_000 as Weight) - .saturating_add((111_000 as Weight).saturating_mul(s as Weight)) + (42_175_000 as Weight) + // Standard Error: 0 + .saturating_add((113_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn as_multi_approve_store(s: u32, z: u32, ) -> Weight { - (71_781_000 as Weight) + (75_726_000 as Weight) + // Standard Error: 0 .saturating_add((126_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 .saturating_add((3_000 as Weight).saturating_mul(z as Weight)) 
.saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn as_multi_complete(s: u32, z: u32, ) -> Weight { - (83_125_000 as Weight) - .saturating_add((248_000 as Weight).saturating_mul(s as Weight)) + (87_543_000 as Weight) + // Standard Error: 0 + .saturating_add((247_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 .saturating_add((5_000 as Weight).saturating_mul(z as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn approve_as_multi_create(s: u32, ) -> Weight { - (65_465_000 as Weight) - .saturating_add((94_000 as Weight).saturating_mul(s as Weight)) + (69_831_000 as Weight) + // Standard Error: 0 + .saturating_add((89_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn approve_as_multi_approve(s: u32, ) -> Weight { - (39_362_000 as Weight) - .saturating_add((113_000 as Weight).saturating_mul(s as Weight)) + (41_395_000 as Weight) + // Standard Error: 0 + .saturating_add((111_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn approve_as_multi_complete(s: u32, ) -> Weight { - (154_095_000 as Weight) + (162_511_000 as Weight) + // Standard Error: 0 .saturating_add((249_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn cancel_as_multi(s: u32, ) -> Weight { - (107_420_000 as Weight) - .saturating_add((95_000 as Weight).saturating_mul(s as Weight)) + (112_698_000 as Weight) + // Standard Error: 0 + .saturating_add((90_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } diff --git 
a/runtime/polkadot/src/weights/pallet_proxy.rs b/runtime/polkadot/src/weights/pallet_proxy.rs index 36cbb94ad6efeb1aab9406d81bad917a1ef12842..0ea750d212aae3913dfd4fc0ac43ea504c6e4cba 100644 --- a/runtime/polkadot/src/weights/pallet_proxy.rs +++ b/runtime/polkadot/src/weights/pallet_proxy.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_proxy +//! Autogenerated weights for pallet_proxy +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("polkadot-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=polkadot-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_proxy +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/polkadot/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,68 +40,83 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_proxy. 
pub struct WeightInfo(PhantomData); -impl pallet_proxy::WeightInfo for WeightInfo { +impl pallet_proxy::WeightInfo for WeightInfo { fn proxy(p: u32, ) -> Weight { - (30_453_000 as Weight) - .saturating_add((187_000 as Weight).saturating_mul(p as Weight)) + (31_560_000 as Weight) + // Standard Error: 1_000 + .saturating_add((190_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) } fn proxy_announced(a: u32, p: u32, ) -> Weight { - (64_472_000 as Weight) - .saturating_add((799_000 as Weight).saturating_mul(a as Weight)) - .saturating_add((182_000 as Weight).saturating_mul(p as Weight)) + (65_555_000 as Weight) + // Standard Error: 1_000 + .saturating_add((843_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 1_000 + .saturating_add((194_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn remove_announcement(a: u32, p: u32, ) -> Weight { - (41_241_000 as Weight) - .saturating_add((792_000 as Weight).saturating_mul(a as Weight)) - .saturating_add((3_000 as Weight).saturating_mul(p as Weight)) + (41_808_000 as Weight) + // Standard Error: 1_000 + .saturating_add((842_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 1_000 + .saturating_add((10_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn reject_announcement(a: u32, p: u32, ) -> Weight { - (40_818_000 as Weight) - .saturating_add((799_000 as Weight).saturating_mul(a as Weight)) - .saturating_add((10_000 as Weight).saturating_mul(p as Weight)) + (41_713_000 as Weight) + // Standard Error: 1_000 + .saturating_add((847_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 1_000 + .saturating_add((12_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) 
.saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn announce(a: u32, p: u32, ) -> Weight { - (64_919_000 as Weight) - .saturating_add((695_000 as Weight).saturating_mul(a as Weight)) - .saturating_add((191_000 as Weight).saturating_mul(p as Weight)) + (66_579_000 as Weight) + // Standard Error: 1_000 + .saturating_add((730_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 1_000 + .saturating_add((199_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn add_proxy(p: u32, ) -> Weight { - (43_889_000 as Weight) - .saturating_add((190_000 as Weight).saturating_mul(p as Weight)) + (44_930_000 as Weight) + // Standard Error: 1_000 + .saturating_add((206_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn remove_proxy(p: u32, ) -> Weight { - (39_338_000 as Weight) - .saturating_add((228_000 as Weight).saturating_mul(p as Weight)) + (40_436_000 as Weight) + // Standard Error: 1_000 + .saturating_add((241_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn remove_proxies(p: u32, ) -> Weight { - (37_787_000 as Weight) - .saturating_add((182_000 as Weight).saturating_mul(p as Weight)) + (38_695_000 as Weight) + // Standard Error: 1_000 + .saturating_add((191_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn anonymous(p: u32, ) -> Weight { - (62_203_000 as Weight) - .saturating_add((22_000 as Weight).saturating_mul(p as Weight)) + (64_695_000 as Weight) + // Standard Error: 1_000 + .saturating_add((13_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) 
.saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn kill_anonymous(p: u32, ) -> Weight { - (40_398_000 as Weight) - .saturating_add((182_000 as Weight).saturating_mul(p as Weight)) + (41_503_000 as Weight) + // Standard Error: 1_000 + .saturating_add((192_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } diff --git a/runtime/polkadot/src/weights/pallet_scheduler.rs b/runtime/polkadot/src/weights/pallet_scheduler.rs index f360917dd49c13ca76f3d62855cb957ddc872997..d3fb9ff8b47eab901b1f2ed6f0991f4afae5258f 100644 --- a/runtime/polkadot/src/weights/pallet_scheduler.rs +++ b/runtime/polkadot/src/weights/pallet_scheduler.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_scheduler +//! Autogenerated weights for pallet_scheduler +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("polkadot-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=polkadot-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_scheduler +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/polkadot/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,29 +40,34 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_scheduler. 
pub struct WeightInfo(PhantomData); -impl pallet_scheduler::WeightInfo for WeightInfo { +impl pallet_scheduler::WeightInfo for WeightInfo { fn schedule(s: u32, ) -> Weight { - (33_047_000 as Weight) - .saturating_add((47_000 as Weight).saturating_mul(s as Weight)) + (34_190_000 as Weight) + // Standard Error: 0 + .saturating_add((41_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn cancel(s: u32, ) -> Weight { - (30_172_000 as Weight) - .saturating_add((3_053_000 as Weight).saturating_mul(s as Weight)) + (31_368_000 as Weight) + // Standard Error: 7_000 + .saturating_add((3_230_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn schedule_named(s: u32, ) -> Weight { - (42_799_000 as Weight) - .saturating_add((64_000 as Weight).saturating_mul(s as Weight)) + (44_444_000 as Weight) + // Standard Error: 1_000 + .saturating_add((55_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn cancel_named(s: u32, ) -> Weight { - (34_134_000 as Weight) - .saturating_add((3_064_000 as Weight).saturating_mul(s as Weight)) + (35_660_000 as Weight) + // Standard Error: 7_000 + .saturating_add((3_238_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } diff --git a/runtime/polkadot/src/weights/pallet_session.rs b/runtime/polkadot/src/weights/pallet_session.rs index 4732454bc2116b2d8e61ff9b6f7f328fb531dcf5..f496e32ba510f95c4c85c5dbbe664c1728abc96b 100644 --- a/runtime/polkadot/src/weights/pallet_session.rs +++ b/runtime/polkadot/src/weights/pallet_session.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with 
Polkadot. If not, see . -//! Weights for pallet_session +//! Autogenerated weights for pallet_session +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("polkadot-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=polkadot-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_session +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/polkadot/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,15 +40,16 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_session. pub struct WeightInfo(PhantomData); -impl pallet_session::WeightInfo for WeightInfo { +impl pallet_session::WeightInfo for WeightInfo { fn set_keys() -> Weight { - (89_552_000 as Weight) + (95_877_000 as Weight) .saturating_add(T::DbWeight::get().reads(7 as Weight)) .saturating_add(T::DbWeight::get().writes(6 as Weight)) } fn purge_keys() -> Weight { - (53_122_000 as Weight) + (56_080_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(6 as Weight)) } diff --git a/runtime/polkadot/src/weights/pallet_staking.rs b/runtime/polkadot/src/weights/pallet_staking.rs index 35e2ebe2a861c575e27b46799b62c844daea4b86..134f87b140bba4335c4e38f52da1ec7a01c19686 100644 --- a/runtime/polkadot/src/weights/pallet_staking.rs +++ b/runtime/polkadot/src/weights/pallet_staking.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_staking +//! Autogenerated weights for pallet_staking +//! //! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-30, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("polkadot-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=polkadot-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_staking +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/polkadot/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,137 +40,151 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_staking. pub struct WeightInfo(PhantomData); -impl pallet_staking::WeightInfo for WeightInfo { +impl pallet_staking::WeightInfo for WeightInfo { fn bond() -> Weight { - (95_041_000 as Weight) + (98_601_000 as Weight) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn bond_extra() -> Weight { - (73_981_000 as Weight) + (78_522_000 as Weight) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn unbond() -> Weight { - (67_107_000 as Weight) + (70_546_000 as Weight) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn withdraw_unbonded_update(s: u32, ) -> Weight { - (67_795_000 as Weight) - .saturating_add((32_000 as Weight).saturating_mul(s as Weight)) + (71_228_000 as Weight) + // Standard Error: 0 + .saturating_add((33_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn withdraw_unbonded_kill(s: u32, ) -> Weight { - (110_537_000 as Weight) - 
.saturating_add((3_879_000 as Weight).saturating_mul(s as Weight)) + (116_427_000 as Weight) + // Standard Error: 1_000 + .saturating_add((4_046_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(7 as Weight)) .saturating_add(T::DbWeight::get().writes(8 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn validate() -> Weight { - (22_802_000 as Weight) + (24_212_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn nominate(n: u32, ) -> Weight { - (29_784_000 as Weight) - .saturating_add((401_000 as Weight).saturating_mul(n as Weight)) + (31_922_000 as Weight) + // Standard Error: 12_000 + .saturating_add((418_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn chill() -> Weight { - (22_719_000 as Weight) + (24_183_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn set_payee() -> Weight { - (15_354_000 as Weight) + (16_569_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_controller() -> Weight { - (33_783_000 as Weight) + (35_580_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn set_validator_count() -> Weight { - (3_126_000 as Weight) + (3_217_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_no_eras() -> Weight { - (3_548_000 as Weight) + (3_688_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_new_era() -> Weight { - (3_474_000 as Weight) + (3_739_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_new_era_always() -> Weight { - (3_518_000 as Weight) + (3_611_000 
as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_invulnerables(v: u32, ) -> Weight { - (3_639_000 as Weight) + (3_864_000 as Weight) + // Standard Error: 0 .saturating_add((9_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_unstake(s: u32, ) -> Weight { - (75_900_000 as Weight) - .saturating_add((3_891_000 as Weight).saturating_mul(s as Weight)) + (78_757_000 as Weight) + // Standard Error: 1_000 + .saturating_add((4_037_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(8 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn cancel_deferred_slash(s: u32, ) -> Weight { - (5_844_744_000 as Weight) - .saturating_add((34_644_000 as Weight).saturating_mul(s as Weight)) + (5_845_293_000 as Weight) + // Standard Error: 388_000 + .saturating_add((34_621_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn payout_stakers_dead_controller(n: u32, ) -> Weight { - (145_214_000 as Weight) - .saturating_add((57_875_000 as Weight).saturating_mul(n as Weight)) + (142_251_000 as Weight) + // Standard Error: 11_000 + .saturating_add((60_125_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(11 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) } fn payout_stakers_alive_staked(n: u32, ) -> Weight { - (173_115_000 as Weight) - .saturating_add((76_912_000 as Weight).saturating_mul(n as Weight)) + (172_317_000 as Weight) + // Standard Error: 17_000 + .saturating_add((78_585_000 as Weight).saturating_mul(n as Weight)) 
.saturating_add(T::DbWeight::get().reads(12 as Weight)) .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(3 as Weight)) .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(n as Weight))) } fn rebond(l: u32, ) -> Weight { - (46_569_000 as Weight) - .saturating_add((104_000 as Weight).saturating_mul(l as Weight)) + (48_379_000 as Weight) + // Standard Error: 1_000 + .saturating_add((109_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn set_history_depth(e: u32, ) -> Weight { (0 as Weight) - .saturating_add((36_641_000 as Weight).saturating_mul(e as Weight)) + // Standard Error: 64_000 + .saturating_add((39_072_000 as Weight).saturating_mul(e as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) .saturating_add(T::DbWeight::get().writes((7 as Weight).saturating_mul(e as Weight))) } fn reap_stash(s: u32, ) -> Weight { - (95_718_000 as Weight) - .saturating_add((3_875_000 as Weight).saturating_mul(s as Weight)) + (100_021_000 as Weight) + // Standard Error: 1_000 + .saturating_add((4_046_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(8 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn new_era(v: u32, n: u32, ) -> Weight { (0 as Weight) - .saturating_add((1_262_144_000 as Weight).saturating_mul(v as Weight)) - .saturating_add((143_471_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(10 as Weight)) + // Standard Error: 759_000 + .saturating_add((751_624_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 38_000 + .saturating_add((106_491_000 as Weight).saturating_mul(n as Weight)) + 
.saturating_add(T::DbWeight::get().reads(7 as Weight)) .saturating_add(T::DbWeight::get().reads((4 as Weight).saturating_mul(v as Weight))) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(8 as Weight)) @@ -161,10 +192,14 @@ impl pallet_staking::WeightInfo for WeightInfo { } fn submit_solution_better(v: u32, n: u32, a: u32, w: u32, ) -> Weight { (0 as Weight) - .saturating_add((879_000 as Weight).saturating_mul(v as Weight)) - .saturating_add((488_000 as Weight).saturating_mul(n as Weight)) - .saturating_add((133_102_000 as Weight).saturating_mul(a as Weight)) - .saturating_add((8_073_000 as Weight).saturating_mul(w as Weight)) + // Standard Error: 44_000 + .saturating_add((1_321_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 17_000 + .saturating_add((535_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 44_000 + .saturating_add((102_449_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 91_000 + .saturating_add((7_907_000 as Weight).saturating_mul(w as Weight)) .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().reads((4 as Weight).saturating_mul(a as Weight))) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(w as Weight))) diff --git a/runtime/polkadot/src/weights/pallet_timestamp.rs b/runtime/polkadot/src/weights/pallet_timestamp.rs index 44828204ad581819b004a0814b6bbb68e0326410..4ebf6c1988c247643a6781c848f3b53c48981c9d 100644 --- a/runtime/polkadot/src/weights/pallet_timestamp.rs +++ b/runtime/polkadot/src/weights/pallet_timestamp.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_timestamp +//! Autogenerated weights for pallet_timestamp +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! 
DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("polkadot-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=polkadot-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_timestamp +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/polkadot/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,14 +40,15 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_timestamp. pub struct WeightInfo(PhantomData); -impl pallet_timestamp::WeightInfo for WeightInfo { +impl pallet_timestamp::WeightInfo for WeightInfo { fn set() -> Weight { - (11_029_000 as Weight) + (11_397_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn on_finalize() -> Weight { - (6_128_000 as Weight) + (6_096_000 as Weight) } } diff --git a/runtime/polkadot/src/weights/pallet_treasury.rs b/runtime/polkadot/src/weights/pallet_treasury.rs index bd0b383207ca1c23f488ec32b0f18e9cf30049de..1075855224af7c78ee1ecb72fca4c844d859b5db 100644 --- a/runtime/polkadot/src/weights/pallet_treasury.rs +++ b/runtime/polkadot/src/weights/pallet_treasury.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_treasury +//! Autogenerated weights for pallet_treasury +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("polkadot-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=polkadot-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_treasury +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/polkadot/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,115 +40,124 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_treasury. pub struct WeightInfo(PhantomData); -impl pallet_treasury::WeightInfo for WeightInfo { +impl pallet_treasury::WeightInfo for WeightInfo { fn propose_spend() -> Weight { - (53_519_000 as Weight) + (54_574_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn reject_proposal() -> Weight { - (84_266_000 as Weight) + (85_554_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn approve_proposal() -> Weight { - (12_037_000 as Weight) + (11_791_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn report_awesome(r: u32, ) -> Weight { - (66_224_000 as Weight) + (67_174_000 as Weight) + // Standard Error: 0 .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn retract_tip() -> Weight { - (55_293_000 as Weight) + (56_466_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn tip_new(r: u32, t: u32, ) -> Weight { - (41_247_000 as Weight) + (42_193_000 as Weight) + // Standard Error: 0 .saturating_add((2_000 as Weight).saturating_mul(r as Weight)) - 
.saturating_add((151_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 0 + .saturating_add((149_000 as Weight).saturating_mul(t as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn tip(t: u32, ) -> Weight { - (28_134_000 as Weight) - .saturating_add((699_000 as Weight).saturating_mul(t as Weight)) + (28_405_000 as Weight) + // Standard Error: 1_000 + .saturating_add((705_000 as Weight).saturating_mul(t as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn close_tip(t: u32, ) -> Weight { - (103_484_000 as Weight) - .saturating_add((364_000 as Weight).saturating_mul(t as Weight)) + (106_075_000 as Weight) + // Standard Error: 0 + .saturating_add((361_000 as Weight).saturating_mul(t as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn propose_bounty(d: u32, ) -> Weight { - (56_289_000 as Weight) + (58_025_000 as Weight) + // Standard Error: 0 .saturating_add((1_000 as Weight).saturating_mul(d as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn approve_bounty() -> Weight { - (15_540_000 as Weight) + (15_338_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn propose_curator() -> Weight { - (12_461_000 as Weight) + (12_080_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn unassign_curator() -> Weight { - (67_845_000 as Weight) + (69_557_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn accept_curator() -> Weight { - (47_132_000 as Weight) + (47_745_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) 
.saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn award_bounty() -> Weight { - (33_731_000 as Weight) + (33_468_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn claim_bounty() -> Weight { - (157_087_000 as Weight) + (161_390_000 as Weight) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(5 as Weight)) } fn close_bounty_proposed() -> Weight { - (66_675_000 as Weight) + (68_016_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn close_bounty_active() -> Weight { - (104_221_000 as Weight) + (106_292_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn extend_bounty_expiry() -> Weight { - (32_126_000 as Weight) + (32_515_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn on_initialize_proposals(p: u32, ) -> Weight { - (67_274_000 as Weight) - .saturating_add((69_725_000 as Weight).saturating_mul(p as Weight)) + (72_159_000 as Weight) + // Standard Error: 26_000 + .saturating_add((72_167_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(p as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(p as Weight))) } fn on_initialize_bounties(b: u32, ) -> Weight { - (60_746_000 as Weight) - .saturating_add((69_346_000 as Weight).saturating_mul(b as Weight)) + (66_526_000 as Weight) + // Standard Error: 18_000 + .saturating_add((71_398_000 as Weight).saturating_mul(b as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(b as 
Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) diff --git a/runtime/polkadot/src/weights/pallet_utility.rs b/runtime/polkadot/src/weights/pallet_utility.rs index 31fe14f110405533e37f85175ebf56c9d5ec2c51..fd10a605de3d591edc8475b887ec58939bb42ebe 100644 --- a/runtime/polkadot/src/weights/pallet_utility.rs +++ b/runtime/polkadot/src/weights/pallet_utility.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_utility +//! Autogenerated weights for pallet_utility +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("polkadot-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=polkadot-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_utility +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/polkadot/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,17 +40,20 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_utility. 
pub struct WeightInfo(PhantomData); -impl pallet_utility::WeightInfo for WeightInfo { +impl pallet_utility::WeightInfo for WeightInfo { fn batch(c: u32, ) -> Weight { - (18_589_000 as Weight) - .saturating_add((1_734_000 as Weight).saturating_mul(c as Weight)) + (19_701_000 as Weight) + // Standard Error: 0 + .saturating_add((2_118_000 as Weight).saturating_mul(c as Weight)) } fn as_derivative() -> Weight { - (5_611_000 as Weight) + (5_534_000 as Weight) } fn batch_all(c: u32, ) -> Weight { - (21_104_000 as Weight) - .saturating_add((1_509_000 as Weight).saturating_mul(c as Weight)) + (20_354_000 as Weight) + // Standard Error: 0 + .saturating_add((2_124_000 as Weight).saturating_mul(c as Weight)) } } diff --git a/runtime/polkadot/src/weights/pallet_vesting.rs b/runtime/polkadot/src/weights/pallet_vesting.rs index 0215020eafcbaecd77be381897c255c6dec08eef..ba41d013d1809ffff03efd164f7064f4b00f26a8 100644 --- a/runtime/polkadot/src/weights/pallet_vesting.rs +++ b/runtime/polkadot/src/weights/pallet_vesting.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_vesting +//! Autogenerated weights for pallet_vesting +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("polkadot-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=polkadot-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_vesting +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/polkadot/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,41 +40,48 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_vesting. pub struct WeightInfo(PhantomData); -impl pallet_vesting::WeightInfo for WeightInfo { +impl pallet_vesting::WeightInfo for WeightInfo { fn vest_locked(l: u32, ) -> Weight { - (54_300_000 as Weight) - .saturating_add((210_000 as Weight).saturating_mul(l as Weight)) + (55_961_000 as Weight) + // Standard Error: 0 + .saturating_add((138_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn vest_unlocked(l: u32, ) -> Weight { - (57_381_000 as Weight) - .saturating_add((104_000 as Weight).saturating_mul(l as Weight)) + (60_522_000 as Weight) + // Standard Error: 2_000 + .saturating_add((107_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn vest_other_locked(l: u32, ) -> Weight { - (54_130_000 as Weight) - .saturating_add((215_000 as Weight).saturating_mul(l as Weight)) + (55_712_000 as Weight) + // Standard Error: 0 + .saturating_add((135_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn vest_other_unlocked(l: u32, ) -> Weight { - (57_208_000 as Weight) - .saturating_add((101_000 as Weight).saturating_mul(l as Weight)) + (59_981_000 as Weight) + // 
Standard Error: 2_000 + .saturating_add((113_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn vested_transfer(l: u32, ) -> Weight { - (117_560_000 as Weight) - .saturating_add((249_000 as Weight).saturating_mul(l as Weight)) + (122_684_000 as Weight) + // Standard Error: 8_000 + .saturating_add((171_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn force_vested_transfer(l: u32, ) -> Weight { - (116_476_000 as Weight) - .saturating_add((253_000 as Weight).saturating_mul(l as Weight)) + (121_973_000 as Weight) + // Standard Error: 8_000 + .saturating_add((165_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } diff --git a/runtime/rococo-v1/Cargo.toml b/runtime/rococo/Cargo.toml similarity index 58% rename from runtime/rococo-v1/Cargo.toml rename to runtime/rococo/Cargo.toml index 99162b757426a18172f3cb632588b0fa9d5be08b..8a0b123d6f87ae015d7cb7c2a45a76e9a710a3f2 100644 --- a/runtime/rococo-v1/Cargo.toml +++ b/runtime/rococo/Cargo.toml @@ -1,51 +1,52 @@ [package] -name = "rococo-v1-runtime" -version = "0.8.26" +name = "rococo-runtime" +version = "0.8.27" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -serde = { version = "1.0.102", default-features = false } -serde_derive = { version = "1.0.102", optional = true } -smallvec = "1.4.1" +parity-scale-codec = { version = "1.3.5", default-features = false, features = ["derive"] } +serde = { version = "1.0.118", default-features = false } +serde_derive = { version = "1.0.117", optional = true } +smallvec = "1.5.1" -frame-support = { git = 
"https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { package = "sp-std", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-std = { package = "sp-std", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-session = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-staking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", 
default-features = false } +sp-version = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } -tx-pool-api = { package = "sp-transaction-pool", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -block-builder-api = { package = "sp-block-builder", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -inherents = { package = "sp-inherents", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -offchain-primitives = { package = "sp-offchain", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +tx-pool-api = { package = "sp-transaction-pool", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +block-builder-api = { package = "sp-block-builder", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +inherents = { package = "sp-inherents", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +offchain-primitives = { package = "sp-offchain", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } -pallet-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-authorship = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -babe-primitives = { package = "sp-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } 
-pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-im-online = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-indices = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-staking-reward-curve = { package = "pallet-staking-reward-curve", git = "https://github.com/paritytech/substrate", branch = "master" } -frame-executive = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-offences = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -authority-discovery-primitives = { package = "sp-authority-discovery", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-authorship = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-sudo = { git = "https://github.com/paritytech/substrate", branch = 
"polkadot-release-v0.8.27", default-features = false } +babe-primitives = { package = "sp-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-session = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-im-online = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-indices = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-staking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-staking-reward-curve = { package = "pallet-staking-reward-curve", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +frame-executive = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-grandpa = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-offences = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +authority-discovery-primitives = { package = "sp-authority-discovery", git = 
"https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } -frame-system = {git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +frame-system = {git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } runtime-common = { package = "polkadot-runtime-common", path = "../common", default-features = false } primitives = { package = "polkadot-primitives", path = "../../primitives", default-features = false } @@ -53,7 +54,7 @@ polkadot-parachain = { path = "../../parachain", default-features = false } runtime-parachains = { package = "polkadot-runtime-parachains", path = "../parachains", default-features = false } [build-dependencies] -wasm-builder-runner = { package = "substrate-wasm-builder-runner", version = "2.0.0" } +substrate-wasm-builder = "3.0.0" [features] default = ["std"] @@ -65,9 +66,10 @@ std = [ "pallet-babe/std", "babe-primitives/std", "pallet-balances/std", - "codec/std", + "parity-scale-codec/std", "frame-executive/std", "pallet-grandpa/std", + "pallet-sudo/std", "pallet-indices/std", "pallet-im-online/std", "inherents/std", diff --git a/runtime/rococo-v1/README.md b/runtime/rococo/README.md similarity index 100% rename from runtime/rococo-v1/README.md rename to runtime/rococo/README.md diff --git a/runtime/rococo-v1/build.rs b/runtime/rococo/build.rs similarity index 91% rename from runtime/rococo-v1/build.rs rename to runtime/rococo/build.rs index dff1419829974d5c86b0c765974413d040d661a8..f287ec0e1eea35ba59531a7f1f5c4b89bd31e227 100644 --- a/runtime/rococo-v1/build.rs +++ b/runtime/rococo/build.rs @@ -14,12 +14,11 @@ // You should have 
received a copy of the GNU General Public License // along with Substrate. If not, see . -use wasm_builder_runner::WasmBuilder; +use substrate_wasm_builder::WasmBuilder; fn main() { WasmBuilder::new() .with_current_project() - .with_wasm_builder_from_crates("2.0.0") .import_memory() .export_heap_base() .build() diff --git a/runtime/rococo-v1/src/constants.rs b/runtime/rococo/src/constants.rs similarity index 88% rename from runtime/rococo-v1/src/constants.rs rename to runtime/rococo/src/constants.rs index a565ca4dbcfde6dc9d1e625e938bf69928b017b1..616c28de1e41b5d5e777ac90cf7b04798d2f1fb3 100644 --- a/runtime/rococo-v1/src/constants.rs +++ b/runtime/rococo/src/constants.rs @@ -33,7 +33,8 @@ pub mod time { use primitives::v0::{Moment, BlockNumber}; pub const MILLISECS_PER_BLOCK: Moment = 6000; pub const SLOT_DURATION: Moment = MILLISECS_PER_BLOCK; - pub const EPOCH_DURATION_IN_BLOCKS: BlockNumber = 1 * HOURS; + // 30 seconds for now + pub const EPOCH_DURATION_IN_BLOCKS: BlockNumber = MINUTES / 2; // These time units are defined in number of blocks. 
pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber); @@ -86,8 +87,8 @@ pub mod fee { #[cfg(test)] mod tests { - use frame_support::weights::WeightToFeePolynomial; - use runtime_common::{MaximumBlockWeight, ExtrinsicBaseWeight}; + use frame_support::weights::{WeightToFeePolynomial, DispatchClass}; + use runtime_common::BlockWeights; use super::fee::WeightToFee; use super::currency::{CENTS, DOLLARS, MILLICENTS}; @@ -95,8 +96,8 @@ mod tests { // This function tests that the fee for `MaximumBlockWeight` of weight is correct fn full_block_fee_is_correct() { // A full block should cost 16 DOLLARS - println!("Base: {}", ExtrinsicBaseWeight::get()); - let x = WeightToFee::calc(&MaximumBlockWeight::get()); + println!("Base: {}", BlockWeights::get().get(DispatchClass::Normal).base_extrinsic); + let x = WeightToFee::calc(&BlockWeights::get().max_block); let y = 16 * DOLLARS; assert!(x.max(y) - x.min(y) < MILLICENTS); } @@ -105,8 +106,9 @@ mod tests { // This function tests that the fee for `ExtrinsicBaseWeight` of weight is correct fn extrinsic_base_fee_is_correct() { // `ExtrinsicBaseWeight` should cost 1/10 of a CENT - println!("Base: {}", ExtrinsicBaseWeight::get()); - let x = WeightToFee::calc(&ExtrinsicBaseWeight::get()); + let base_weight = BlockWeights::get().get(DispatchClass::Normal).base_extrinsic; + println!("Base: {}", base_weight); + let x = WeightToFee::calc(&base_weight); let y = CENTS / 10; assert!(x.max(y) - x.min(y) < MILLICENTS); } diff --git a/runtime/rococo-v1/src/lib.rs b/runtime/rococo/src/lib.rs similarity index 87% rename from runtime/rococo-v1/src/lib.rs rename to runtime/rococo/src/lib.rs index f6195e48f5b9df2f8c33be025c14d751a287cc9b..1a8d9cfdf8c1cc8e01aa4c3d2c7c56cea41beb7a 100644 --- a/runtime/rococo-v1/src/lib.rs +++ b/runtime/rococo/src/lib.rs @@ -20,19 +20,20 @@ // `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. 
#![recursion_limit = "256"] +use pallet_transaction_payment::CurrencyAdapter; use sp_std::prelude::*; -use codec::Encode; +use sp_std::collections::btree_map::BTreeMap; +use parity_scale_codec::Encode; use primitives::v1::{ AccountId, AccountIndex, Balance, BlockNumber, Hash, Nonce, Signature, Moment, GroupRotationInfo, CoreState, Id, ValidationData, ValidationCode, CandidateEvent, ValidatorId, ValidatorIndex, CommittedCandidateReceipt, OccupiedCoreAssumption, - PersistedValidationData, + PersistedValidationData, InboundDownwardMessage, InboundHrmpMessage, SessionInfo, }; use runtime_common::{ SlowAdjustingFeeUpdate, impls::ToAuthor, - BlockHashCount, MaximumBlockWeight, AvailableBlockRatio, MaximumBlockLength, - BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, MaximumExtrinsicWeight, + BlockHashCount, BlockWeights, BlockLength, RocksDbWeight, OffchainSolutionWeightLimit, }; use runtime_parachains::{ self, @@ -70,8 +71,11 @@ use runtime_parachains::configuration as parachains_configuration; use runtime_parachains::inclusion as parachains_inclusion; use runtime_parachains::inclusion_inherent as parachains_inclusion_inherent; use runtime_parachains::initializer as parachains_initializer; +use runtime_parachains::session_info as parachains_session_info; use runtime_parachains::paras as parachains_paras; -use runtime_parachains::router as parachains_router; +use runtime_parachains::dmp as parachains_dmp; +use runtime_parachains::ump as parachains_ump; +use runtime_parachains::hrmp as parachains_hrmp; use runtime_parachains::scheduler as parachains_scheduler; pub use pallet_balances::Call as BalancesCall; @@ -85,6 +89,29 @@ use constants::{time::*, currency::*, fee::*}; #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); +/// Runtime version (Rococo). 
+pub const VERSION: RuntimeVersion = RuntimeVersion { + spec_name: create_runtime_str!("rococo"), + impl_name: create_runtime_str!("parity-rococo-v1"), + authoring_version: 0, + spec_version: 10, + impl_version: 0, + #[cfg(not(feature = "disable-runtime-api"))] + apis: RUNTIME_API_VERSIONS, + #[cfg(feature = "disable-runtime-api")] + apis: sp_version::create_apis_vec![[]], + transaction_version: 0, +}; + +/// Native version. +#[cfg(any(feature = "std", test))] +pub fn native_version() -> NativeVersion { + NativeVersion { + runtime_version: VERSION, + can_author_with: Default::default(), + } +} + /// The address format for describing accounts. pub type Address = AccountId; /// Block header type as expected by this runtime. @@ -106,242 +133,6 @@ pub type SignedExtra = ( pallet_transaction_payment::ChargeTransactionPayment, ); -#[cfg(not(feature = "disable-runtime-api"))] -sp_api::impl_runtime_apis! { - impl sp_api::Core for Runtime { - fn version() -> RuntimeVersion { - VERSION - } - - fn execute_block(block: Block) { - Executive::execute_block(block) - } - - fn initialize_block(header: &::Header) { - Executive::initialize_block(header) - } - } - - impl sp_api::Metadata for Runtime { - fn metadata() -> OpaqueMetadata { - Runtime::metadata().into() - } - } - - impl block_builder_api::BlockBuilder for Runtime { - fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { - Executive::apply_extrinsic(extrinsic) - } - - fn finalize_block() -> ::Header { - Executive::finalize_block() - } - - fn inherent_extrinsics(data: inherents::InherentData) -> Vec<::Extrinsic> { - data.create_extrinsics() - } - - fn check_inherents( - block: Block, - data: inherents::InherentData, - ) -> inherents::CheckInherentsResult { - data.check_extrinsics(&block) - } - - fn random_seed() -> ::Hash { - Babe::randomness().into() - } - } - - impl tx_pool_api::runtime_api::TaggedTransactionQueue for Runtime { - fn validate_transaction( - source: TransactionSource, - tx: ::Extrinsic, - ) -> 
TransactionValidity { - Executive::validate_transaction(source, tx) - } - } - - impl offchain_primitives::OffchainWorkerApi for Runtime { - fn offchain_worker(header: &::Header) { - Executive::offchain_worker(header) - } - } - - impl primitives::v1::ParachainHost for Runtime { - fn validators() -> Vec { - runtime_api_impl::validators::() - } - - fn validator_groups() -> (Vec>, GroupRotationInfo) { - runtime_api_impl::validator_groups::() - } - - fn availability_cores() -> Vec> { - runtime_api_impl::availability_cores::() - } - - fn full_validation_data(para_id: Id, assumption: OccupiedCoreAssumption) - -> Option> { - runtime_api_impl::full_validation_data::(para_id, assumption) - } - - fn persisted_validation_data(para_id: Id, assumption: OccupiedCoreAssumption) - -> Option> { - runtime_api_impl::persisted_validation_data::(para_id, assumption) - } - - fn check_validation_outputs( - para_id: Id, - outputs: primitives::v1::ValidationOutputs, - ) -> bool { - runtime_api_impl::check_validation_outputs::(para_id, outputs) - } - - fn session_index_for_child() -> SessionIndex { - runtime_api_impl::session_index_for_child::() - } - - fn validation_code(para_id: Id, assumption: OccupiedCoreAssumption) - -> Option { - runtime_api_impl::validation_code::(para_id, assumption) - } - - fn candidate_pending_availability(para_id: Id) -> Option> { - runtime_api_impl::candidate_pending_availability::(para_id) - } - - fn candidate_events() -> Vec> { - runtime_api_impl::candidate_events::(|ev| { - match ev { - Event::parachains_inclusion(ev) => { - Some(ev) - } - _ => None, - } - }) - } - fn validator_discovery(validators: Vec) -> Vec> { - runtime_api_impl::validator_discovery::(validators) - } - - fn dmq_contents( - recipient: Id, - ) -> Vec> { - runtime_api_impl::dmq_contents::(recipient) - } - } - - impl fg_primitives::GrandpaApi for Runtime { - fn grandpa_authorities() -> Vec<(GrandpaId, u64)> { - Grandpa::grandpa_authorities() - } - - fn 
submit_report_equivocation_unsigned_extrinsic( - equivocation_proof: fg_primitives::EquivocationProof< - ::Hash, - sp_runtime::traits::NumberFor, - >, - key_owner_proof: fg_primitives::OpaqueKeyOwnershipProof, - ) -> Option<()> { - let key_owner_proof = key_owner_proof.decode()?; - - Grandpa::submit_unsigned_equivocation_report( - equivocation_proof, - key_owner_proof, - ) - } - - fn generate_key_ownership_proof( - _set_id: fg_primitives::SetId, - authority_id: fg_primitives::AuthorityId, - ) -> Option { - use codec::Encode; - - Historical::prove((fg_primitives::KEY_TYPE, authority_id)) - .map(|p| p.encode()) - .map(fg_primitives::OpaqueKeyOwnershipProof::new) - } - } - - impl babe_primitives::BabeApi for Runtime { - fn configuration() -> babe_primitives::BabeGenesisConfiguration { - // The choice of `c` parameter (where `1 - c` represents the - // probability of a slot being empty), is done in accordance to the - // slot duration and expected target block time, for safely - // resisting network delays of maximum two seconds. 
- // - babe_primitives::BabeGenesisConfiguration { - slot_duration: Babe::slot_duration(), - epoch_length: EpochDuration::get(), - c: PRIMARY_PROBABILITY, - genesis_authorities: Babe::authorities(), - randomness: Babe::randomness(), - allowed_slots: babe_primitives::AllowedSlots::PrimaryAndSecondaryPlainSlots, - } - } - - fn current_epoch_start() -> babe_primitives::SlotNumber { - Babe::current_epoch_start() - } - - fn generate_key_ownership_proof( - _slot_number: babe_primitives::SlotNumber, - authority_id: babe_primitives::AuthorityId, - ) -> Option { - use codec::Encode; - - Historical::prove((babe_primitives::KEY_TYPE, authority_id)) - .map(|p| p.encode()) - .map(babe_primitives::OpaqueKeyOwnershipProof::new) - } - - fn submit_report_equivocation_unsigned_extrinsic( - equivocation_proof: babe_primitives::EquivocationProof<::Header>, - key_owner_proof: babe_primitives::OpaqueKeyOwnershipProof, - ) -> Option<()> { - let key_owner_proof = key_owner_proof.decode()?; - - Babe::submit_unsigned_equivocation_report( - equivocation_proof, - key_owner_proof, - ) - } - } - - impl authority_discovery_primitives::AuthorityDiscoveryApi for Runtime { - fn authorities() -> Vec { - AuthorityDiscovery::authorities() - } - } - - impl sp_session::SessionKeys for Runtime { - fn generate_session_keys(seed: Option>) -> Vec { - SessionKeys::generate(seed) - } - - fn decode_session_keys( - encoded: Vec, - ) -> Option, sp_core::crypto::KeyTypeId)>> { - SessionKeys::decode_into_raw_public_keys(&encoded) - } - } - - impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { - fn account_nonce(account: AccountId) -> Nonce { - System::account_nonce(account) - } - } - - impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi< - Block, - Balance, - > for Runtime { - fn query_info(uxt: ::Extrinsic, len: u32) -> RuntimeDispatchInfo { - TransactionPayment::query_info(uxt, len) - } - } -} /// Unchecked extrinsic type as expected by this runtime. 
pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; /// Extrinsic type that has already been checked. @@ -388,57 +179,41 @@ construct_runtime! { AuthorityDiscovery: pallet_authority_discovery::{Module, Call, Config}, // Parachains modules. - ParachainOrigin: parachains_origin::{Module, Origin}, - Config: parachains_configuration::{Module, Call, Storage}, + ParachainsOrigin: parachains_origin::{Module, Origin}, + ParachainsConfiguration: parachains_configuration::{Module, Call, Storage, Config}, Inclusion: parachains_inclusion::{Module, Call, Storage, Event}, InclusionInherent: parachains_inclusion_inherent::{Module, Call, Storage, Inherent}, Scheduler: parachains_scheduler::{Module, Call, Storage}, Paras: parachains_paras::{Module, Call, Storage}, Initializer: parachains_initializer::{Module, Call, Storage}, - Router: parachains_router::{Module, Call, Storage}, - - Registrar: paras_registrar::{Module, Call, Storage}, - ParasSudoWrapper: paras_sudo_wrapper::{Module, Call}, - } -} - -pub struct BaseFilter; -impl Filter for BaseFilter { - fn filter(_call: &Call) -> bool { - true - } -} + Dmp: parachains_dmp::{Module, Call, Storage}, + Ump: parachains_ump::{Module, Call, Storage}, + Hrmp: parachains_hrmp::{Module, Call, Storage}, -/// Runtime version (Rococo). -pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("rococo-v1"), - impl_name: create_runtime_str!("parity-rococo-v1"), - authoring_version: 0, - spec_version: 1, - impl_version: 0, - #[cfg(not(feature = "disable-runtime-api"))] - apis: RUNTIME_API_VERSIONS, - #[cfg(feature = "disable-runtime-api")] - apis: sp_version::create_apis_vec![[]], - transaction_version: 2, -}; - -/// Native version. 
-#[cfg(any(feature = "std", test))] -pub fn native_version() -> NativeVersion { - NativeVersion { - runtime_version: VERSION, - can_author_with: Default::default(), + Registrar: paras_registrar::{Module, Call, Storage}, + ParasSudoWrapper: paras_sudo_wrapper::{Module, Call}, + + // Sudo + Sudo: pallet_sudo::{Module, Call, Storage, Event, Config}, } } +pub struct BaseFilter; +impl Filter for BaseFilter { + fn filter(_call: &Call) -> bool { + true + } +} parameter_types! { pub const Version: RuntimeVersion = VERSION; } -impl frame_system::Trait for Runtime { +impl frame_system::Config for Runtime { type BaseCallFilter = BaseFilter; + type BlockWeights = BlockWeights; + type BlockLength = BlockLength; + type DbWeight = RocksDbWeight; type Origin = Origin; type Call = Call; type Index = Nonce; @@ -450,13 +225,6 @@ impl frame_system::Trait for Runtime { type Header = generic::Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = RocksDbWeight; - type BlockExecutionWeight = BlockExecutionWeight; - type ExtrinsicBaseWeight = ExtrinsicBaseWeight; - type MaximumExtrinsicWeight = MaximumExtrinsicWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = Version; type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; @@ -482,7 +250,7 @@ impl frame_system::offchain::CreateSignedTransaction for R call: Call, public: ::Signer, account: AccountId, - nonce: ::Index, + nonce: ::Index, ) -> Option<(Call, ::SignaturePayload)> { // take the biggest period possible. 
let period = BlockHashCount::get() @@ -521,7 +289,7 @@ impl frame_system::offchain::SigningTypes for Runtime { type Signature = Signature; } -impl pallet_session::historical::Trait for Runtime { +impl pallet_session::historical::Config for Runtime { type FullIdentification = pallet_staking::Exposure; type FullIdentificationOf = pallet_staking::ExposureOf; } @@ -561,7 +329,7 @@ parameter_types! { pub const ImOnlineUnsignedPriority: TransactionPriority = TransactionPriority::max_value(); } -impl pallet_im_online::Trait for Runtime { +impl pallet_im_online::Config for Runtime { type AuthorityId = ImOnlineId; type Event = Event; type ReportUnresponsiveness = Offences; @@ -570,7 +338,7 @@ impl pallet_im_online::Trait for Runtime { type WeightInfo = (); } -impl pallet_staking::Trait for Runtime { +impl pallet_staking::Config for Runtime { type Currency = Balances; type UnixTime = Timestamp; type CurrencyToVote = frame_support::traits::U128CurrencyToVote; @@ -591,7 +359,7 @@ impl pallet_staking::Trait for Runtime { type Call = Call; type UnsignedPriority = StakingUnsignedPriority; type MaxIterations = MaxIterations; - type OffchainSolutionWeightLimit = MaximumBlockWeight; + type OffchainSolutionWeightLimit = OffchainSolutionWeightLimit; type MinSolutionScoreBump = MinSolutionScoreBump; type WeightInfo = (); } @@ -601,7 +369,7 @@ parameter_types! { pub const MaxLocks: u32 = 50; } -impl pallet_balances::Trait for Runtime { +impl pallet_balances::Config for Runtime { type Balance = Balance; type DustRemoval = (); type Event = Event; @@ -625,22 +393,22 @@ parameter_types! { } parameter_types! 
{ - pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * MaximumBlockWeight::get(); + pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * BlockWeights::get().max_block; } -impl pallet_offences::Trait for Runtime { +impl pallet_offences::Config for Runtime { type Event = Event; type IdentificationTuple = pallet_session::historical::IdentificationTuple; type OnOffenceHandler = Staking; type WeightSoftLimit = OffencesWeightSoftLimit; } -impl pallet_authority_discovery::Trait for Runtime {} +impl pallet_authority_discovery::Config for Runtime {} parameter_types! { pub const MinimumPeriod: u64 = SLOT_DURATION / 2; } -impl pallet_timestamp::Trait for Runtime { +impl pallet_timestamp::Config for Runtime { type Moment = u64; type OnTimestampSet = Babe; type MinimumPeriod = MinimumPeriod; @@ -651,9 +419,8 @@ parameter_types! { pub const TransactionByteFee: Balance = 10 * MILLICENTS; } -impl pallet_transaction_payment::Trait for Runtime { - type Currency = Balances; - type OnTransactionPayment = ToAuthor; +impl pallet_transaction_payment::Config for Runtime { + type OnChargeTransaction = CurrencyAdapter>; type TransactionByteFee = TransactionByteFee; type WeightToFee = WeightToFee; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; @@ -663,7 +430,7 @@ parameter_types! { pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(17); } -impl pallet_session::Trait for Runtime { +impl pallet_session::Config for Runtime { type Event = Event; type ValidatorId = AccountId; type ValidatorIdOf = pallet_staking::StashOf; @@ -681,7 +448,7 @@ parameter_types! { pub const ExpectedBlockTime: Moment = MILLISECS_PER_BLOCK; } -impl pallet_babe::Trait for Runtime { +impl pallet_babe::Config for Runtime { type EpochDuration = EpochDuration; type ExpectedBlockTime = ExpectedBlockTime; @@ -710,7 +477,7 @@ parameter_types! 
{ pub const IndexDeposit: Balance = 1 * DOLLARS; } -impl pallet_indices::Trait for Runtime { +impl pallet_indices::Config for Runtime { type AccountIndex = AccountIndex; type Currency = Balances; type Deposit = IndexDeposit; @@ -722,7 +489,7 @@ parameter_types! { pub const AttestationPeriod: BlockNumber = 50; } -impl pallet_grandpa::Trait for Runtime { +impl pallet_grandpa::Config for Runtime { type Event = Event; type Call = Call; @@ -746,39 +513,302 @@ parameter_types! { } // TODO: substrate#2986 implement this properly -impl pallet_authorship::Trait for Runtime { +impl pallet_authorship::Config for Runtime { type FindAuthor = pallet_session::FindAccountFromAuthorIndex; type UncleGenerations = UncleGenerations; type FilterUncle = (); type EventHandler = (Staking, ImOnline); } -impl parachains_origin::Trait for Runtime {} +impl parachains_origin::Config for Runtime {} -impl parachains_configuration::Trait for Runtime {} +impl parachains_configuration::Config for Runtime {} -impl parachains_inclusion::Trait for Runtime { +impl parachains_inclusion::Config for Runtime { type Event = Event; } -impl parachains_paras::Trait for Runtime { +impl parachains_paras::Config for Runtime { type Origin = Origin; } -impl parachains_router::Trait for Runtime {} +impl parachains_session_info::Config for Runtime {} + +impl parachains_ump::Config for Runtime { + type UmpSink = (); // TODO: #1873 To be handled by the XCM receiver. 
+} + +impl parachains_dmp::Config for Runtime {} + +impl parachains_hrmp::Config for Runtime { + type Origin = Origin; +} -impl parachains_inclusion_inherent::Trait for Runtime {} +impl parachains_inclusion_inherent::Config for Runtime {} -impl parachains_scheduler::Trait for Runtime {} +impl parachains_scheduler::Config for Runtime {} -impl parachains_initializer::Trait for Runtime { +impl parachains_initializer::Config for Runtime { type Randomness = Babe; } -impl paras_sudo_wrapper::Trait for Runtime {} +impl paras_sudo_wrapper::Config for Runtime {} -impl paras_registrar::Trait for Runtime { +impl paras_registrar::Config for Runtime { type Currency = Balances; type ParathreadDeposit = ParathreadDeposit; type Origin = Origin; } + +impl pallet_sudo::Config for Runtime { + type Event = Event; + type Call = Call; +} + +#[cfg(not(feature = "disable-runtime-api"))] +sp_api::impl_runtime_apis! { + impl sp_api::Core for Runtime { + fn version() -> RuntimeVersion { + VERSION + } + + fn execute_block(block: Block) { + Executive::execute_block(block) + } + + fn initialize_block(header: &::Header) { + Executive::initialize_block(header) + } + } + + impl sp_api::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + Runtime::metadata().into() + } + } + + impl block_builder_api::BlockBuilder for Runtime { + fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { + Executive::apply_extrinsic(extrinsic) + } + + fn finalize_block() -> ::Header { + Executive::finalize_block() + } + + fn inherent_extrinsics(data: inherents::InherentData) -> Vec<::Extrinsic> { + data.create_extrinsics() + } + + fn check_inherents( + block: Block, + data: inherents::InherentData, + ) -> inherents::CheckInherentsResult { + data.check_extrinsics(&block) + } + + fn random_seed() -> ::Hash { + Babe::randomness().into() + } + } + + impl tx_pool_api::runtime_api::TaggedTransactionQueue for Runtime { + fn validate_transaction( + source: TransactionSource, + tx: ::Extrinsic, + ) -> 
TransactionValidity { + Executive::validate_transaction(source, tx) + } + } + + impl offchain_primitives::OffchainWorkerApi for Runtime { + fn offchain_worker(header: &::Header) { + Executive::offchain_worker(header) + } + } + + impl primitives::v1::ParachainHost for Runtime { + fn validators() -> Vec { + runtime_api_impl::validators::() + } + + fn validator_groups() -> (Vec>, GroupRotationInfo) { + runtime_api_impl::validator_groups::() + } + + fn availability_cores() -> Vec> { + runtime_api_impl::availability_cores::() + } + + fn full_validation_data(para_id: Id, assumption: OccupiedCoreAssumption) + -> Option> { + runtime_api_impl::full_validation_data::(para_id, assumption) + } + + fn persisted_validation_data(para_id: Id, assumption: OccupiedCoreAssumption) + -> Option> { + runtime_api_impl::persisted_validation_data::(para_id, assumption) + } + + fn check_validation_outputs( + para_id: Id, + outputs: primitives::v1::CandidateCommitments, + ) -> bool { + runtime_api_impl::check_validation_outputs::(para_id, outputs) + } + + fn session_index_for_child() -> SessionIndex { + runtime_api_impl::session_index_for_child::() + } + + fn validation_code(para_id: Id, assumption: OccupiedCoreAssumption) + -> Option { + runtime_api_impl::validation_code::(para_id, assumption) + } + + fn historical_validation_code(para_id: Id, context_height: BlockNumber) + -> Option + { + runtime_api_impl::historical_validation_code::(para_id, context_height) + } + + fn candidate_pending_availability(para_id: Id) -> Option> { + runtime_api_impl::candidate_pending_availability::(para_id) + } + + fn candidate_events() -> Vec> { + runtime_api_impl::candidate_events::(|ev| { + match ev { + Event::parachains_inclusion(ev) => { + Some(ev) + } + _ => None, + } + }) + } + + fn session_info(index: SessionIndex) -> Option { + runtime_api_impl::session_info::(index) + } + + fn dmq_contents(recipient: Id) -> Vec> { + runtime_api_impl::dmq_contents::(recipient) + } + + fn 
inbound_hrmp_channels_contents( + recipient: Id + ) -> BTreeMap>> { + runtime_api_impl::inbound_hrmp_channels_contents::(recipient) + } + } + + impl fg_primitives::GrandpaApi for Runtime { + fn grandpa_authorities() -> Vec<(GrandpaId, u64)> { + Grandpa::grandpa_authorities() + } + + fn submit_report_equivocation_unsigned_extrinsic( + equivocation_proof: fg_primitives::EquivocationProof< + ::Hash, + sp_runtime::traits::NumberFor, + >, + key_owner_proof: fg_primitives::OpaqueKeyOwnershipProof, + ) -> Option<()> { + let key_owner_proof = key_owner_proof.decode()?; + + Grandpa::submit_unsigned_equivocation_report( + equivocation_proof, + key_owner_proof, + ) + } + + fn generate_key_ownership_proof( + _set_id: fg_primitives::SetId, + authority_id: fg_primitives::AuthorityId, + ) -> Option { + use parity_scale_codec::Encode; + + Historical::prove((fg_primitives::KEY_TYPE, authority_id)) + .map(|p| p.encode()) + .map(fg_primitives::OpaqueKeyOwnershipProof::new) + } + } + + impl babe_primitives::BabeApi for Runtime { + fn configuration() -> babe_primitives::BabeGenesisConfiguration { + // The choice of `c` parameter (where `1 - c` represents the + // probability of a slot being empty), is done in accordance to the + // slot duration and expected target block time, for safely + // resisting network delays of maximum two seconds. 
+ // + babe_primitives::BabeGenesisConfiguration { + slot_duration: Babe::slot_duration(), + epoch_length: EpochDuration::get(), + c: PRIMARY_PROBABILITY, + genesis_authorities: Babe::authorities(), + randomness: Babe::randomness(), + allowed_slots: babe_primitives::AllowedSlots::PrimaryAndSecondaryPlainSlots, + } + } + + fn current_epoch_start() -> babe_primitives::SlotNumber { + Babe::current_epoch_start() + } + + fn generate_key_ownership_proof( + _slot_number: babe_primitives::SlotNumber, + authority_id: babe_primitives::AuthorityId, + ) -> Option { + use parity_scale_codec::Encode; + + Historical::prove((babe_primitives::KEY_TYPE, authority_id)) + .map(|p| p.encode()) + .map(babe_primitives::OpaqueKeyOwnershipProof::new) + } + + fn submit_report_equivocation_unsigned_extrinsic( + equivocation_proof: babe_primitives::EquivocationProof<::Header>, + key_owner_proof: babe_primitives::OpaqueKeyOwnershipProof, + ) -> Option<()> { + let key_owner_proof = key_owner_proof.decode()?; + + Babe::submit_unsigned_equivocation_report( + equivocation_proof, + key_owner_proof, + ) + } + } + + impl authority_discovery_primitives::AuthorityDiscoveryApi for Runtime { + fn authorities() -> Vec { + AuthorityDiscovery::authorities() + } + } + + impl sp_session::SessionKeys for Runtime { + fn generate_session_keys(seed: Option>) -> Vec { + SessionKeys::generate(seed) + } + + fn decode_session_keys( + encoded: Vec, + ) -> Option, sp_core::crypto::KeyTypeId)>> { + SessionKeys::decode_into_raw_public_keys(&encoded) + } + } + + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { + fn account_nonce(account: AccountId) -> Nonce { + System::account_nonce(account) + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi< + Block, + Balance, + > for Runtime { + fn query_info(uxt: ::Extrinsic, len: u32) -> RuntimeDispatchInfo { + TransactionPayment::query_info(uxt, len) + } + } +} diff --git a/runtime/test-runtime/Cargo.toml 
b/runtime/test-runtime/Cargo.toml index be646dd63de64d97be9a97741d694b3c4d48335b..10b59b9f52676257895444f0be4cddb1860df02f 100644 --- a/runtime/test-runtime/Cargo.toml +++ b/runtime/test-runtime/Cargo.toml @@ -1,55 +1,55 @@ [package] name = "polkadot-test-runtime" -version = "0.8.26" +version = "0.8.27" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" [dependencies] bitvec = { version = "0.17.4", default-features = false, features = ["alloc"] } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -log = { version = "0.3.9", optional = true } -rustc-hex = { version = "2.0.1", default-features = false } -serde = { version = "1.0.102", default-features = false } -serde_derive = { version = "1.0.102", optional = true } -smallvec = "1.4.1" +parity-scale-codec = { version = "1.3.5", default-features = false, features = ["derive"] } +log = { version = "0.4.11", optional = true } +rustc-hex = { version = "2.1.0", default-features = false } +serde = { version = "1.0.118", default-features = false } +serde_derive = { version = "1.0.117", optional = true } +smallvec = "1.5.1" -authority-discovery-primitives = { package = "sp-authority-discovery", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -babe-primitives = { package = "sp-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -inherents = { package = "sp-inherents", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -offchain-primitives = { package = "sp-offchain", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-io = { git = 
"https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -tx-pool-api = { package = "sp-transaction-pool", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -block-builder-api = { package = "sp-block-builder", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +authority-discovery-primitives = { package = "sp-authority-discovery", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +babe-primitives = { package = "sp-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +inherents = { package = "sp-inherents", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +offchain-primitives = { package = "sp-offchain", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-runtime = { git = 
"https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-staking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-session = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-version = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +tx-pool-api = { package = "sp-transaction-pool", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +block-builder-api = { package = "sp-block-builder", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } -pallet-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-authorship = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-executive = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-indices = { git = "https://github.com/paritytech/substrate", branch 
= "master", default-features = false } -pallet-nicks = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-offences = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-staking-reward-curve = { git = "https://github.com/paritytech/substrate", branch = "master" } -frame-system = {git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-sudo = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-vesting = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-authorship = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } 
+pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +frame-executive = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-grandpa = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-indices = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-nicks = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-offences = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-session = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-staking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-staking-reward-curve = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +frame-system = {git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = 
"polkadot-release-v0.8.27", default-features = false } +pallet-sudo = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-vesting = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } runtime-common = { package = "polkadot-runtime-common", path = "../common", default-features = false } primitives = { package = "polkadot-primitives", path = "../../primitives", default-features = false } @@ -57,15 +57,15 @@ polkadot-parachain = { path = "../../parachain", default-features = false } polkadot-runtime-parachains = { path = "../parachains", default-features = false } [dev-dependencies] -hex-literal = "0.2.1" -libsecp256k1 = "0.3.2" -tiny-keccak = "1.5.0" -keyring = { package = "sp-keyring", git = "https://github.com/paritytech/substrate", branch = "master" } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" } -serde_json = "1.0.41" +hex-literal = "0.3.1" +libsecp256k1 = "0.3.5" +tiny-keccak = "2.0.2" +keyring = { package = "sp-keyring", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +serde_json = "1.0.60" [build-dependencies] -wasm-builder-runner = { package = "substrate-wasm-builder-runner", version = "2.0.0" } +substrate-wasm-builder = "3.0.0" [features] default = ["std"] @@ -77,7 +77,7 @@ std = [ "bitvec/std", "primitives/std", "rustc-hex/std", - "codec/std", + "parity-scale-codec/std", "inherents/std", "sp-core/std", "polkadot-parachain/std", diff --git a/runtime/test-runtime/build.rs b/runtime/test-runtime/build.rs index e0c89e5649323fe970ccf449a2182ccdf3cb9ac7..a75ebb4edbe1b3c55e23a2700a5d48efcaae1e54 100644 --- a/runtime/test-runtime/build.rs +++ b/runtime/test-runtime/build.rs @@ -14,12 +14,11 @@ // You should have received a copy of the GNU General Public License 
// along with Polkadot. If not, see . -use wasm_builder_runner::WasmBuilder; +use substrate_wasm_builder::WasmBuilder; fn main() { WasmBuilder::new() .with_current_project() - .with_wasm_builder_from_crates("2.0.1") .import_memory() .export_heap_base() .build() diff --git a/runtime/test-runtime/src/constants.rs b/runtime/test-runtime/src/constants.rs index b18501b714b1f903d9d1144d833f96adff55db83..4c00475f4b9af76a76100ab143025d6d76b87e8a 100644 --- a/runtime/test-runtime/src/constants.rs +++ b/runtime/test-runtime/src/constants.rs @@ -28,10 +28,10 @@ pub mod currency { pub mod time { use primitives::v0::{Moment, BlockNumber}; // Testnet - pub const MILLISECS_PER_BLOCK: Moment = 1000; + pub const MILLISECS_PER_BLOCK: Moment = 6000; pub const SLOT_DURATION: Moment = MILLISECS_PER_BLOCK; - // Testnet - pub const EPOCH_DURATION_IN_BLOCKS: BlockNumber = 10 * MINUTES; + // 30 seconds for now + pub const EPOCH_DURATION_IN_BLOCKS: BlockNumber = MINUTES / 2; // These time units are defined in number of blocks. pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber); diff --git a/runtime/test-runtime/src/lib.rs b/runtime/test-runtime/src/lib.rs index 6e4913a0dc8873f77844ed9056ce0259274d4843..8b4c55391d82c8c9413da005e67eb49102762c85 100644 --- a/runtime/test-runtime/src/lib.rs +++ b/runtime/test-runtime/src/lib.rs @@ -20,26 +20,32 @@ // `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. 
#![recursion_limit = "256"] +use pallet_transaction_payment::CurrencyAdapter; use sp_std::prelude::*; -use codec::Encode; -use polkadot_runtime_parachains::{ - configuration, - inclusion, - initializer, - paras, - router, - runtime_api_impl::v1 as runtime_impl, - scheduler, -}; +use sp_std::collections::btree_map::BTreeMap; +use parity_scale_codec::Encode; + +use polkadot_runtime_parachains::configuration as parachains_configuration; +use polkadot_runtime_parachains::inclusion as parachains_inclusion; +use polkadot_runtime_parachains::inclusion_inherent as parachains_inclusion_inherent; +use polkadot_runtime_parachains::initializer as parachains_initializer; +use polkadot_runtime_parachains::session_info as parachains_session_info; +use polkadot_runtime_parachains::paras as parachains_paras; +use polkadot_runtime_parachains::dmp as parachains_dmp; +use polkadot_runtime_parachains::ump as parachains_ump; +use polkadot_runtime_parachains::hrmp as parachains_hrmp; +use polkadot_runtime_parachains::scheduler as parachains_scheduler; +use polkadot_runtime_parachains::runtime_api_impl::v1 as runtime_impl; + use primitives::v1::{ AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CommittedCandidateReceipt, CoreState, GroupRotationInfo, Hash as HashT, Id as ParaId, Moment, Nonce, OccupiedCoreAssumption, PersistedValidationData, Signature, ValidationCode, ValidationData, ValidatorId, ValidatorIndex, + InboundDownwardMessage, InboundHrmpMessage, SessionInfo, }; use runtime_common::{ claims, SlowAdjustingFeeUpdate, paras_sudo_wrapper, - BlockHashCount, MaximumBlockWeight, AvailableBlockRatio, - MaximumBlockLength, BlockExecutionWeight, ExtrinsicBaseWeight, ParachainSessionKeyPlaceholder, + BlockHashCount, BlockWeights, BlockLength, }; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, @@ -75,6 +81,7 @@ pub use sp_runtime::BuildStorage; pub use pallet_timestamp::Call as TimestampCall; pub use pallet_balances::Call as BalancesCall; pub use 
paras_sudo_wrapper::Call as ParasSudoWrapperCall; +pub use pallet_sudo::Call as SudoCall; /// Constant values used within the runtime. pub mod constants; @@ -115,8 +122,11 @@ parameter_types! { pub const Version: RuntimeVersion = VERSION; } -impl frame_system::Trait for Runtime { +impl frame_system::Config for Runtime { type BaseCallFilter = (); + type BlockWeights = BlockWeights; + type BlockLength = BlockLength; + type DbWeight = (); type Origin = Origin; type Call = Call; type Index = Nonce; @@ -128,13 +138,6 @@ impl frame_system::Trait for Runtime { type Header = generic::Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type BlockExecutionWeight = BlockExecutionWeight; - type ExtrinsicBaseWeight = ExtrinsicBaseWeight; - type MaximumExtrinsicWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = Version; type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; @@ -155,7 +158,7 @@ parameter_types! { pub storage ExpectedBlockTime: Moment = MILLISECS_PER_BLOCK; } -impl pallet_babe::Trait for Runtime { +impl pallet_babe::Config for Runtime { type EpochDuration = EpochDuration; type ExpectedBlockTime = ExpectedBlockTime; @@ -183,7 +186,7 @@ parameter_types! { pub storage IndexDeposit: Balance = 1 * DOLLARS; } -impl pallet_indices::Trait for Runtime { +impl pallet_indices::Config for Runtime { type AccountIndex = AccountIndex; type Currency = Balances; type Deposit = IndexDeposit; @@ -196,7 +199,7 @@ parameter_types! { pub storage MaxLocks: u32 = 50; } -impl pallet_balances::Trait for Runtime { +impl pallet_balances::Config for Runtime { type Balance = Balance; type DustRemoval = (); type Event = Event; @@ -210,9 +213,8 @@ parameter_types! 
{ pub storage TransactionByteFee: Balance = 10 * MILLICENTS; } -impl pallet_transaction_payment::Trait for Runtime { - type Currency = Balances; - type OnTransactionPayment = (); +impl pallet_transaction_payment::Config for Runtime { + type OnChargeTransaction = CurrencyAdapter; type TransactionByteFee = TransactionByteFee; type WeightToFee = WeightToFee; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; @@ -222,7 +224,7 @@ parameter_types! { pub storage SlotDuration: u64 = SLOT_DURATION; pub storage MinimumPeriod: u64 = SlotDuration::get() / 2; } -impl pallet_timestamp::Trait for Runtime { +impl pallet_timestamp::Config for Runtime { type Moment = u64; type OnTimestampSet = Babe; type MinimumPeriod = MinimumPeriod; @@ -234,7 +236,7 @@ parameter_types! { } // TODO: substrate#2986 implement this properly -impl pallet_authorship::Trait for Runtime { +impl pallet_authorship::Config for Runtime { type FindAuthor = pallet_session::FindAccountFromAuthorIndex; type UncleGenerations = UncleGenerations; type FilterUncle = (); @@ -250,7 +252,8 @@ impl_opaque_keys! { pub struct SessionKeys { pub grandpa: Grandpa, pub babe: Babe, - pub parachain_validator: ParachainSessionKeyPlaceholder, + pub parachain_validator: Initializer, + pub authority_discovery: AuthorityDiscovery, } } @@ -258,7 +261,7 @@ parameter_types! { pub storage DisabledValidatorsThreshold: Perbill = Perbill::from_percent(17); } -impl pallet_session::Trait for Runtime { +impl pallet_session::Config for Runtime { type Event = Event; type ValidatorId = AccountId; type ValidatorIdOf = pallet_staking::StashOf; @@ -271,7 +274,7 @@ impl pallet_session::Trait for Runtime { type WeightInfo = (); } -impl pallet_session::historical::Trait for Runtime { +impl pallet_session::historical::Config for Runtime { type FullIdentification = pallet_staking::Exposure; type FullIdentificationOf = pallet_staking::ExposureOf; } @@ -302,7 +305,7 @@ parameter_types! 
{ pub MinSolutionScoreBump: Perbill = Perbill::from_rational_approximation(5u32, 10_000); } -impl pallet_staking::Trait for Runtime { +impl pallet_staking::Config for Runtime { type Currency = Balances; type UnixTime = Timestamp; type CurrencyToVote = frame_support::traits::U128CurrencyToVote; @@ -323,13 +326,13 @@ impl pallet_staking::Trait for Runtime { type Call = Call; type UnsignedPriority = StakingUnsignedPriority; type MaxIterations = MaxIterations; - type OffchainSolutionWeightLimit = MaximumBlockWeight; + type OffchainSolutionWeightLimit = (); type MinSolutionScoreBump = MinSolutionScoreBump; type WeightInfo = (); } -impl pallet_grandpa::Trait for Runtime { +impl pallet_grandpa::Config for Runtime { type Event = Event; type Call = Call; @@ -355,7 +358,7 @@ impl frame_system::offchain::CreateSignedTransaction for R call: Call, public: ::Signer, account: AccountId, - nonce: ::Index, + nonce: ::Index, ) -> Option<(Call, ::SignaturePayload)> { let period = BlockHashCount::get() .checked_next_power_of_two() @@ -393,17 +396,17 @@ impl frame_system::offchain::SigningTypes for Runtime { } parameter_types! { - pub storage OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * MaximumBlockWeight::get(); + pub storage OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * BlockWeights::get().max_block; } -impl pallet_offences::Trait for Runtime { +impl pallet_offences::Config for Runtime { type Event = Event; type IdentificationTuple = pallet_session::historical::IdentificationTuple; type OnOffenceHandler = Staking; type WeightSoftLimit = OffencesWeightSoftLimit; } -impl pallet_authority_discovery::Trait for Runtime {} +impl pallet_authority_discovery::Config for Runtime {} parameter_types! { pub storage LeasePeriod: BlockNumber = 100_000; @@ -414,7 +417,7 @@ parameter_types! 
{ pub Prefix: &'static [u8] = b"Pay KSMs to the Kusama account:"; } -impl claims::Trait for Runtime { +impl claims::Config for Runtime { type Event = Event; type VestingSchedule = Vesting; type Prefix = Prefix; @@ -425,7 +428,7 @@ parameter_types! { pub storage MinVestedTransfer: Balance = 100 * DOLLARS; } -impl pallet_vesting::Trait for Runtime { +impl pallet_vesting::Config for Runtime { type Event = Event; type Currency = Balances; type BlockNumberToBalance = ConvertInto; @@ -433,30 +436,42 @@ impl pallet_vesting::Trait for Runtime { type WeightInfo = (); } -impl pallet_sudo::Trait for Runtime { +impl pallet_sudo::Config for Runtime { type Event = Event; type Call = Call; } -impl configuration::Trait for Runtime {} +impl parachains_configuration::Config for Runtime {} -impl inclusion::Trait for Runtime { +impl parachains_inclusion::Config for Runtime { type Event = Event; } -impl initializer::Trait for Runtime { +impl parachains_inclusion_inherent::Config for Runtime {} + +impl parachains_initializer::Config for Runtime { type Randomness = RandomnessCollectiveFlip; } -impl paras::Trait for Runtime { +impl parachains_session_info::Config for Runtime {} + +impl parachains_paras::Config for Runtime { type Origin = Origin; } -impl router::Trait for Runtime {} +impl parachains_dmp::Config for Runtime {} + +impl parachains_ump::Config for Runtime { + type UmpSink = (); +} + +impl parachains_hrmp::Config for Runtime { + type Origin = Origin; +} -impl scheduler::Trait for Runtime {} +impl parachains_scheduler::Config for Runtime {} -impl paras_sudo_wrapper::Trait for Runtime {} +impl paras_sudo_wrapper::Config for Runtime {} construct_runtime! { pub enum Runtime where @@ -492,11 +507,12 @@ construct_runtime! 
{ Vesting: pallet_vesting::{Module, Call, Storage, Event, Config}, // Parachains runtime modules - Configuration: configuration::{Module, Call, Storage}, - Inclusion: inclusion::{Module, Call, Storage, Event}, - Initializer: initializer::{Module, Call, Storage}, - Paras: paras::{Module, Call, Storage, Origin}, - Scheduler: scheduler::{Module, Call, Storage}, + ParachainsConfiguration: parachains_configuration::{Module, Call, Storage, Config}, + Inclusion: parachains_inclusion::{Module, Call, Storage, Event}, + InclusionInherent: parachains_inclusion_inherent::{Module, Call, Storage, Inherent}, + Initializer: parachains_initializer::{Module, Call, Storage}, + Paras: parachains_paras::{Module, Call, Storage, Origin}, + Scheduler: parachains_scheduler::{Module, Call, Storage}, ParasSudoWrapper: paras_sudo_wrapper::{Module, Call}, Sudo: pallet_sudo::{Module, Call, Storage, Config, Event}, @@ -598,7 +614,7 @@ sp_api::impl_runtime_apis! { impl authority_discovery_primitives::AuthorityDiscoveryApi for Runtime { fn authorities() -> Vec { - Vec::new() + AuthorityDiscovery::authorities() } } @@ -629,7 +645,7 @@ sp_api::impl_runtime_apis! { fn check_validation_outputs( para_id: ParaId, - outputs: primitives::v1::ValidationOutputs, + outputs: primitives::v1::CandidateCommitments, ) -> bool { runtime_impl::check_validation_outputs::(para_id, outputs) } @@ -644,6 +660,13 @@ sp_api::impl_runtime_apis! { runtime_impl::validation_code::(para_id, assumption) } + fn historical_validation_code(para_id: ParaId, context_height: BlockNumber) + -> Option + { + runtime_impl::historical_validation_code::(para_id, context_height) + } + + fn candidate_pending_availability(para_id: ParaId) -> Option> { runtime_impl::candidate_pending_availability::(para_id) } @@ -653,15 +676,21 @@ sp_api::impl_runtime_apis! 
{ runtime_impl::candidate_events::(|trait_event| trait_event.try_into().ok()) } - fn validator_discovery(validators: Vec) -> Vec> { - runtime_impl::validator_discovery::(validators) + fn session_info(index: SessionIndex) -> Option { + runtime_impl::session_info::(index) } fn dmq_contents( recipient: ParaId, - ) -> Vec> { + ) -> Vec> { runtime_impl::dmq_contents::(recipient) } + + fn inbound_hrmp_channels_contents( + recipient: ParaId, + ) -> BTreeMap>> { + runtime_impl::inbound_hrmp_channels_contents::(recipient) + } } impl fg_primitives::GrandpaApi for Runtime { diff --git a/runtime/westend/Cargo.toml b/runtime/westend/Cargo.toml index d9408830fc21d98960e2ab6ec453af703da85814..9060f64a2fe54e150e927f9850170ef824d36c14 100644 --- a/runtime/westend/Cargo.toml +++ b/runtime/westend/Cargo.toml @@ -1,90 +1,90 @@ [package] name = "westend-runtime" -version = "0.8.26" +version = "0.8.27" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" [dependencies] bitvec = { version = "0.17.4", default-features = false, features = ["alloc"] } -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -log = { version = "0.3.9", optional = true } -rustc-hex = { version = "2.0.1", default-features = false } -serde = { version = "1.0.102", default-features = false } -serde_derive = { version = "1.0.102", optional = true } -smallvec = "1.4.1" +parity-scale-codec = { version = "1.3.5", default-features = false, features = ["derive"] } +log = { version = "0.4.11", optional = true } +rustc-hex = { version = "2.1.0", default-features = false } +serde = { version = "1.0.118", default-features = false } +serde_derive = { version = "1.0.117", optional = true } +smallvec = "1.5.1" static_assertions = "1.1.0" -authority-discovery-primitives = { package = "sp-authority-discovery", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -babe-primitives = { package = "sp-consensus-babe", 
git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -inherents = { package = "sp-inherents", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -offchain-primitives = { package = "sp-offchain", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-std = { package = "sp-std", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-version = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -tx-pool-api = { package = "sp-transaction-pool", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -block-builder-api = { package = "sp-block-builder", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +authority-discovery-primitives = { package = "sp-authority-discovery", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +babe-primitives = { package = "sp-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-api = { git = "https://github.com/paritytech/substrate", branch = 
"polkadot-release-v0.8.27", default-features = false } +inherents = { package = "sp-inherents", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +offchain-primitives = { package = "sp-offchain", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-std = { package = "sp-std", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-staking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-session = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-version = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +tx-pool-api = { package = "sp-transaction-pool", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +block-builder-api = { package = "sp-block-builder", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } -pallet-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-authorship = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-balances = { git = 
"https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-collective = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-democracy = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-elections-phragmen = { package = "pallet-elections-phragmen", git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-executive = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-grandpa = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-identity = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-im-online = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-indices = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-membership = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-multisig = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-nicks = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-offences = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-proxy = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-randomness-collective-flip = { git = 
"https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-recovery = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-scheduler = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-session = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-society = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-staking-reward-curve = { package = "pallet-staking-reward-curve", git = "https://github.com/paritytech/substrate", branch = "master" } -pallet-sudo = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-system = {git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-treasury = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-utility = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -pallet-vesting = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +pallet-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-authorship = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", 
default-features = false } +pallet-babe = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-balances = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-transaction-payment = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-collective = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-democracy = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-elections-phragmen = { package = "pallet-elections-phragmen", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +frame-executive = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-grandpa = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-identity = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-im-online = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-indices = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-membership = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-multisig = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } 
+pallet-nicks = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-offences = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-proxy = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-randomness-collective-flip = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-recovery = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-scheduler = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-session = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-society = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-staking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-staking-reward-curve = { package = "pallet-staking-reward-curve", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +pallet-sudo = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +frame-system = {git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +frame-system-rpc-runtime-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-timestamp = { git = "https://github.com/paritytech/substrate", branch = 
"polkadot-release-v0.8.27", default-features = false } +pallet-treasury = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-utility = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +pallet-vesting = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } -frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } -frame-system-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } -pallet-offences-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } -pallet-session-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false, optional = true } -hex-literal = { version = "0.2.1", optional = true } +frame-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false, optional = true } +frame-system-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false, optional = true } +pallet-offences-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false, optional = true } +pallet-session-benchmarking = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false, optional = true } +hex-literal = { version = "0.3.1", optional = true } runtime-common = { package = "polkadot-runtime-common", path = "../common", default-features = false } primitives = { package = "polkadot-primitives", path = "../../primitives", default-features = false } polkadot-parachain 
= { path = "../../parachain", default-features = false } [dev-dependencies] -hex-literal = "0.2.1" -libsecp256k1 = "0.3.2" -tiny-keccak = "1.5.0" -keyring = { package = "sp-keyring", git = "https://github.com/paritytech/substrate", branch = "master" } -sp-trie = { git = "https://github.com/paritytech/substrate", branch = "master" } -serde_json = "1.0.41" +hex-literal = "0.3.1" +libsecp256k1 = "0.3.5" +tiny-keccak = "2.0.2" +keyring = { package = "sp-keyring", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-trie = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +serde_json = "1.0.60" [build-dependencies] -wasm-builder-runner = { package = "substrate-wasm-builder-runner", version = "2.0.0" } +substrate-wasm-builder = "3.0.0" [features] default = ["std"] @@ -96,7 +96,7 @@ std = [ "bitvec/std", "primitives/std", "rustc-hex/std", - "codec/std", + "parity-scale-codec/std", "inherents/std", "sp-core/std", "polkadot-parachain/std", diff --git a/runtime/westend/build.rs b/runtime/westend/build.rs index 8c7a1e35dda5e131a2a71e69571ba9fdee06743d..e4a139a06ae1a85fa05f0f90f568a1a7c2839160 100644 --- a/runtime/westend/build.rs +++ b/runtime/westend/build.rs @@ -14,12 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use wasm_builder_runner::WasmBuilder; +use substrate_wasm_builder::WasmBuilder; fn main() { WasmBuilder::new() .with_current_project() - .with_wasm_builder_from_crates("2.0.1") .import_memory() .export_heap_base() .build() diff --git a/runtime/westend/src/constants.rs b/runtime/westend/src/constants.rs index 6fb7e934e1f1d166056ea21f6ca3fe9ff8199fb0..ed740007041bd9f7d99d00c7b1ac7b55db82f3cf 100644 --- a/runtime/westend/src/constants.rs +++ b/runtime/westend/src/constants.rs @@ -61,7 +61,7 @@ pub mod fee { /// node's balance type. 
/// /// This should typically create a mapping between the following ranges: - /// - [0, frame_system::MaximumBlockWeight] + /// - [0, MAXIMUM_BLOCK_WEIGHT] /// - [Balance::min, Balance::max] /// /// Yet, it can be used for any other sort of change to weight-fee. Some examples being: @@ -87,16 +87,16 @@ pub mod fee { #[cfg(test)] mod tests { use frame_support::weights::WeightToFeePolynomial; - use runtime_common::{MaximumBlockWeight, ExtrinsicBaseWeight}; + use runtime_common::{MAXIMUM_BLOCK_WEIGHT, ExtrinsicBaseWeight}; use super::fee::WeightToFee; use super::currency::{CENTS, DOLLARS, MILLICENTS}; #[test] - // This function tests that the fee for `MaximumBlockWeight` of weight is correct + // This function tests that the fee for `MAXIMUM_BLOCK_WEIGHT` of weight is correct fn full_block_fee_is_correct() { // A full block should cost 16 DOLLARS println!("Base: {}", ExtrinsicBaseWeight::get()); - let x = WeightToFee::calc(&MaximumBlockWeight::get()); + let x = WeightToFee::calc(&MAXIMUM_BLOCK_WEIGHT); let y = 16 * DOLLARS; assert!(x.max(y) - x.min(y) < MILLICENTS); } diff --git a/runtime/westend/src/lib.rs b/runtime/westend/src/lib.rs index c048d3d43c66e84e88063cf6946473fcf9d251af..236aaf434f13b47f263eadfccf67997ddbb44703 100644 --- a/runtime/westend/src/lib.rs +++ b/runtime/westend/src/lib.rs @@ -20,18 +20,20 @@ // `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. 
#![recursion_limit = "256"] +use pallet_transaction_payment::CurrencyAdapter; use sp_std::prelude::*; -use codec::{Encode, Decode}; +use sp_std::collections::btree_map::BTreeMap; +use parity_scale_codec::{Encode, Decode}; use primitives::v1::{ AccountId, AccountIndex, Balance, BlockNumber, CandidateEvent, CommittedCandidateReceipt, CoreState, GroupRotationInfo, Hash, Id, Moment, Nonce, OccupiedCoreAssumption, PersistedValidationData, Signature, ValidationCode, ValidationData, ValidatorId, ValidatorIndex, + InboundDownwardMessage, InboundHrmpMessage, SessionInfo, }; use runtime_common::{ SlowAdjustingFeeUpdate, CurrencyToVote, impls::ToAuthor, - BlockHashCount, MaximumBlockWeight, AvailableBlockRatio, MaximumBlockLength, - BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, MaximumExtrinsicWeight, + BlockHashCount, BlockWeights, BlockLength, RocksDbWeight, OffchainSolutionWeightLimit, ParachainSessionKeyPlaceholder, }; use sp_runtime::{ @@ -85,7 +87,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("westend"), impl_name: create_runtime_str!("parity-westend"), authoring_version: 2, - spec_version: 46, + spec_version: 47, impl_version: 0, #[cfg(not(feature = "disable-runtime-api"))] apis: RUNTIME_API_VERSIONS, @@ -115,8 +117,10 @@ parameter_types! 
{ pub const Version: RuntimeVersion = VERSION; } -impl frame_system::Trait for Runtime { +impl frame_system::Config for Runtime { type BaseCallFilter = BaseFilter; + type BlockWeights = BlockWeights; + type BlockLength = BlockLength; type Origin = Origin; type Call = Call; type Index = Nonce; @@ -128,13 +132,7 @@ impl frame_system::Trait for Runtime { type Header = generic::Header; type Event = Event; type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; type DbWeight = RocksDbWeight; - type BlockExecutionWeight = BlockExecutionWeight; - type ExtrinsicBaseWeight = ExtrinsicBaseWeight; - type MaximumExtrinsicWeight = MaximumExtrinsicWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; type Version = Version; type PalletInfo = PalletInfo; type AccountData = pallet_balances::AccountData; @@ -144,15 +142,17 @@ impl frame_system::Trait for Runtime { } parameter_types! { + pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * + BlockWeights::get().max_block; pub const MaxScheduledPerBlock: u32 = 50; } -impl pallet_scheduler::Trait for Runtime { +impl pallet_scheduler::Config for Runtime { type Event = Event; type Origin = Origin; type PalletsOrigin = OriginCaller; type Call = Call; - type MaximumWeight = MaximumBlockWeight; + type MaximumWeight = MaximumSchedulerWeight; type ScheduleOrigin = EnsureRoot; type MaxScheduledPerBlock = MaxScheduledPerBlock; type WeightInfo = weights::pallet_scheduler::WeightInfo; @@ -163,7 +163,7 @@ parameter_types! { pub const ExpectedBlockTime: Moment = MILLISECS_PER_BLOCK; } -impl pallet_babe::Trait for Runtime { +impl pallet_babe::Config for Runtime { type EpochDuration = EpochDuration; type ExpectedBlockTime = ExpectedBlockTime; @@ -192,7 +192,7 @@ parameter_types! 
{ pub const IndexDeposit: Balance = 1 * DOLLARS; } -impl pallet_indices::Trait for Runtime { +impl pallet_indices::Config for Runtime { type AccountIndex = AccountIndex; type Currency = Balances; type Deposit = IndexDeposit; @@ -205,7 +205,7 @@ parameter_types! { pub const MaxLocks: u32 = 50; } -impl pallet_balances::Trait for Runtime { +impl pallet_balances::Config for Runtime { type Balance = Balance; type DustRemoval = (); type Event = Event; @@ -219,9 +219,8 @@ parameter_types! { pub const TransactionByteFee: Balance = 10 * MILLICENTS; } -impl pallet_transaction_payment::Trait for Runtime { - type Currency = Balances; - type OnTransactionPayment = ToAuthor; +impl pallet_transaction_payment::Config for Runtime { + type OnChargeTransaction = CurrencyAdapter>; type TransactionByteFee = TransactionByteFee; type WeightToFee = WeightToFee; type FeeMultiplierUpdate = SlowAdjustingFeeUpdate; @@ -230,7 +229,7 @@ impl pallet_transaction_payment::Trait for Runtime { parameter_types! { pub const MinimumPeriod: u64 = SLOT_DURATION / 2; } -impl pallet_timestamp::Trait for Runtime { +impl pallet_timestamp::Config for Runtime { type Moment = u64; type OnTimestampSet = Babe; type MinimumPeriod = MinimumPeriod; @@ -242,7 +241,7 @@ parameter_types! { } // TODO: substrate#2986 implement this properly -impl pallet_authorship::Trait for Runtime { +impl pallet_authorship::Config for Runtime { type FindAuthor = pallet_session::FindAccountFromAuthorIndex; type UncleGenerations = UncleGenerations; type FilterUncle = (); @@ -268,7 +267,7 @@ parameter_types! 
{ pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(17); } -impl pallet_session::Trait for Runtime { +impl pallet_session::Config for Runtime { type Event = Event; type ValidatorId = AccountId; type ValidatorIdOf = pallet_staking::StashOf; @@ -281,7 +280,7 @@ impl pallet_session::Trait for Runtime { type WeightInfo = weights::pallet_session::WeightInfo; } -impl pallet_session::historical::Trait for Runtime { +impl pallet_session::historical::Config for Runtime { type FullIdentification = pallet_staking::Exposure; type FullIdentificationOf = pallet_staking::ExposureOf; } @@ -310,12 +309,9 @@ parameter_types! { pub const ElectionLookahead: BlockNumber = EPOCH_DURATION_IN_BLOCKS / 4; pub const MaxIterations: u32 = 10; pub MinSolutionScoreBump: Perbill = Perbill::from_rational_approximation(5u32, 10_000); - pub OffchainSolutionWeightLimit: Weight = MaximumExtrinsicWeight::get() - .saturating_sub(BlockExecutionWeight::get()) - .saturating_sub(ExtrinsicBaseWeight::get()); } -impl pallet_staking::Trait for Runtime { +impl pallet_staking::Config for Runtime { type Currency = Balances; type UnixTime = Timestamp; type CurrencyToVote = CurrencyToVote; @@ -354,17 +350,17 @@ parameter_types! { } parameter_types! { - pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * MaximumBlockWeight::get(); + pub OffencesWeightSoftLimit: Weight = Perbill::from_percent(60) * BlockWeights::get().max_block; } -impl pallet_offences::Trait for Runtime { +impl pallet_offences::Config for Runtime { type Event = Event; type IdentificationTuple = pallet_session::historical::IdentificationTuple; type OnOffenceHandler = Staking; type WeightSoftLimit = OffencesWeightSoftLimit; } -impl pallet_authority_discovery::Trait for Runtime {} +impl pallet_authority_discovery::Config for Runtime {} parameter_types! { pub const SessionDuration: BlockNumber = EPOCH_DURATION_IN_BLOCKS as _; @@ -375,7 +371,7 @@ parameter_types! 
{ pub const ImOnlineUnsignedPriority: TransactionPriority = TransactionPriority::max_value(); } -impl pallet_im_online::Trait for Runtime { +impl pallet_im_online::Config for Runtime { type AuthorityId = ImOnlineId; type Event = Event; type ReportUnresponsiveness = Offences; @@ -384,7 +380,7 @@ impl pallet_im_online::Trait for Runtime { type WeightInfo = weights::pallet_im_online::WeightInfo; } -impl pallet_grandpa::Trait for Runtime { +impl pallet_grandpa::Config for Runtime { type Event = Event; type Call = Call; @@ -412,7 +408,7 @@ impl frame_system::offchain::CreateSignedTransaction for R call: Call, public: ::Signer, account: AccountId, - nonce: ::Index, + nonce: ::Index, ) -> Option<(Call, ::SignaturePayload)> { // take the biggest period possible. let period = BlockHashCount::get() @@ -468,7 +464,7 @@ parameter_types! { pub const MaxRegistrars: u32 = 20; } -impl pallet_identity::Trait for Runtime { +impl pallet_identity::Config for Runtime { type Event = Event; type Currency = Balances; type Slashed = (); @@ -483,7 +479,7 @@ impl pallet_identity::Trait for Runtime { type WeightInfo = weights::pallet_identity::WeightInfo; } -impl pallet_utility::Trait for Runtime { +impl pallet_utility::Config for Runtime { type Event = Event; type Call = Call; type WeightInfo = weights::pallet_utility::WeightInfo; @@ -497,7 +493,7 @@ parameter_types! { pub const MaxSignatories: u16 = 100; } -impl pallet_multisig::Trait for Runtime { +impl pallet_multisig::Config for Runtime { type Event = Event; type Call = Call; type Currency = Balances; @@ -514,7 +510,7 @@ parameter_types! { pub const RecoveryDeposit: Balance = 5 * DOLLARS; } -impl pallet_recovery::Trait for Runtime { +impl pallet_recovery::Config for Runtime { type Event = Event; type Call = Call; type Currency = Balances; @@ -528,7 +524,7 @@ parameter_types! 
{ pub const MinVestedTransfer: Balance = 100 * DOLLARS; } -impl pallet_vesting::Trait for Runtime { +impl pallet_vesting::Config for Runtime { type Event = Event; type Currency = Balances; type BlockNumberToBalance = ConvertInto; @@ -536,7 +532,7 @@ impl pallet_vesting::Trait for Runtime { type WeightInfo = weights::pallet_vesting::WeightInfo; } -impl pallet_sudo::Trait for Runtime { +impl pallet_sudo::Config for Runtime { type Event = Event; type Call = Call; } @@ -600,7 +596,9 @@ impl InstanceFilter for ProxyType { Call::Multisig(..) ), ProxyType::Staking => matches!(c, - Call::Staking(..) | Call::Utility(..) + Call::Staking(..) | + Call::Session(..) | + Call::Utility(..) ), ProxyType::SudoBalances => match c { Call::Sudo(pallet_sudo::Call::sudo(ref x)) => matches!(x.as_ref(), &Call::Balances(..)), @@ -608,8 +606,8 @@ impl InstanceFilter for ProxyType { _ => false, }, ProxyType::IdentityJudgement => matches!(c, - Call::Identity(pallet_identity::Call::provide_judgement(..)) - | Call::Utility(pallet_utility::Call::batch(..)) + Call::Identity(pallet_identity::Call::provide_judgement(..)) | + Call::Utility(..) ) } } @@ -624,7 +622,7 @@ impl InstanceFilter for ProxyType { } } -impl pallet_proxy::Trait for Runtime { +impl pallet_proxy::Config for Runtime { type Event = Event; type Call = Call; type Currency = Balances; @@ -821,9 +819,13 @@ sp_api::impl_runtime_apis! { None } + fn historical_validation_code(_: Id, _: BlockNumber) -> Option { + None + } + fn check_validation_outputs( _: Id, - _: primitives::v1::ValidationOutputs + _: primitives::v1::CandidateCommitments ) -> bool { false } @@ -832,6 +834,10 @@ sp_api::impl_runtime_apis! { 0 } + fn session_info(_: SessionIndex) -> Option { + None + } + fn validation_code(_: Id, _: OccupiedCoreAssumption) -> Option { None } @@ -844,15 +850,17 @@ sp_api::impl_runtime_apis! 
{ Vec::new() } - fn validator_discovery(_: Vec) -> Vec> { - Vec::new() - } - fn dmq_contents( _recipient: Id, - ) -> Vec> { + ) -> Vec> { Vec::new() } + + fn inbound_hrmp_channels_contents( + _recipient: Id + ) -> BTreeMap>> { + BTreeMap::new() + } } impl fg_primitives::GrandpaApi for Runtime { @@ -879,7 +887,7 @@ sp_api::impl_runtime_apis! { _set_id: fg_primitives::SetId, authority_id: fg_primitives::AuthorityId, ) -> Option { - use codec::Encode; + use parity_scale_codec::Encode; Historical::prove((fg_primitives::KEY_TYPE, authority_id)) .map(|p| p.encode()) @@ -912,7 +920,7 @@ sp_api::impl_runtime_apis! { _slot_number: babe_primitives::SlotNumber, authority_id: babe_primitives::AuthorityId, ) -> Option { - use codec::Encode; + use parity_scale_codec::Encode; Historical::prove((babe_primitives::KEY_TYPE, authority_id)) .map(|p| p.encode()) @@ -978,9 +986,9 @@ sp_api::impl_runtime_apis! { use pallet_offences_benchmarking::Module as OffencesBench; use frame_system_benchmarking::Module as SystemBench; - impl pallet_session_benchmarking::Trait for Runtime {} - impl pallet_offences_benchmarking::Trait for Runtime {} - impl frame_system_benchmarking::Trait for Runtime {} + impl pallet_session_benchmarking::Config for Runtime {} + impl pallet_offences_benchmarking::Config for Runtime {} + impl frame_system_benchmarking::Config for Runtime {} let whitelist: Vec = vec![ // Block Number diff --git a/runtime/westend/src/weights/frame_system.rs b/runtime/westend/src/weights/frame_system.rs index 7c1162fd52ba47922ee8185ccc0dc57939c05db6..a3f5ee5b891d34c2cbb4e53899f81c49c052084e 100644 --- a/runtime/westend/src/weights/frame_system.rs +++ b/runtime/westend/src/weights/frame_system.rs @@ -15,7 +15,23 @@ // along with Polkadot. If not, see . //! Weights for frame_system //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! 
DATE: 2020-10-31, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 128 + +// Executed Command: +// ./target/release/polkadot +// benchmark +// --chain=westend-dev +// --steps=50 +// --repeat=20 +// --pallet=frame_system +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/westend/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,36 +39,37 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for frame_system. pub struct WeightInfo(PhantomData); -impl frame_system::WeightInfo for WeightInfo { - fn remark(_b: u32) -> Weight { - (1_816_000 as Weight) +impl frame_system::WeightInfo for WeightInfo { + fn remark(_b: u32, ) -> Weight { + (1_859_000 as Weight) } fn set_heap_pages() -> Weight { - (2_526_000 as Weight) + (2_452_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_changes_trie_config() -> Weight { - (9_717_000 as Weight) + (10_157_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn set_storage(i: u32, ) -> Weight { (0 as Weight) - .saturating_add((790_000 as Weight).saturating_mul(i as Weight)) + .saturating_add((806_000 as Weight).saturating_mul(i as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) } fn kill_storage(i: u32, ) -> Weight { (0 as Weight) - .saturating_add((561_000 as Weight).saturating_mul(i as Weight)) + .saturating_add((544_000 as Weight).saturating_mul(i as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) } fn kill_prefix(p: u32, ) -> Weight { (0 as Weight) - .saturating_add((858_000 as Weight).saturating_mul(p as Weight)) + .saturating_add((866_000 as Weight).saturating_mul(p as 
Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) } fn suicide() -> Weight { - (35_710_000 as Weight) + (34_442_000 as Weight) } } diff --git a/runtime/westend/src/weights/pallet_balances.rs b/runtime/westend/src/weights/pallet_balances.rs index e21bc78fb10e44a785c3f1be32afb737f892a7ed..361ad5a6f82aa841e995587858c1893bace39858 100644 --- a/runtime/westend/src/weights/pallet_balances.rs +++ b/runtime/westend/src/weights/pallet_balances.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_balances +//! Autogenerated weights for pallet_balances +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=westend-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_balances +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/westend/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,30 +40,31 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_balances. 
pub struct WeightInfo(PhantomData); -impl pallet_balances::WeightInfo for WeightInfo { +impl pallet_balances::WeightInfo for WeightInfo { fn transfer() -> Weight { - (92_238_000 as Weight) + (95_429_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn transfer_keep_alive() -> Weight { - (63_272_000 as Weight) + (66_088_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_balance_creating() -> Weight { - (34_822_000 as Weight) + (35_936_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_balance_killing() -> Weight { - (44_051_000 as Weight) + (45_397_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_transfer() -> Weight { - (90_885_000 as Weight) + (93_993_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } diff --git a/runtime/westend/src/weights/pallet_identity.rs b/runtime/westend/src/weights/pallet_identity.rs index 21f5f71e30f5426eba9bba1c918134438d583aae..c69428f7801c253ffcffc41867be3f809451df1f 100644 --- a/runtime/westend/src/weights/pallet_identity.rs +++ b/runtime/westend/src/weights/pallet_identity.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_identity +//! Autogenerated weights for pallet_identity +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=westend-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_identity +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/westend/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,114 +40,139 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_identity. pub struct WeightInfo(PhantomData); -impl pallet_identity::WeightInfo for WeightInfo { +impl pallet_identity::WeightInfo for WeightInfo { fn add_registrar(r: u32, ) -> Weight { - (26_576_000 as Weight) - .saturating_add((303_000 as Weight).saturating_mul(r as Weight)) + (27_481_000 as Weight) + // Standard Error: 2_000 + .saturating_add((300_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_identity(r: u32, x: u32, ) -> Weight { - (70_937_000 as Weight) - .saturating_add((202_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((1_743_000 as Weight).saturating_mul(x as Weight)) + (71_220_000 as Weight) + // Standard Error: 19_000 + .saturating_add((269_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 2_000 + .saturating_add((1_814_000 as Weight).saturating_mul(x as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_subs_new(s: u32, ) -> Weight { - (50_627_000 as Weight) - .saturating_add((9_326_000 as Weight).saturating_mul(s as Weight)) + (52_505_000 as Weight) + // Standard Error: 1_000 + .saturating_add((9_913_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().reads((1 
as Weight).saturating_mul(s as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn set_subs_old(p: u32, ) -> Weight { - (46_994_000 as Weight) - .saturating_add((3_274_000 as Weight).saturating_mul(p as Weight)) + (47_853_000 as Weight) + // Standard Error: 0 + .saturating_add((3_432_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) } fn clear_identity(r: u32, s: u32, x: u32, ) -> Weight { - (58_587_000 as Weight) - .saturating_add((205_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((3_271_000 as Weight).saturating_mul(s as Weight)) - .saturating_add((1_017_000 as Weight).saturating_mul(x as Weight)) + (62_074_000 as Weight) + // Standard Error: 8_000 + .saturating_add((169_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 0 + .saturating_add((3_436_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 + .saturating_add((1_058_000 as Weight).saturating_mul(x as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn request_judgement(r: u32, x: u32, ) -> Weight { - (71_095_000 as Weight) - .saturating_add((317_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((1_989_000 as Weight).saturating_mul(x as Weight)) + (72_697_000 as Weight) + // Standard Error: 8_000 + .saturating_add((316_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 1_000 + .saturating_add((2_064_000 as Weight).saturating_mul(x as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn cancel_request(r: u32, x: u32, ) -> 
Weight { - (61_521_000 as Weight) - .saturating_add((171_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((1_975_000 as Weight).saturating_mul(x as Weight)) + (62_349_000 as Weight) + // Standard Error: 11_000 + .saturating_add((203_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 1_000 + .saturating_add((2_048_000 as Weight).saturating_mul(x as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_fee(r: u32, ) -> Weight { - (10_490_000 as Weight) - .saturating_add((256_000 as Weight).saturating_mul(r as Weight)) + (10_602_000 as Weight) + // Standard Error: 1_000 + .saturating_add((265_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_account_id(r: u32, ) -> Weight { - (11_703_000 as Weight) - .saturating_add((259_000 as Weight).saturating_mul(r as Weight)) + (12_087_000 as Weight) + // Standard Error: 2_000 + .saturating_add((264_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_fields(r: u32, ) -> Weight { - (10_525_000 as Weight) - .saturating_add((252_000 as Weight).saturating_mul(r as Weight)) + (10_578_000 as Weight) + // Standard Error: 1_000 + .saturating_add((268_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn provide_judgement(r: u32, x: u32, ) -> Weight { - (47_790_000 as Weight) - .saturating_add((284_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((1_982_000 as Weight).saturating_mul(x as Weight)) + (48_552_000 as Weight) + // Standard Error: 8_000 + .saturating_add((279_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 1_000 + .saturating_add((2_067_000 as Weight).saturating_mul(x as 
Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } - fn kill_identity(r: u32, s: u32, x: u32 ) -> Weight { - (96_980_000 as Weight) - .saturating_add((210_000 as Weight).saturating_mul(r as Weight)) - .saturating_add((3_285_000 as Weight).saturating_mul(s as Weight)) - .saturating_add((2_000 as Weight).saturating_mul(x as Weight)) + fn kill_identity(r: u32, s: u32, x: u32, ) -> Weight { + (60_031_000 as Weight) + // Standard Error: 4_000 + .saturating_add((140_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 0 + .saturating_add((3_423_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 + .saturating_add((3_000 as Weight).saturating_mul(x as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn add_sub(s: u32, ) -> Weight { - (71_394_000 as Weight) - .saturating_add((187_000 as Weight).saturating_mul(s as Weight)) + (71_751_000 as Weight) + // Standard Error: 0 + .saturating_add((185_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn rename_sub(s: u32, ) -> Weight { - (23_806_000 as Weight) - .saturating_add((26_000 as Weight).saturating_mul(s as Weight)) + (23_607_000 as Weight) + // Standard Error: 0 + .saturating_add((23_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn remove_sub(s: u32, ) -> Weight { - (67_756_000 as Weight) - .saturating_add((161_000 as Weight).saturating_mul(s as Weight)) + (68_696_000 as Weight) + // Standard Error: 0 + .saturating_add((160_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) 
.saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn quit_sub(s: u32, ) -> Weight { - (44_687_000 as Weight) - .saturating_add((157_000 as Weight).saturating_mul(s as Weight)) + (45_448_000 as Weight) + // Standard Error: 0 + .saturating_add((155_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } diff --git a/runtime/westend/src/weights/pallet_im_online.rs b/runtime/westend/src/weights/pallet_im_online.rs index 07fca4c93543ddc31d8befdbd31353755aed77db..6bcb46d94e3dbfb7c4a5e58a4a4b872975f9409b 100644 --- a/runtime/westend/src/weights/pallet_im_online.rs +++ b/runtime/westend/src/weights/pallet_im_online.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_im_online +//! Autogenerated weights for pallet_im_online +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=westend-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_im_online +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/westend/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,12 +40,15 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_im_online. 
pub struct WeightInfo(PhantomData); -impl pallet_im_online::WeightInfo for WeightInfo { +impl pallet_im_online::WeightInfo for WeightInfo { fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight { - (109_072_000 as Weight) + (112_311_000 as Weight) + // Standard Error: 0 .saturating_add((216_000 as Weight).saturating_mul(k as Weight)) - .saturating_add((481_000 as Weight).saturating_mul(e as Weight)) + // Standard Error: 1_000 + .saturating_add((497_000 as Weight).saturating_mul(e as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } diff --git a/runtime/westend/src/weights/pallet_indices.rs b/runtime/westend/src/weights/pallet_indices.rs index 7c45997f2f07c1d3ff55389fb729778387a95783..db2555c63ca085903308d1c4081ff2bd70f67fd2 100644 --- a/runtime/westend/src/weights/pallet_indices.rs +++ b/runtime/westend/src/weights/pallet_indices.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_indices +//! Autogenerated weights for pallet_indices +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=westend-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_indices +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/westend/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,30 +40,31 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_indices. 
pub struct WeightInfo(PhantomData); -impl pallet_indices::WeightInfo for WeightInfo { +impl pallet_indices::WeightInfo for WeightInfo { fn claim() -> Weight { - (50_234_000 as Weight) + (52_389_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn transfer() -> Weight { - (55_731_000 as Weight) + (58_943_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn free() -> Weight { - (44_823_000 as Weight) + (47_207_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_transfer() -> Weight { - (46_183_000 as Weight) + (48_696_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn freeze() -> Weight { - (41_829_000 as Weight) + (44_096_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } diff --git a/runtime/westend/src/weights/pallet_multisig.rs b/runtime/westend/src/weights/pallet_multisig.rs index ab0c619bf26cbfab56ead44694e7ca319d9aca25..ebd9e865d80f0fa5904f70f8460860ddbeceda35 100644 --- a/runtime/westend/src/weights/pallet_multisig.rs +++ b/runtime/westend/src/weights/pallet_multisig.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_multisig +//! Autogenerated weights for pallet_multisig +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=westend-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_multisig +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/westend/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,68 +40,84 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_multisig. pub struct WeightInfo(PhantomData); -impl pallet_multisig::WeightInfo for WeightInfo { +impl pallet_multisig::WeightInfo for WeightInfo { fn as_multi_threshold_1(z: u32, ) -> Weight { - (12_481_000 as Weight) + (12_182_000 as Weight) + // Standard Error: 0 .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) } fn as_multi_create(s: u32, z: u32, ) -> Weight { - (65_650_000 as Weight) - .saturating_add((92_000 as Weight).saturating_mul(s as Weight)) + (68_501_000 as Weight) + // Standard Error: 0 + .saturating_add((85_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn as_multi_create_store(s: u32, z: u32, ) -> Weight { - (74_115_000 as Weight) - .saturating_add((94_000 as Weight).saturating_mul(s as Weight)) + (76_757_000 as Weight) + // Standard Error: 0 + .saturating_add((90_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 .saturating_add((3_000 as Weight).saturating_mul(z as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn as_multi_approve(s: u32, z: u32, ) -> Weight { - (40_671_000 as Weight) - .saturating_add((111_000 as Weight).saturating_mul(s as Weight)) + (40_987_000 as 
Weight) + // Standard Error: 0 + .saturating_add((110_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 .saturating_add((1_000 as Weight).saturating_mul(z as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn as_multi_approve_store(s: u32, z: u32, ) -> Weight { - (71_781_000 as Weight) - .saturating_add((126_000 as Weight).saturating_mul(s as Weight)) + (73_764_000 as Weight) + // Standard Error: 0 + .saturating_add((120_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 .saturating_add((3_000 as Weight).saturating_mul(z as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn as_multi_complete(s: u32, z: u32, ) -> Weight { - (83_125_000 as Weight) - .saturating_add((248_000 as Weight).saturating_mul(s as Weight)) + (85_252_000 as Weight) + // Standard Error: 0 + .saturating_add((241_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 0 .saturating_add((5_000 as Weight).saturating_mul(z as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn approve_as_multi_create(s: u32, ) -> Weight { - (65_465_000 as Weight) - .saturating_add((94_000 as Weight).saturating_mul(s as Weight)) + (67_717_000 as Weight) + // Standard Error: 0 + .saturating_add((88_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn approve_as_multi_approve(s: u32, ) -> Weight { - (39_362_000 as Weight) - .saturating_add((113_000 as Weight).saturating_mul(s as Weight)) + (40_372_000 as Weight) + // Standard Error: 0 + .saturating_add((111_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn approve_as_multi_complete(s: u32, ) -> 
Weight { - (154_095_000 as Weight) - .saturating_add((249_000 as Weight).saturating_mul(s as Weight)) + (157_866_000 as Weight) + // Standard Error: 0 + .saturating_add((243_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn cancel_as_multi(s: u32, ) -> Weight { - (107_420_000 as Weight) - .saturating_add((95_000 as Weight).saturating_mul(s as Weight)) + (109_344_000 as Weight) + // Standard Error: 0 + .saturating_add((90_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } diff --git a/runtime/westend/src/weights/pallet_proxy.rs b/runtime/westend/src/weights/pallet_proxy.rs index 36cbb94ad6efeb1aab9406d81bad917a1ef12842..8965ccb2006d1da552608e0f39005dbf403c9ef4 100644 --- a/runtime/westend/src/weights/pallet_proxy.rs +++ b/runtime/westend/src/weights/pallet_proxy.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_proxy +//! Autogenerated weights for pallet_proxy +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=westend-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_proxy +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/westend/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,68 +40,81 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_proxy. pub struct WeightInfo(PhantomData); -impl pallet_proxy::WeightInfo for WeightInfo { +impl pallet_proxy::WeightInfo for WeightInfo { fn proxy(p: u32, ) -> Weight { - (30_453_000 as Weight) - .saturating_add((187_000 as Weight).saturating_mul(p as Weight)) + (31_451_000 as Weight) + // Standard Error: 1_000 + .saturating_add((190_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) } fn proxy_announced(a: u32, p: u32, ) -> Weight { - (64_472_000 as Weight) - .saturating_add((799_000 as Weight).saturating_mul(a as Weight)) - .saturating_add((182_000 as Weight).saturating_mul(p as Weight)) + (65_914_000 as Weight) + // Standard Error: 1_000 + .saturating_add((822_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 1_000 + .saturating_add((183_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn remove_announcement(a: u32, p: u32, ) -> Weight { - (41_241_000 as Weight) - .saturating_add((792_000 as Weight).saturating_mul(a as Weight)) - .saturating_add((3_000 as Weight).saturating_mul(p as Weight)) + (41_597_000 as Weight) + // Standard Error: 1_000 + .saturating_add((821_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 1_000 + .saturating_add((11_000 as Weight).saturating_mul(p as Weight)) 
.saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } - fn reject_announcement(a: u32, p: u32, ) -> Weight { - (40_818_000 as Weight) - .saturating_add((799_000 as Weight).saturating_mul(a as Weight)) - .saturating_add((10_000 as Weight).saturating_mul(p as Weight)) + fn reject_announcement(a: u32, _p: u32, ) -> Weight { + (46_884_000 as Weight) + // Standard Error: 12_000 + .saturating_add((886_000 as Weight).saturating_mul(a as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn announce(a: u32, p: u32, ) -> Weight { - (64_919_000 as Weight) - .saturating_add((695_000 as Weight).saturating_mul(a as Weight)) - .saturating_add((191_000 as Weight).saturating_mul(p as Weight)) + (66_635_000 as Weight) + // Standard Error: 1_000 + .saturating_add((716_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 1_000 + .saturating_add((188_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn add_proxy(p: u32, ) -> Weight { - (43_889_000 as Weight) - .saturating_add((190_000 as Weight).saturating_mul(p as Weight)) + (44_921_000 as Weight) + // Standard Error: 1_000 + .saturating_add((193_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn remove_proxy(p: u32, ) -> Weight { - (39_338_000 as Weight) - .saturating_add((228_000 as Weight).saturating_mul(p as Weight)) + (40_276_000 as Weight) + // Standard Error: 1_000 + .saturating_add((230_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn remove_proxies(p: u32, ) -> Weight { - (37_787_000 as Weight) - .saturating_add((182_000 as Weight).saturating_mul(p as Weight)) + 
(38_385_000 as Weight) + // Standard Error: 1_000 + .saturating_add((187_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn anonymous(p: u32, ) -> Weight { - (62_203_000 as Weight) - .saturating_add((22_000 as Weight).saturating_mul(p as Weight)) + (63_987_000 as Weight) + // Standard Error: 1_000 + .saturating_add((29_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn kill_anonymous(p: u32, ) -> Weight { - (40_398_000 as Weight) - .saturating_add((182_000 as Weight).saturating_mul(p as Weight)) + (41_015_000 as Weight) + // Standard Error: 2_000 + .saturating_add((189_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } diff --git a/runtime/westend/src/weights/pallet_scheduler.rs b/runtime/westend/src/weights/pallet_scheduler.rs index f360917dd49c13ca76f3d62855cb957ddc872997..acef5c94203d405f70fe280fd6dd42eb0d675edd 100644 --- a/runtime/westend/src/weights/pallet_scheduler.rs +++ b/runtime/westend/src/weights/pallet_scheduler.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_scheduler +//! Autogenerated weights for pallet_scheduler +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=westend-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_scheduler +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/westend/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,29 +40,34 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_scheduler. pub struct WeightInfo(PhantomData); -impl pallet_scheduler::WeightInfo for WeightInfo { +impl pallet_scheduler::WeightInfo for WeightInfo { fn schedule(s: u32, ) -> Weight { - (33_047_000 as Weight) - .saturating_add((47_000 as Weight).saturating_mul(s as Weight)) + (33_809_000 as Weight) + // Standard Error: 0 + .saturating_add((43_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn cancel(s: u32, ) -> Weight { - (30_172_000 as Weight) - .saturating_add((3_053_000 as Weight).saturating_mul(s as Weight)) + (30_493_000 as Weight) + // Standard Error: 6_000 + .saturating_add((3_041_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn schedule_named(s: u32, ) -> Weight { - (42_799_000 as Weight) - .saturating_add((64_000 as Weight).saturating_mul(s as Weight)) + (43_391_000 as Weight) + // Standard Error: 1_000 + .saturating_add((62_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn cancel_named(s: u32, ) -> Weight { - (34_134_000 as Weight) - .saturating_add((3_064_000 as Weight).saturating_mul(s as Weight)) + (34_735_000 as Weight) + // Standard 
Error: 6_000 + .saturating_add((3_058_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } diff --git a/runtime/westend/src/weights/pallet_session.rs b/runtime/westend/src/weights/pallet_session.rs index 4732454bc2116b2d8e61ff9b6f7f328fb531dcf5..c5bf1d72eb4b7824421e94c995c68354d9876475 100644 --- a/runtime/westend/src/weights/pallet_session.rs +++ b/runtime/westend/src/weights/pallet_session.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_session +//! Autogenerated weights for pallet_session +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=westend-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_session +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/westend/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,15 +40,16 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_session. 
pub struct WeightInfo(PhantomData); -impl pallet_session::WeightInfo for WeightInfo { +impl pallet_session::WeightInfo for WeightInfo { fn set_keys() -> Weight { - (89_552_000 as Weight) + (91_654_000 as Weight) .saturating_add(T::DbWeight::get().reads(7 as Weight)) .saturating_add(T::DbWeight::get().writes(6 as Weight)) } fn purge_keys() -> Weight { - (53_122_000 as Weight) + (54_360_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(6 as Weight)) } diff --git a/runtime/westend/src/weights/pallet_staking.rs b/runtime/westend/src/weights/pallet_staking.rs index 35e2ebe2a861c575e27b46799b62c844daea4b86..680fe3dac8ec1baec6ef6bf6c7fbab71bfa304d6 100644 --- a/runtime/westend/src/weights/pallet_staking.rs +++ b/runtime/westend/src/weights/pallet_staking.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_staking +//! Autogenerated weights for pallet_staking +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-30, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=westend-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_staking +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/westend/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,137 +40,151 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_staking. 
pub struct WeightInfo(PhantomData); -impl pallet_staking::WeightInfo for WeightInfo { +impl pallet_staking::WeightInfo for WeightInfo { fn bond() -> Weight { - (95_041_000 as Weight) + (97_009_000 as Weight) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn bond_extra() -> Weight { - (73_981_000 as Weight) + (76_157_000 as Weight) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn unbond() -> Weight { - (67_107_000 as Weight) + (69_106_000 as Weight) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn withdraw_unbonded_update(s: u32, ) -> Weight { - (67_795_000 as Weight) - .saturating_add((32_000 as Weight).saturating_mul(s as Weight)) + (69_753_000 as Weight) + // Standard Error: 0 + .saturating_add((28_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn withdraw_unbonded_kill(s: u32, ) -> Weight { - (110_537_000 as Weight) - .saturating_add((3_879_000 as Weight).saturating_mul(s as Weight)) + (114_294_000 as Weight) + // Standard Error: 1_000 + .saturating_add((3_968_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(7 as Weight)) .saturating_add(T::DbWeight::get().writes(8 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn validate() -> Weight { - (22_802_000 as Weight) + (24_191_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn nominate(n: u32, ) -> Weight { - (29_784_000 as Weight) - .saturating_add((401_000 as Weight).saturating_mul(n as Weight)) + (31_373_000 as Weight) + // Standard Error: 12_000 + .saturating_add((393_000 as Weight).saturating_mul(n as Weight)) 
.saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn chill() -> Weight { - (22_719_000 as Weight) + (23_668_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn set_payee() -> Weight { - (15_354_000 as Weight) + (16_126_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_controller() -> Weight { - (33_783_000 as Weight) + (35_127_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn set_validator_count() -> Weight { - (3_126_000 as Weight) + (3_249_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_no_eras() -> Weight { - (3_548_000 as Weight) + (3_644_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_new_era() -> Weight { - (3_474_000 as Weight) + (3_647_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_new_era_always() -> Weight { - (3_518_000 as Weight) + (3_604_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_invulnerables(v: u32, ) -> Weight { - (3_639_000 as Weight) + (3_825_000 as Weight) + // Standard Error: 0 .saturating_add((9_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_unstake(s: u32, ) -> Weight { - (75_900_000 as Weight) - .saturating_add((3_891_000 as Weight).saturating_mul(s as Weight)) + (77_182_000 as Weight) + // Standard Error: 1_000 + .saturating_add((3_957_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(8 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn cancel_deferred_slash(s: u32, ) -> Weight { - (5_844_744_000 as 
Weight) - .saturating_add((34_644_000 as Weight).saturating_mul(s as Weight)) + (5_828_506_000 as Weight) + // Standard Error: 388_000 + .saturating_add((34_623_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn payout_stakers_dead_controller(n: u32, ) -> Weight { - (145_214_000 as Weight) - .saturating_add((57_875_000 as Weight).saturating_mul(n as Weight)) + (131_768_000 as Weight) + // Standard Error: 13_000 + .saturating_add((59_048_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(11 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) } fn payout_stakers_alive_staked(n: u32, ) -> Weight { - (173_115_000 as Weight) - .saturating_add((76_912_000 as Weight).saturating_mul(n as Weight)) + (166_310_000 as Weight) + // Standard Error: 24_000 + .saturating_add((76_868_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(12 as Weight)) .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(3 as Weight)) .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(n as Weight))) } fn rebond(l: u32, ) -> Weight { - (46_569_000 as Weight) - .saturating_add((104_000 as Weight).saturating_mul(l as Weight)) + (47_420_000 as Weight) + // Standard Error: 2_000 + .saturating_add((99_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn set_history_depth(e: u32, ) -> Weight { (0 as Weight) - .saturating_add((36_641_000 as Weight).saturating_mul(e as Weight)) + // Standard Error: 60_000 + .saturating_add((39_014_000 as 
Weight).saturating_mul(e as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) .saturating_add(T::DbWeight::get().writes((7 as Weight).saturating_mul(e as Weight))) } fn reap_stash(s: u32, ) -> Weight { - (95_718_000 as Weight) - .saturating_add((3_875_000 as Weight).saturating_mul(s as Weight)) + (97_591_000 as Weight) + // Standard Error: 0 + .saturating_add((3_953_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(8 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn new_era(v: u32, n: u32, ) -> Weight { (0 as Weight) - .saturating_add((1_262_144_000 as Weight).saturating_mul(v as Weight)) - .saturating_add((143_471_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(10 as Weight)) + // Standard Error: 808_000 + .saturating_add((741_132_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 40_000 + .saturating_add((105_169_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) .saturating_add(T::DbWeight::get().reads((4 as Weight).saturating_mul(v as Weight))) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(8 as Weight)) @@ -161,10 +192,14 @@ impl pallet_staking::WeightInfo for WeightInfo { } fn submit_solution_better(v: u32, n: u32, a: u32, w: u32, ) -> Weight { (0 as Weight) - .saturating_add((879_000 as Weight).saturating_mul(v as Weight)) - .saturating_add((488_000 as Weight).saturating_mul(n as Weight)) - .saturating_add((133_102_000 as Weight).saturating_mul(a as Weight)) - .saturating_add((8_073_000 as Weight).saturating_mul(w as Weight)) + // Standard Error: 49_000 + .saturating_add((1_163_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 19_000 + 
.saturating_add((484_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 49_000 + .saturating_add((101_948_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 103_000 + .saturating_add((7_810_000 as Weight).saturating_mul(w as Weight)) .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().reads((4 as Weight).saturating_mul(a as Weight))) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(w as Weight))) diff --git a/runtime/westend/src/weights/pallet_timestamp.rs b/runtime/westend/src/weights/pallet_timestamp.rs index 44828204ad581819b004a0814b6bbb68e0326410..1f7fc5e488c55b744df25ee5558d68fd88366414 100644 --- a/runtime/westend/src/weights/pallet_timestamp.rs +++ b/runtime/westend/src/weights/pallet_timestamp.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_timestamp +//! Autogenerated weights for pallet_timestamp +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=westend-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_timestamp +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/westend/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,14 +40,15 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_timestamp. 
pub struct WeightInfo(PhantomData); -impl pallet_timestamp::WeightInfo for WeightInfo { +impl pallet_timestamp::WeightInfo for WeightInfo { fn set() -> Weight { - (11_029_000 as Weight) + (11_097_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn on_finalize() -> Weight { - (6_128_000 as Weight) + (6_159_000 as Weight) } } diff --git a/runtime/westend/src/weights/pallet_utility.rs b/runtime/westend/src/weights/pallet_utility.rs index 31fe14f110405533e37f85175ebf56c9d5ec2c51..06cca4364bb12211eee11292d34059478ee6ca8e 100644 --- a/runtime/westend/src/weights/pallet_utility.rs +++ b/runtime/westend/src/weights/pallet_utility.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_utility +//! Autogenerated weights for pallet_utility +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=westend-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_utility +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/westend/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,17 +40,20 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_utility. 
pub struct WeightInfo(PhantomData); -impl pallet_utility::WeightInfo for WeightInfo { +impl pallet_utility::WeightInfo for WeightInfo { fn batch(c: u32, ) -> Weight { - (18_589_000 as Weight) - .saturating_add((1_734_000 as Weight).saturating_mul(c as Weight)) + (19_122_000 as Weight) + // Standard Error: 0 + .saturating_add((1_497_000 as Weight).saturating_mul(c as Weight)) } fn as_derivative() -> Weight { - (5_611_000 as Weight) + (5_668_000 as Weight) } fn batch_all(c: u32, ) -> Weight { - (21_104_000 as Weight) - .saturating_add((1_509_000 as Weight).saturating_mul(c as Weight)) + (19_623_000 as Weight) + // Standard Error: 0 + .saturating_add((1_497_000 as Weight).saturating_mul(c as Weight)) } } diff --git a/runtime/westend/src/weights/pallet_vesting.rs b/runtime/westend/src/weights/pallet_vesting.rs index 0215020eafcbaecd77be381897c255c6dec08eef..0afc4e98f7465af51b07a32052c3acff2965e831 100644 --- a/runtime/westend/src/weights/pallet_vesting.rs +++ b/runtime/westend/src/weights/pallet_vesting.rs @@ -13,9 +13,26 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Weights for pallet_vesting +//! Autogenerated weights for pallet_vesting +//! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-09-28, STEPS: [50], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2020-12-09, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 128 + +// Executed Command: +// target/release/polkadot +// benchmark +// --chain=westend-dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_vesting +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --header=./file_header.txt +// --output=./runtime/westend/src/weights/ + #![allow(unused_parens)] #![allow(unused_imports)] @@ -23,41 +40,48 @@ use frame_support::{traits::Get, weights::Weight}; use sp_std::marker::PhantomData; +/// Weight functions for pallet_vesting. pub struct WeightInfo(PhantomData); -impl pallet_vesting::WeightInfo for WeightInfo { +impl pallet_vesting::WeightInfo for WeightInfo { fn vest_locked(l: u32, ) -> Weight { - (54_300_000 as Weight) - .saturating_add((210_000 as Weight).saturating_mul(l as Weight)) + (55_027_000 as Weight) + // Standard Error: 0 + .saturating_add((130_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn vest_unlocked(l: u32, ) -> Weight { - (57_381_000 as Weight) - .saturating_add((104_000 as Weight).saturating_mul(l as Weight)) + (59_131_000 as Weight) + // Standard Error: 2_000 + .saturating_add((110_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn vest_other_locked(l: u32, ) -> Weight { - (54_130_000 as Weight) - .saturating_add((215_000 as Weight).saturating_mul(l as Weight)) + (54_746_000 as Weight) + // Standard Error: 0 + .saturating_add((126_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn vest_other_unlocked(l: u32, ) -> Weight { - (57_208_000 as Weight) - .saturating_add((101_000 as Weight).saturating_mul(l as Weight)) + (58_988_000 as Weight) + // Standard 
Error: 2_000 + .saturating_add((106_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn vested_transfer(l: u32, ) -> Weight { - (117_560_000 as Weight) - .saturating_add((249_000 as Weight).saturating_mul(l as Weight)) + (120_685_000 as Weight) + // Standard Error: 8_000 + .saturating_add((167_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn force_vested_transfer(l: u32, ) -> Weight { - (116_476_000 as Weight) - .saturating_add((253_000 as Weight).saturating_mul(l as Weight)) + (119_814_000 as Weight) + // Standard Error: 8_000 + .saturating_add((172_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } diff --git a/scripts/adder-collator.sh b/scripts/adder-collator.sh new file mode 100755 index 0000000000000000000000000000000000000000..ca493eb0a468e72df26e9915def98c529cb6350e --- /dev/null +++ b/scripts/adder-collator.sh @@ -0,0 +1,191 @@ +#!/usr/bin/env bash + +# Run a two node local net with adder-collator. + +set -e + +chainspec="rococo-local" + +# disabled until we can actually successfully register the chain with polkadot-js-api +# if ! command -v polkadot-js-api > /dev/null; then +# echo "polkadot-js-api required; try" +# echo " sudo yarn global add @polkadot/api-cli" +# exit 1 +# fi + +PROJECT_ROOT=$(git rev-parse --show-toplevel) +# shellcheck disable=SC1090 +source "$(dirname "$0")"/common.sh + +cd "$PROJECT_ROOT" + +last_modified_rust_file=$( + find . -path ./target -prune -o -type f -name '*.rs' -printf '%T@ %p\n' | + sort -nr | + head -1 | + cut -d' ' -f2- +) + +polkadot="target/release/polkadot" +adder_collator="target/release/adder-collator" + +# ensure the polkadot binary exists and is up to date +if [ ! 
-x "$polkadot" ] || [ "$polkadot" -ot "$last_modified_rust_file" ]; then + cargo build --release --features real-overseer +fi +# likewise for the adder collator +if [ ! -x "$adder_collator" ] || [ "$adder_collator" -ot "$last_modified_rust_file" ]; then + cargo build --release --features real-overseer -p test-parachain-adder-collator +fi + +genesis="$(mktemp --directory)" +genesis_state="$genesis/state" +validation_code="$genesis/validation_code" + +"$adder_collator" export-genesis-state > "$genesis_state" +"$adder_collator" export-genesis-wasm > "$validation_code" + + +# setup variables +node_offset=0 +declare -a node_pids +declare -a node_pipes + +# create a sed expression which injects the node name and stream type into each line +function make_sed_expr() { + name="$1" + type="$2" + + printf "s/^/%16s %s: /" "$name" "$type" +} + +# turn a string into a flag +function flagify() { + printf -- '--%s' "$(tr '[:upper:]' '[:lower:]' <<< "$1")" +} + +# start a node and label its output +# +# This function takes a single argument, the node name. 
+# The name must be one of those which can be passed to the polkadot binary, in un-flagged form, +# one of: +# alice, bob, charlie, dave, eve, ferdie, one, two +function run_node() { + name="$1" + # create a named pipe so we can get the node's PID while also sedding its output + local stdout + local stderr + stdout=$(mktemp --dry-run --tmpdir) + stderr=$(mktemp --dry-run --tmpdir) + mkfifo "$stdout" + mkfifo "$stderr" + node_pipes+=("$stdout") + node_pipes+=("$stderr") + + # compute ports from offset + local port=$((30333+node_offset)) + local rpc_port=$((9933+node_offset)) + local ws_port=$((9944+node_offset)) + local prometheus_port=$((9615+node_offset)) + node_offset=$((node_offset+1)) + + # start the node + "$polkadot" \ + --chain "$chainspec" \ + --tmp \ + --port "$port" \ + --rpc-port "$rpc_port" \ + --ws-port "$ws_port" \ + --prometheus-port "$prometheus_port" \ + --rpc-cors all \ + "$(flagify "$name")" \ + > "$stdout" \ + 2> "$stderr" \ + & + local pid=$! + node_pids+=("$pid") + + # send output from the stdout pipe to stdout, prepending the node name + sed -e "$(make_sed_expr "$name" "OUT")" "$stdout" >&1 & + # send output from the stderr pipe to stderr, prepending the node name + sed -e "$(make_sed_expr "$name" "ERR")" "$stderr" >&2 & +} + +# start an adder collator and label its output +# +# This function takes a single argument, the node name. This affects only the tagging. 
+function run_adder_collator() { + name="$1" + # create a named pipe so we can get the node's PID while also sedding its output + local stdout + local stderr + stdout=$(mktemp --dry-run --tmpdir) + stderr=$(mktemp --dry-run --tmpdir) + mkfifo "$stdout" + mkfifo "$stderr" + node_pipes+=("$stdout") + node_pipes+=("$stderr") + + # compute ports from offset + local port=$((30333+node_offset)) + local rpc_port=$((9933+node_offset)) + local ws_port=$((9944+node_offset)) + local prometheus_port=$((9615+node_offset)) + node_offset=$((node_offset+1)) + + # start the node + "$adder_collator" \ + --chain "$chainspec" \ + --tmp \ + --port "$port" \ + --rpc-port "$rpc_port" \ + --ws-port "$ws_port" \ + --prometheus-port "$prometheus_port" \ + --rpc-cors all \ + > "$stdout" \ + 2> "$stderr" \ + & + local pid=$! + node_pids+=("$pid") + + # send output from the stdout pipe to stdout, prepending the node name + sed -e "$(make_sed_expr "$name" "OUT")" "$stdout" >&1 & + # send output from the stderr pipe to stderr, prepending the node name + sed -e "$(make_sed_expr "$name" "ERR")" "$stderr" >&2 & +} + + +# clean up the nodes when this script exits +function finish { + for node_pid in "${node_pids[@]}"; do + kill -9 "$node_pid" + done + for node_pipe in "${node_pipes[@]}"; do + rm "$node_pipe" + done + rm -rf "$genesis" +} +trap finish EXIT + +# start the nodes +run_node Alice +run_node Bob +run_adder_collator AdderCollator + +# register the adder collator +# doesn't work yet due to https://github.com/polkadot-js/tools/issues/185 +# polkadot-js-api \ +# --ws ws://localhost:9944 \ +# --sudo \ +# --seed "//Alice" \ +# tx.registrar.registerPara \ +# 100 \ +# '{"scheduling":"Always"}' \ +# "@$validation_code" \ +# "@$genesis_state" + +# now wait; this will exit on its own only if both subprocesses exit +# the practical implication, as both subprocesses are supposed to run forever, is that +# this script will also run forever, until killed, at which point the exit trap should kill +# the 
subprocesses +wait diff --git a/scripts/docker/release.Dockerfile b/scripts/docker/release.Dockerfile index 517368ce2a79ba3d60951ec34426a6dcd11a7dd1..b4d3a786ecde77b0dd4bf16964d062fbc1fa4882 100644 --- a/scripts/docker/release.Dockerfile +++ b/scripts/docker/release.Dockerfile @@ -3,6 +3,7 @@ FROM debian:buster-slim # metadata ARG VCS_REF ARG BUILD_DATE +ARG POLKADOT_VERSION LABEL io.parity.image.authors="devops-team@parity.io" \ io.parity.image.vendor="Parity Technologies" \ @@ -18,21 +19,21 @@ ENV RUST_BACKTRACE 1 # install tools and dependencies RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get upgrade -y && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y \ + DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ libssl1.1 \ ca-certificates \ curl \ gnupg && \ + useradd -m -u 1000 -U -s /bin/sh -d /polkadot polkadot && \ gpg --recv-keys --keyserver hkps://keys.mailvelope.com 9D4B2B6EB8F97156D19669A9FF0812D491B96798 && \ gpg --export 9D4B2B6EB8F97156D19669A9FF0812D491B96798 > /usr/share/keyrings/parity.gpg && \ echo 'deb [signed-by=/usr/share/keyrings/parity.gpg] https://releases.parity.io/deb release main' > /etc/apt/sources.list.d/parity.list && \ - apt update && \ - apt install polkadot && \ + apt-get update && \ + apt-get install -y --no-install-recommends polkadot=${POLKADOT_VERSION#?} && \ # apt cleanup apt-get autoremove -y && \ apt-get clean && \ - find /var/lib/apt/lists/ -type f -not -name lock -delete + rm -rf /var/lib/apt/lists/* USER polkadot diff --git a/scripts/github/generate_release_text.rb b/scripts/github/generate_release_text.rb index caca24d77a24f844f2376855e455c2abb100ecf8..3b2a885eed9a57e11ec991eadddc28347eb64037 100644 --- a/scripts/github/generate_release_text.rb +++ b/scripts/github/generate_release_text.rb @@ -24,7 +24,7 @@ renderer = ERB.new( ) # get ref of last polkadot release -last_ref = "refs/tags/" + github_client.latest_release(ENV['GITHUB_REPOSITORY']).tag_name +last_ref = 'refs/tags/' + 
github_client.latest_release(ENV['GITHUB_REPOSITORY']).tag_name polkadot_cl = Changelog.new( 'paritytech/polkadot', last_ref, current_ref, token: token @@ -37,7 +37,7 @@ def get_substrate_commit(client, ref) client.contents( ENV['GITHUB_REPOSITORY'], path: 'Cargo.lock', - query: { ref: "#{ref}"} + query: { ref: ref.to_s } ).content ) ).parsed @@ -81,7 +81,9 @@ runtime_changes.each do |c| c[:pretty_title] = "✅ `trivial` #{c[:pretty_title]}" end -release_priority = Changelog.highest_priority_for_changes(all_changes) +# The priority of users upgraded is determined by the highest-priority +# *Client* change +release_priority = Changelog.highest_priority_for_changes(client_changes) # Pulled from the previous Github step rustc_stable = ENV['RUSTC_STABLE'] diff --git a/scripts/github/polkadot_release.erb b/scripts/github/polkadot_release.erb index dde7165e92efda4b5789f1a20361050b5dc1808b..2078fa3bb96f3e464980bee4b550a831f8df160f 100644 --- a/scripts/github/polkadot_release.erb +++ b/scripts/github/polkadot_release.erb @@ -11,7 +11,7 @@ This release was tested against the following versions of `rustc`. Other version - <%= rustc_stable %> - <%= rustc_nightly %> -WASM runtimes built with [srtool](https://gitlab.com/chevdor/srtool) using `<%= polkadot_json['rustc'] %>`. +WASM runtimes built with [srtool](https://github.com/paritytech/srtool) using `<%= polkadot_json['rustc'] %>`. 
Proposal hashes: * `polkadot_runtime-v<%= polkadot_runtime %>.compact.wasm - <%= polkadot_json['prop'] %>` diff --git a/scripts/gitlab/check_extrinsics_ordering.sh b/scripts/gitlab/check_extrinsics_ordering.sh new file mode 100755 index 0000000000000000000000000000000000000000..cfdad6369158c72b83b927da7a2582db74943d32 --- /dev/null +++ b/scripts/gitlab/check_extrinsics_ordering.sh @@ -0,0 +1,59 @@ +#!/bin/bash +BIN=./target/release/polkadot +LIVE_WS=wss://rpc.polkadot.io +LOCAL_WS=ws://localhost:9944 + +# Kill the polkadot client before exiting +trap 'kill "$(jobs -p)"' EXIT + +runtimes=( + "westend" + "kusama" + "polkadot" +) + +for RUNTIME in "${runtimes[@]}"; do + echo "[+] Checking runtime: ${RUNTIME}" + + release_transaction_version=$( + git show "origin/release:runtime/${RUNTIME}/src/lib.rs" | \ + grep 'transaction_version' + ) + + current_transaction_version=$( + grep 'transaction_version' "./runtime/${RUNTIME}/src/lib.rs" + ) + + echo "[+] Release: ${release_transaction_version}" + echo "[+] Ours: ${current_transaction_version}" + + if [ ! "$release_transaction_version" = "$current_transaction_version" ]; then + echo "[+] Transaction version for ${RUNTIME} has been bumped since last release." + exit 0 + fi + + if [ "$RUNTIME" = 'polkadot' ]; then + LIVE_WS="wss://rpc.polkadot.io" + else + LIVE_WS="wss://${RUNTIME}-rpc.polkadot.io" + fi + + # Start running the local polkadot node in the background + $BIN --chain="$RUNTIME-local" & + jobs + + changed_extrinsics=$( + polkadot-js-metadata-cmp "$LIVE_WS" "$LOCAL_WS" \ + | sed 's/^ \+//g' | grep -e 'idx: [0-9]\+ -> [0-9]\+' + ) + + if [ -n "$changed_extrinsics" ]; then + echo "[!] Extrinsics indexing/ordering has changed in the ${RUNTIME} runtime! If this change is intentional, please bump transaction_version in lib.rs. 
Changed extrinsics:" + echo "$changed_extrinsics" + exit 1 + fi + + echo "[+] No change in extrinsics ordering for the ${RUNTIME} runtime" + kill "$(jobs -p)"; sleep 5 +done + diff --git a/scripts/gitlab/check_runtime.sh b/scripts/gitlab/check_runtime.sh index 7b2a4a1fafac6527a739367bc4242f217d1fc117..21dfc74be923970a8e8c2a89886a57af26081e0b 100755 --- a/scripts/gitlab/check_runtime.sh +++ b/scripts/gitlab/check_runtime.sh @@ -24,7 +24,10 @@ SUBSTRATE_REPO_CARGO="git\+${SUBSTRATE_REPO}" SUBSTRATE_VERSIONS_FILE="bin/node/runtime/src/lib.rs" # figure out the latest release tag -LATEST_TAG="$(git tag -l | grep -E '^v[0-9]+\.[0-9]+\.[0-9]+$' | sort -V | tail -n 1)" +boldprint "make sure we have all tags (including those from the release branch)" +git fetch --depth="${GIT_DEPTH:-100}" origin release +git fetch --depth="${GIT_DEPTH:-100}" origin 'refs/tags/*:refs/tags/*' +LATEST_TAG="$(git tag -l | grep -E '^v[0-9]+\.[0-9]+\.[0-9]+-?[0-9]*$' | sort -V | tail -n 1)" boldprint "latest release tag ${LATEST_TAG}" boldprint "latest 10 commits of ${CI_COMMIT_REF_NAME}" @@ -33,6 +36,7 @@ git --no-pager log --graph --oneline --decorate=short -n 10 boldprint "make sure the master branch is available in shallow clones" git fetch --depth="${GIT_DEPTH:-100}" origin master + runtimes=( "kusama" "polkadot" diff --git a/scripts/gitlab/test_deterministic_wasm.sh b/scripts/gitlab/test_deterministic_wasm.sh index db391ca0a2fde43b64c5412eb06570e9978f15b2..998d924d4563687e4602d987a1f23fc9b618131d 100755 --- a/scripts/gitlab/test_deterministic_wasm.sh +++ b/scripts/gitlab/test_deterministic_wasm.sh @@ -6,7 +6,7 @@ source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/lib.sh # build runtime WASM_BUILD_NO_COLOR=1 cargo build --verbose --release -p kusama-runtime -p polkadot-runtime -p westend-runtime # make checksum -sha256sum target/release/wbuild/target/wasm32-unknown-unknown/release/*.wasm > checksum.sha256 +sha256sum 
target/release/wbuild/*-runtime/target/wasm32-unknown-unknown/release/*.wasm > checksum.sha256 # clean up - FIXME: can we reuse some of the artifacts? cargo clean # build again diff --git a/scripts/gitlab/test_linux_stable.sh b/scripts/gitlab/test_linux_stable.sh index a18ff43874097811ef2d26a68b22d53f0234a5a9..b841d8abecf42d4f76564413325f535a2a20bedb 100755 --- a/scripts/gitlab/test_linux_stable.sh +++ b/scripts/gitlab/test_linux_stable.sh @@ -1,6 +1,10 @@ #!/usr/bin/env bash +set -e #shellcheck source=lib.sh source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/lib.sh" -time cargo test --all --release --verbose --locked --features runtime-benchmarks +time cargo test --all --release --verbose --locked --features=runtime-benchmarks --features=real-overseer + +cd parachain/test-parachains/adder/collator/ +time cargo test --release --verbose --locked --features=real-overseer diff --git a/scripts/two-node-local-net.sh b/scripts/two-node-local-net.sh index 4e3291b015ad569f129d2234000c083cbbb9d9e1..16db4304f15553302e74053e36c3ec0f150a6f4a 100755 --- a/scripts/two-node-local-net.sh +++ b/scripts/two-node-local-net.sh @@ -27,7 +27,7 @@ polkadot="target/release/polkadot" # ensure the polkadot binary exists and is up to date if [ ! -x "$polkadot" ] || [ "$polkadot" -ot "$last_modified_rust_file" ]; then - cargo build --release + cargo build --release --features real-overseer fi # setup variables diff --git a/src/main.rs b/src/main.rs index d26fa4e3be2da2e5f5fea4801193c5c2fb57f8fc..d7baf303970ce2f3034fee292b98990b01476e23 100644 --- a/src/main.rs +++ b/src/main.rs @@ -20,40 +20,8 @@ use color_eyre::eyre; -use cli::Error as PolkaError; - -use std::{error, fmt}; - -/// A helper to satisfy the requirements of `eyre` -/// compatible errors, which require `Send + Sync` -/// which are not satisfied by the `sp_*` crates. 
-#[derive(Debug)] -struct ErrorWrapper(std::sync::Arc); - -// nothing is going to be sent to another thread -// it merely exists to glue two distinct error -// types together where the requirements differ -// with `Sync + Send` and without them for `wasm`. -unsafe impl Sync for ErrorWrapper {} -unsafe impl Send for ErrorWrapper {} - -impl error::Error for ErrorWrapper { - fn source(&self) -> Option<&(dyn error::Error + 'static)> { - (&*self.0).source().and_then(|e| e.source()) - } - fn description(&self) -> &str { - "Error Wrapper" - } -} - -impl fmt::Display for ErrorWrapper { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", &*self.0) - } -} - fn main() -> eyre::Result<()> { color_eyre::install()?; - cli::run().map_err(|e| ErrorWrapper(std::sync::Arc::new(e)))?; + cli::run()?; Ok(()) } diff --git a/statement-table/Cargo.toml b/statement-table/Cargo.toml index 3b3e8bd841976f3bfabafadd59c02692e0e305dd..86b51d4f48242cd5dd2b774a220a4f9b2d5053d2 100644 --- a/statement-table/Cargo.toml +++ b/statement-table/Cargo.toml @@ -1,10 +1,10 @@ [package] name = "polkadot-statement-table" -version = "0.8.26" +version = "0.8.27" authors = ["Parity Technologies "] edition = "2018" [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +parity-scale-codec = { version = "1.3.5", default-features = false, features = ["derive"] } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } primitives = { package = "polkadot-primitives", path = "../primitives" } diff --git a/statement-table/src/generic.rs b/statement-table/src/generic.rs index cb95d74d62237f48fd38aa25d0cc8bcf3456cda8..fe51965a232e652abc0c42aa18bae6bda07f9d29 100644 --- a/statement-table/src/generic.rs +++ b/statement-table/src/generic.rs @@ -30,7 +30,7 @@ use std::fmt::Debug; use 
primitives::v1::{ValidityAttestation as PrimitiveValidityAttestation, ValidatorSignature}; -use codec::{Encode, Decode}; +use parity_scale_codec::{Encode, Decode}; /// Context for the statement table. pub trait Context { @@ -159,7 +159,7 @@ enum ValidityVote { } /// A summary of import of a statement. -#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, PartialEq, Eq, Debug)] pub struct Summary { /// The digest of the candidate referenced. pub candidate: D, @@ -256,15 +256,14 @@ impl CandidateData { // if it has enough validity votes // and no authorities have called it bad. fn can_be_included(&self, validity_threshold: usize) -> bool { - self.indicated_bad_by.is_empty() - && self.validity_votes.len() >= validity_threshold + self.validity_votes.len() >= validity_threshold } fn summary(&self, digest: C::Digest) -> Summary { Summary { candidate: digest, group_id: self.group_id.clone(), - validity_votes: self.validity_votes.len() - self.indicated_bad_by.len(), + validity_votes: self.validity_votes.len(), signalled_bad: self.indicated_bad(), } } @@ -362,6 +361,20 @@ impl Table { }) } + /// Get the attested candidate for `digest`. + /// + /// Returns `Some(_)` if the candidate exists and is includable. + pub fn attested_candidate(&self, digest: &C::Digest, context: &C) + -> Option> + { + self.candidate_votes.get(digest).and_then(|data| { + let v_threshold = context.requisite_votes(&data.group_id); + data.attested(v_threshold) + }) + } + /// Import a signed statement. Signatures should be checked for validity, and the /// sender should be checked to actually be an authority. 
/// @@ -489,7 +502,7 @@ impl Table { if new_proposal { self.candidate_votes.entry(digest.clone()).or_insert_with(move || CandidateData { group_id: group, - candidate: candidate, + candidate, validity_votes: HashMap::new(), indicated_bad_by: Vec::new(), }); @@ -581,7 +594,7 @@ impl Table { } Entry::Vacant(vacant) => { if let ValidityVote::Invalid(_) = vote { - votes.indicated_bad_by.push(from); + votes.indicated_bad_by.push(from.clone()); } vacant.insert(vote); @@ -595,7 +608,12 @@ impl Table { } } -fn update_includable_count(map: &mut HashMap, group_id: &G, was_includable: bool, is_includable: bool) { +fn update_includable_count( + map: &mut HashMap, + group_id: &G, + was_includable: bool, + is_includable: bool, +) { if was_includable && !is_includable { if let Entry::Occupied(mut entry) = map.entry(group_id.clone()) { *entry.get_mut() -= 1; @@ -989,7 +1007,7 @@ mod tests { candidate.indicated_bad_by.push(AuthorityId(1024)); - assert!(!candidate.can_be_included(validity_threshold)); + assert!(candidate.can_be_included(validity_threshold)); } #[test] @@ -1039,8 +1057,8 @@ mod tests { table.import_statement(&context, vote); assert!(!table.detected_misbehavior.contains_key(&AuthorityId(3))); - assert!(!table.candidate_includable(&candidate_digest, &context)); - assert!(table.includable_count.is_empty()); + assert!(table.candidate_includable(&candidate_digest, &context)); + assert!(table.includable_count.get(&GroupId(2)).is_some()); } #[test] diff --git a/statement-table/src/lib.rs b/statement-table/src/lib.rs index fed60ded0da2a08a60e82d37ec80c3951e1a810b..a00b582b7dc7d6ab0fdd4c5b22995cb7023ccf6a 100644 --- a/statement-table/src/lib.rs +++ b/statement-table/src/lib.rs @@ -18,63 +18,21 @@ pub mod generic; pub use generic::{Table, Context}; -/// Concrete instantiations suitable for v0 primitives. 
-pub mod v0 { - use crate::generic; - use primitives::v0::{ - Hash, - Id, AbridgedCandidateReceipt, CompactStatement as PrimitiveStatement, ValidatorSignature, ValidatorIndex, - }; - - /// Statements about candidates on the network. - pub type Statement = generic::Statement; - - /// Signed statements about candidates. - pub type SignedStatement = generic::SignedStatement< - AbridgedCandidateReceipt, - Hash, - ValidatorIndex, - ValidatorSignature, - >; - - /// Kinds of misbehavior, along with proof. - pub type Misbehavior = generic::Misbehavior< - AbridgedCandidateReceipt, - Hash, - ValidatorIndex, - ValidatorSignature, - >; - - /// A summary of import of a statement. - pub type Summary = generic::Summary; - - impl<'a> From<&'a Statement> for PrimitiveStatement { - fn from(s: &'a Statement) -> PrimitiveStatement { - match *s { - generic::Statement::Valid(s) => PrimitiveStatement::Valid(s), - generic::Statement::Invalid(s) => PrimitiveStatement::Invalid(s), - generic::Statement::Candidate(ref s) => PrimitiveStatement::Candidate(s.hash()), - } - } - } -} - /// Concrete instantiations suitable for v1 primitives. pub mod v1 { use crate::generic; use primitives::v1::{ - Hash, - Id, CommittedCandidateReceipt, CompactStatement as PrimitiveStatement, + CandidateHash, Id, CommittedCandidateReceipt, CompactStatement as PrimitiveStatement, ValidatorSignature, ValidatorIndex, }; /// Statements about candidates on the network. - pub type Statement = generic::Statement; + pub type Statement = generic::Statement; /// Signed statements about candidates. pub type SignedStatement = generic::SignedStatement< CommittedCandidateReceipt, - Hash, + CandidateHash, ValidatorIndex, ValidatorSignature, >; @@ -82,13 +40,13 @@ pub mod v1 { /// Kinds of misbehavior, along with proof. pub type Misbehavior = generic::Misbehavior< CommittedCandidateReceipt, - Hash, + CandidateHash, ValidatorIndex, ValidatorSignature, >; /// A summary of import of a statement. 
- pub type Summary = generic::Summary; + pub type Summary = generic::Summary; impl<'a> From<&'a Statement> for PrimitiveStatement { fn from(s: &'a Statement) -> PrimitiveStatement { diff --git a/validation/Cargo.toml b/validation/Cargo.toml index 1519227b7767011cfdba112d81d23120b669caee..4902c809377a003684cb2b7710a0eb82af5bf6ff 100644 --- a/validation/Cargo.toml +++ b/validation/Cargo.toml @@ -1,31 +1,31 @@ [package] name = "polkadot-validation" -version = "0.8.26" +version = "0.8.27" authors = ["Parity Technologies "] edition = "2018" [dependencies] polkadot-primitives = { path = "../primitives" } parachain = { package = "polkadot-parachain", path = "../parachain" } -sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-timestamp = { git = "https://github.com/paritytech/substrate", branch = "master" } -sc-basic-authorship = { git = "https://github.com/paritytech/substrate", branch = "master" } -sp-api = { git = "https://github.com/paritytech/substrate", branch = "master" } -consensus = { package = "sp-consensus", git = "https://github.com/paritytech/substrate", branch = "master" } -runtime_primitives = { package = "sp-runtime", git = "https://github.com/paritytech/substrate", branch = "master" } -futures = "0.3.4" -log = "0.4.8" -codec = { package = "parity-scale-codec", version = "1.3.4", default-features = false, features = ["derive"] } -grandpa = { package = "sc-finality-grandpa", git = "https://github.com/paritytech/substrate", branch = "master" } -inherents = { package = "sp-inherents", git = "https://github.com/paritytech/substrate", branch = "master" } -primitives = { package = "sp-core", git = "https://github.com/paritytech/substrate", branch = "master" } -txpool-api = { package = "sp-transaction-pool", git = "https://github.com/paritytech/substrate", branch = "master" } -sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" } -block-builder = { package = "sc-block-builder", git = 
"https://github.com/paritytech/substrate", branch = "master" } -trie = { package = "sp-trie", git = "https://github.com/paritytech/substrate", branch = "master" } -babe-primitives = { package = "sp-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "master" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", git = "https://github.com/paritytech/substrate", branch = "master" } -thiserror = "1.0.21" +sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-timestamp = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sc-basic-authorship = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +consensus = { package = "sp-consensus", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +runtime_primitives = { package = "sp-runtime", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +futures = "0.3.8" +log = "0.4.11" +parity-scale-codec = { version = "1.3.5", default-features = false, features = ["derive"] } +grandpa = { package = "sc-finality-grandpa", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +inherents = { package = "sp-inherents", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +primitives = { package = "sp-core", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +txpool-api = { package = "sp-transaction-pool", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +block-builder = { package = "sc-block-builder", git = "https://github.com/paritytech/substrate", branch = 
"polkadot-release-v0.8.27" } +trie = { package = "sp-trie", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +babe-primitives = { package = "sp-consensus-babe", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } +thiserror = "1.0.22" [dev-dependencies] -sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27" } diff --git a/xcm/Cargo.toml b/xcm/Cargo.toml index 0587613be6a1a334efbebbecb5e483260ae302e5..27151453a73637672100811aed64afe61230fe74 100644 --- a/xcm/Cargo.toml +++ b/xcm/Cargo.toml @@ -6,11 +6,11 @@ description = "The basic XCM datastructures." edition = "2018" [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.5", default-features = false, features = [ "derive" ] } +parity-scale-codec = { version = "1.3.5", default-features = false, features = [ "derive" ] } [features] default = ["std"] wasm-api = [] std = [ - "codec/std", + "parity-scale-codec/std", ] diff --git a/xcm/src/lib.rs b/xcm/src/lib.rs index 1356c12e177b874bbe267b7b0b8184da9e6a4f69..3dc99e07c705b6c35035a964acd93e468b0253b4 100644 --- a/xcm/src/lib.rs +++ b/xcm/src/lib.rs @@ -23,7 +23,7 @@ #![no_std] extern crate alloc; -use codec::{Encode, Decode}; +use parity_scale_codec::{Encode, Decode}; pub mod v0; diff --git a/xcm/src/v0/junction.rs b/xcm/src/v0/junction.rs index 1ea9bff2e4a67806c4a03e658daac90c3fb230de..df11ab3fdedeac526e073401228a730372c8f85e 100644 --- a/xcm/src/v0/junction.rs +++ b/xcm/src/v0/junction.rs @@ -17,7 +17,7 @@ //! Support datastructures for `MultiLocation`, primarily the `Junction` datatype. 
use alloc::vec::Vec; -use codec::{self, Encode, Decode}; +use parity_scale_codec::{self, Encode, Decode}; /// A global identifier of an account-bearing consensus system. #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Encode, Decode, Debug)] diff --git a/xcm/src/v0/mod.rs b/xcm/src/v0/mod.rs index d371f1e80976635dc73c4988fa38b0b14e5d7a11..c69093d4f8511523ff41aa166cc935304ff08385 100644 --- a/xcm/src/v0/mod.rs +++ b/xcm/src/v0/mod.rs @@ -19,7 +19,7 @@ use core::{result, convert::TryFrom}; use alloc::{boxed::Box, vec::Vec}; -use codec::{self, Encode, Decode}; +use parity_scale_codec::{self, Encode, Decode}; use super::{VersionedXcm, VersionedMultiAsset}; mod junction; @@ -158,6 +158,51 @@ pub enum Xcm { /// /// Errors: RelayedFrom { superorigin: MultiLocation, inner: Box }, + + /// A message to notify about a new incoming HRMP channel. This message is meant to be sent by the + /// relay-chain to a para. + /// + /// - `sender`: The sender in the to-be opened channel. Also, the initiator of the channel opening. + /// - `max_message_size`: The maximum size of a message proposed by the sender. + /// - `max_capacity`: The maximum number of messages that can be queued in the channel. + /// + /// Safety: The message should originate directly from the relay-chain. + /// + /// Kind: *System Notification* + HrmpNewChannelOpenRequest { + #[codec(compact)] sender: u32, + #[codec(compact)] max_message_size: u32, + #[codec(compact)] max_capacity: u32, + }, + + /// A message to notify that a previously sent open channel request has been accepted by + /// the recipient. That means that the channel will be opened during the next relay-chain session + /// change. This message is meant to be sent by the relay-chain to a para. + /// + /// Safety: The message should originate directly from the relay-chain. 
+ /// + /// Kind: *System Notification* + /// + /// Errors: + HrmpChannelAccepted { + #[codec(compact)] recipient: u32, + }, + + /// A message to notify that the other party in an open channel decided to close it. In particular, + /// `initiator` is going to close the channel opened from `sender` to the `recipient`. The close + /// will be enacted at the next relay-chain session change. This message is meant to be sent by + /// the relay-chain to a para. + /// + /// Safety: The message should originate directly from the relay-chain. + /// + /// Kind: *System Notification* + /// + /// Errors: + HrmpChannelClosing { + #[codec(compact)] initiator: u32, + #[codec(compact)] sender: u32, + #[codec(compact)] recipient: u32, + }, } impl From for VersionedXcm { diff --git a/xcm/src/v0/multi_asset.rs b/xcm/src/v0/multi_asset.rs index 22bcf0cf74c073089944341453128b738e8609de..700bc78d60ba6b5b097dd59b273945c39e2bc8bb 100644 --- a/xcm/src/v0/multi_asset.rs +++ b/xcm/src/v0/multi_asset.rs @@ -19,7 +19,7 @@ use core::{result, convert::TryFrom}; use alloc::vec::Vec; -use codec::{self, Encode, Decode}; +use parity_scale_codec::{self, Encode, Decode}; use super::{MultiLocation, VersionedMultiAsset}; /// A general identifier for an instance of a non-fungible asset class. diff --git a/xcm/src/v0/multi_location.rs b/xcm/src/v0/multi_location.rs index 0ff0776cea38d720e41b3c6c1b96dce0fe20e14c..ba3ef8c827dba67ec05cee780d5a37b598f83b01 100644 --- a/xcm/src/v0/multi_location.rs +++ b/xcm/src/v0/multi_location.rs @@ -18,7 +18,7 @@ use core::{result, mem, convert::TryFrom}; -use codec::{self, Encode, Decode}; +use parity_scale_codec::{self, Encode, Decode}; use super::Junction; use crate::VersionedMultiLocation; diff --git a/xcm/src/v0/order.rs b/xcm/src/v0/order.rs index 5f2d9da072d80ca8dec02c7b09a87ad9695a5fe2..df7a215015ec42eab346bf35ec7f8575fe791746 100644 --- a/xcm/src/v0/order.rs +++ b/xcm/src/v0/order.rs @@ -17,7 +17,7 @@ //! 
Version 0 of the Cross-Consensus Message format data structures. use alloc::vec::Vec; -use codec::{self, Encode, Decode}; +use parity_scale_codec::{self, Encode, Decode}; use super::{MultiAsset, MultiLocation}; /// An instruction to be executed on some or all of the assets in holding, used by asset-related XCM messages. diff --git a/xcm/src/v0/traits.rs b/xcm/src/v0/traits.rs index f88b1af53ab2185d76dfced23eb90df58718c24a..661e71fe0877da6891cfec4e4c461719847fac77 100644 --- a/xcm/src/v0/traits.rs +++ b/xcm/src/v0/traits.rs @@ -17,7 +17,7 @@ //! Cross-Consensus Message format data structures. use core::result; -use codec::{Encode, Decode}; +use parity_scale_codec::{Encode, Decode}; use super::{MultiLocation, Xcm}; diff --git a/xcm/xcm-builder/Cargo.toml b/xcm/xcm-builder/Cargo.toml index 512cb5e41107910c9f89fe72faedc8e9fae1856f..780b8ed9a1422309698f22e29d6e793ff5b37eb2 100644 --- a/xcm/xcm-builder/Cargo.toml +++ b/xcm/xcm-builder/Cargo.toml @@ -6,14 +6,14 @@ description = "Tools & types for building with XCM and its executor." 
version = "0.8.22" [dependencies] -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +parity-scale-codec = { version = "1.3.5", default-features = false, features = ["derive"] } xcm = { path = "..", default-features = false } xcm-executor = { path = "../xcm-executor", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-arithmetic = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-arithmetic = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } # Polkadot dependencies polkadot-parachain = { path = "../../parachain", default-features = false } @@ -21,7 +21,7 @@ polkadot-parachain = { path = "../../parachain", default-features = false } [features] default = ["std"] std = [ - "codec/std", + "parity-scale-codec/std", "xcm/std", "xcm-executor/std", "sp-std/std", diff --git a/xcm/xcm-builder/src/currency_adapter.rs b/xcm/xcm-builder/src/currency_adapter.rs index 
0986d5b2e489cda3eaa4569728d55eec2b391d46..09b61ab6bb57b3e15ffa14e4e580e1f34104c144 100644 --- a/xcm/xcm-builder/src/currency_adapter.rs +++ b/xcm/xcm-builder/src/currency_adapter.rs @@ -17,7 +17,7 @@ use sp_std::{result, convert::TryInto, marker::PhantomData}; use xcm::v0::{Error, Result, MultiAsset, MultiLocation}; use sp_arithmetic::traits::SaturatedConversion; -use frame_support::traits::{ExistenceRequirement::AllowDeath, WithdrawReason}; +use frame_support::traits::{ExistenceRequirement::AllowDeath, WithdrawReasons}; use xcm_executor::traits::{MatchesFungible, LocationConversion, TransactAsset}; pub struct CurrencyAdapter( @@ -36,7 +36,7 @@ impl< fn deposit_asset(what: &MultiAsset, who: &MultiLocation) -> Result { // Check we handle this asset. - let amount = Matcher::matches_fungible(&what).ok_or(())?.saturated_into(); + let amount: u128 = Matcher::matches_fungible(&what).ok_or(())?.saturated_into(); let who = AccountIdConverter::from_location(who).ok_or(())?; let balance_amount = amount.try_into().map_err(|_| ())?; let _imbalance = Currency::deposit_creating(&who, balance_amount); @@ -45,10 +45,10 @@ impl< fn withdraw_asset(what: &MultiAsset, who: &MultiLocation) -> result::Result { // Check we handle this asset. 
- let amount = Matcher::matches_fungible(&what).ok_or(())?.saturated_into(); + let amount: u128 = Matcher::matches_fungible(&what).ok_or(())?.saturated_into(); let who = AccountIdConverter::from_location(who).ok_or(())?; let balance_amount = amount.try_into().map_err(|_| ())?; - Currency::withdraw(&who, balance_amount, WithdrawReason::Transfer.into(), AllowDeath).map_err(|_| ())?; + Currency::withdraw(&who, balance_amount, WithdrawReasons::TRANSFER, AllowDeath).map_err(|_| ())?; Ok(what.clone()) } } diff --git a/xcm/xcm-builder/src/location_conversion.rs b/xcm/xcm-builder/src/location_conversion.rs index 54d27dad8e9ac3b1246154390b2a636971abb20b..88575b6df61b7698db4a9dd6149420bdebed99b9 100644 --- a/xcm/xcm-builder/src/location_conversion.rs +++ b/xcm/xcm-builder/src/location_conversion.rs @@ -18,7 +18,7 @@ use sp_std::marker::PhantomData; use sp_io::hashing::blake2_256; use sp_runtime::traits::AccountIdConversion; use frame_support::traits::Get; -use codec::Encode; +use parity_scale_codec::Encode; use xcm::v0::{MultiLocation, NetworkId, Junction}; use xcm_executor::traits::LocationConversion; diff --git a/xcm/xcm-executor/Cargo.toml b/xcm/xcm-executor/Cargo.toml index 4eaa4833b2c20c1d31951114f936f16913a028aa..2a7e8823c5adb59d8502679d95a0a28414175f20 100644 --- a/xcm/xcm-executor/Cargo.toml +++ b/xcm/xcm-executor/Cargo.toml @@ -6,20 +6,20 @@ description = "An abstract and configurable XCM message executor." 
version = "0.8.22" [dependencies] -impl-trait-for-tuples = "0.1.3" -codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +impl-trait-for-tuples = "0.2.0" +parity-scale-codec = { version = "1.3.5", default-features = false, features = ["derive"] } xcm = { path = "..", default-features = false } -sp-std = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-io = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-arithmetic = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-core = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } -frame-support = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-arithmetic = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +sp-runtime = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } +frame-support = { git = "https://github.com/paritytech/substrate", branch = "polkadot-release-v0.8.27", default-features = false } [features] default = ["std"] std = [ - "codec/std", + "parity-scale-codec/std", "xcm/std", "sp-std/std", "sp-io/std", diff --git a/xcm/xcm-executor/src/lib.rs b/xcm/xcm-executor/src/lib.rs index 
15cabd5088948c5e86c6f7ab3c71325de1815fcc..5bef32bf1eb5872179f3ff48b05beda49aa7c189 100644 --- a/xcm/xcm-executor/src/lib.rs +++ b/xcm/xcm-executor/src/lib.rs @@ -18,7 +18,7 @@ use sp_std::{prelude::*, marker::PhantomData, convert::TryInto}; use frame_support::{ensure, dispatch::Dispatchable}; -use codec::Decode; +use parity_scale_codec::Decode; use xcm::v0::{ Xcm, Order, ExecuteXcm, SendXcm, Error as XcmError, Result as XcmResult, MultiLocation, MultiAsset, Junction,